From f52498603cfd33e5b0ebc858e5ca8a5f5185f777 Mon Sep 17 00:00:00 2001
From: huangzhenpc
Date: Thu, 15 Jan 2026 20:29:55 +0800
Subject: [PATCH] first commit

---
 .dockerignore | 74 +
 .github/audit-exceptions.yml | 16 +
 .github/workflows/backend-ci.yml | 47 +
 .github/workflows/release.yml | 272 +
 .github/workflows/security-scan.yml | 62 +
 .gitignore | 131 +
 .goreleaser.simple.yaml | 86 +
 .goreleaser.yaml | 200 +
 Dockerfile | 111 +
 Dockerfile.goreleaser | 40 +
 LICENSE | 21 +
 Linux DO Connect.md | 368 +
 Makefile | 22 +
 PR_DESCRIPTION.md | 164 +
 README.md | 458 +
 README_CN.md | 463 +
 backend/.dockerignore | 2 +
 backend/.golangci.yml | 599 +
 backend/Dockerfile | 24 +
 backend/Makefile | 17 +
 backend/cmd/jwtgen/main.go | 57 +
 backend/cmd/server/VERSION | 1 +
 backend/cmd/server/main.go | 155 +
 backend/cmd/server/wire.go | 187 +
 backend/cmd/server/wire_gen.go | 318 +
 backend/ent/account.go | 494 +
 backend/ent/account/account.go | 392 +
 backend/ent/account/where.go | 1413 +
 backend/ent/account_create.go | 2296 ++
 backend/ent/account_delete.go | 88 +
 backend/ent/account_query.go | 900 +
 backend/ent/account_update.go | 1735 +
 backend/ent/accountgroup.go | 176 +
 backend/ent/accountgroup/accountgroup.go | 123 +
 backend/ent/accountgroup/where.go | 212 +
 backend/ent/accountgroup_create.go | 653 +
 backend/ent/accountgroup_delete.go | 87 +
 backend/ent/accountgroup_query.go | 640 +
 backend/ent/accountgroup_update.go | 477 +
 backend/ent/apikey.go | 282 +
 backend/ent/apikey/apikey.go | 213 +
 backend/ent/apikey/where.go | 575 +
 backend/ent/apikey_create.go | 1127 +
 backend/ent/apikey_delete.go | 88 +
 backend/ent/apikey_query.go | 796 +
 backend/ent/apikey_update.go | 940 +
 backend/ent/client.go | 3009 ++
 backend/ent/driver_access.go | 8 +
 backend/ent/ent.go | 636 +
 backend/ent/enttest/enttest.go | 84 +
 backend/ent/generate.go | 6 +
 backend/ent/group.go | 473 +
 backend/ent/group/group.go | 478 +
 backend/ent/group/where.go | 1265 +
 backend/ent/group_create.go | 2129 ++
 backend/ent/group_delete.go | 88 +
 backend/ent/group_query.go | 1232 +
 backend/ent/group_update.go | 2226 ++
 backend/ent/hook/hook.go | 367 +
 backend/ent/intercept/intercept.go | 569 +
 backend/ent/migrate/migrate.go | 64 +
 backend/ent/migrate/schema.go | 882 +
 backend/ent/mutation.go | 18629 +++++++++
 backend/ent/predicate/predicate.go | 52 +
 backend/ent/promocode.go | 228 +
 backend/ent/promocode/promocode.go | 165 +
 backend/ent/promocode/where.go | 594 +
 backend/ent/promocode_create.go | 1081 +
 backend/ent/promocode_delete.go | 88 +
 backend/ent/promocode_query.go | 643 +
 backend/ent/promocode_update.go | 745 +
 backend/ent/promocodeusage.go | 187 +
 backend/ent/promocodeusage/promocodeusage.go | 125 +
 backend/ent/promocodeusage/where.go | 257 +
 backend/ent/promocodeusage_create.go | 696 +
 backend/ent/promocodeusage_delete.go | 88 +
 backend/ent/promocodeusage_query.go | 718 +
 backend/ent/promocodeusage_update.go | 510 +
 backend/ent/proxy.go | 240 +
 backend/ent/proxy/proxy.go | 183 +
 backend/ent/proxy/where.go | 724 +
 backend/ent/proxy_create.go | 1112 +
 backend/ent/proxy_delete.go | 88 +
 backend/ent/proxy_query.go | 646 +
 backend/ent/proxy_update.go | 809 +
 backend/ent/redeemcode.go | 267 +
 backend/ent/redeemcode/redeemcode.go | 187 +
 backend/ent/redeemcode/where.go | 667 +
 backend/ent/redeemcode_create.go | 1177 +
 backend/ent/redeemcode_delete.go | 88 +
 backend/ent/redeemcode_query.go | 724 +
 backend/ent/redeemcode_update.go | 806 +
 backend/ent/runtime.go | 5 +
 backend/ent/runtime/runtime.go | 871 +
 backend/ent/schema/account.go | 218 +
 backend/ent/schema/account_group.go | 60 +
 backend/ent/schema/api_key.go | 81 +
 backend/ent/schema/group.go | 127 +
 backend/ent/schema/mixins/soft_delete.go | 139 +
 backend/ent/schema/mixins/time.go | 32 +
 backend/ent/schema/promo_code.go | 87 +
 backend/ent/schema/promo_code_usage.go | 66 +
 backend/ent/schema/proxy.go | 72 +
 backend/ent/schema/redeem_code.go | 94 +
 backend/ent/schema/setting.go | 54 +
 backend/ent/schema/usage_log.go | 174 +
 backend/ent/schema/user.go | 87 +
 backend/ent/schema/user_allowed_group.go | 57 +
 .../ent/schema/user_attribute_definition.go | 109 +
 backend/ent/schema/user_attribute_value.go | 74 +
 backend/ent/schema/user_subscription.go | 117 +
 backend/ent/setting.go | 128 +
 backend/ent/setting/setting.go | 74 +
 backend/ent/setting/where.go | 255 +
 backend/ent/setting_create.go | 584 +
 backend/ent/setting_delete.go | 88 +
 backend/ent/setting_query.go | 564 +
 backend/ent/setting_update.go | 306 +
 backend/ent/tx.go | 278 +
 backend/ent/usagelog.go | 558 +
 backend/ent/usagelog/usagelog.go | 444 +
 backend/ent/usagelog/where.go | 1611 +
 backend/ent/usagelog_create.go | 2863 ++
 backend/ent/usagelog_delete.go | 88 +
 backend/ent/usagelog_query.go | 949 +
 backend/ent/usagelog_update.go | 2112 ++
 backend/ent/user.go | 375 +
 backend/ent/user/user.go | 443 +
 backend/ent/user/where.go | 933 +
 backend/ent/user_create.go | 1417 +
 backend/ent/user_delete.go | 88 +
 backend/ent/user_query.go | 1273 +
 backend/ent/user_update.go | 1981 +
 backend/ent/userallowedgroup.go | 165 +
 .../ent/userallowedgroup/userallowedgroup.go | 113 +
 backend/ent/userallowedgroup/where.go | 167 +
 backend/ent/userallowedgroup_create.go | 568 +
 backend/ent/userallowedgroup_delete.go | 87 +
 backend/ent/userallowedgroup_query.go | 640 +
 backend/ent/userallowedgroup_update.go | 423 +
 backend/ent/userattributedefinition.go | 276 +
 .../userattributedefinition.go | 205 +
 backend/ent/userattributedefinition/where.go | 664 +
 backend/ent/userattributedefinition_create.go | 1267 +
 backend/ent/userattributedefinition_delete.go | 88 +
 backend/ent/userattributedefinition_query.go | 643 +
 backend/ent/userattributedefinition_update.go | 846 +
 backend/ent/userattributevalue.go | 198 +
 .../userattributevalue/userattributevalue.go | 139 +
 backend/ent/userattributevalue/where.go | 327 +
 backend/ent/userattributevalue_create.go | 731 +
 backend/ent/userattributevalue_delete.go | 88 +
 backend/ent/userattributevalue_query.go | 718 +
 backend/ent/userattributevalue_update.go | 504 +
 backend/ent/usersubscription.go | 384 +
 .../ent/usersubscription/usersubscription.go | 306 +
 backend/ent/usersubscription/where.go | 978 +
 backend/ent/usersubscription_create.go | 1700 +
 backend/ent/usersubscription_delete.go | 88 +
 backend/ent/usersubscription_query.go | 873 +
 backend/ent/usersubscription_update.go | 1349 +
 backend/go.mod | 151 +
 backend/go.sum | 390 +
 backend/internal/config/config.go | 1249 +
 backend/internal/config/config_test.go | 282 +
 backend/internal/config/wire.go | 13 +
 .../internal/handler/admin/account_handler.go | 1307 +
 .../admin/antigravity_oauth_handler.go | 67 +
 .../handler/admin/dashboard_handler.go | 397 +
 .../handler/admin/gemini_oauth_handler.go | 142 +
 .../internal/handler/admin/group_handler.go | 274 +
 .../handler/admin/openai_oauth_handler.go | 229 +
 .../handler/admin/ops_alerts_handler.go | 602 +
 .../handler/admin/ops_dashboard_handler.go | 243 +
 backend/internal/handler/admin/ops_handler.go | 925 +
 .../handler/admin/ops_realtime_handler.go | 213 +
 .../handler/admin/ops_settings_handler.go | 194 +
 .../internal/handler/admin/ops_ws_handler.go | 771 +
 .../internal/handler/admin/promo_handler.go | 209 +
 .../internal/handler/admin/proxy_handler.go | 348 +
 .../internal/handler/admin/redeem_handler.go | 244 +
 .../internal/handler/admin/setting_handler.go | 721 +
 .../handler/admin/subscription_handler.go | 278 +
 .../internal/handler/admin/system_handler.go | 87 +
 .../internal/handler/admin/usage_handler.go | 346 +
 .../handler/admin/user_attribute_handler.go | 342 +
 .../internal/handler/admin/user_handler.go | 279 +
 backend/internal/handler/api_key_handler.go | 218 +
 backend/internal/handler/auth_handler.go | 240 +
 .../internal/handler/auth_linuxdo_oauth.go | 679 +
 .../handler/auth_linuxdo_oauth_test.go | 108 +
 backend/internal/handler/dto/mappers.go | 427 +
 backend/internal/handler/dto/settings.go | 77 +
 backend/internal/handler/dto/types.go | 295 +
 backend/internal/handler/gateway_handler.go | 874 +
 backend/internal/handler/gateway_helper.go | 323 +
 .../internal/handler/gateway_helper_test.go | 141 +
 .../internal/handler/gemini_v1beta_handler.go | 435 +
 .../handler/gemini_v1beta_handler_test.go | 143 +
 backend/internal/handler/handler.go | 46 +
 .../handler/openai_gateway_handler.go | 367 +
 backend/internal/handler/ops_error_logger.go | 1015 +
 backend/internal/handler/redeem_handler.go | 85 +
 .../internal/handler/request_body_limit.go | 27 +
 .../handler/request_body_limit_test.go | 45 +
 backend/internal/handler/setting_handler.go | 49 +
 .../internal/handler/subscription_handler.go | 188 +
 backend/internal/handler/usage_handler.go | 402 +
 backend/internal/handler/user_handler.go | 112 +
 backend/internal/handler/wire.go | 123 +
 .../internal/integration/e2e_gateway_test.go | 799 +
 backend/internal/middleware/rate_limiter.go | 161 +
 .../rate_limiter_integration_test.go | 114 +
 .../internal/middleware/rate_limiter_test.go | 100 +
 .../internal/pkg/antigravity/claude_types.go | 228 +
 backend/internal/pkg/antigravity/client.go | 474 +
 .../internal/pkg/antigravity/gemini_types.go | 175 +
 backend/internal/pkg/antigravity/oauth.go | 263 +
 .../pkg/antigravity/request_transformer.go | 773 +
 .../antigravity/request_transformer_test.go | 244 +
 .../pkg/antigravity/response_transformer.go | 273 +
 .../pkg/antigravity/stream_transformer.go | 464 +
 backend/internal/pkg/claude/constants.go | 81 +
 backend/internal/pkg/ctxkey/ctxkey.go | 21 +
 backend/internal/pkg/errors/errors.go | 158 +
 backend/internal/pkg/errors/errors_test.go | 168 +
 backend/internal/pkg/errors/http.go | 21 +
 backend/internal/pkg/errors/types.go | 115 +
 backend/internal/pkg/gemini/models.go | 43 +
 .../pkg/geminicli/codeassist_types.go | 82 +
 backend/internal/pkg/geminicli/constants.go | 48 +
 .../internal/pkg/geminicli/drive_client.go | 157 +
 .../pkg/geminicli/drive_client_test.go | 18 +
 backend/internal/pkg/geminicli/models.go | 22 +
 backend/internal/pkg/geminicli/oauth.go | 257 +
 backend/internal/pkg/geminicli/oauth_test.go | 113 +
 backend/internal/pkg/geminicli/sanitize.go | 46 +
 backend/internal/pkg/geminicli/token_types.go | 9 +
 backend/internal/pkg/googleapi/status.go | 25 +
 backend/internal/pkg/httpclient/pool.go | 165 +
 backend/internal/pkg/ip/ip.go | 168 +
 backend/internal/pkg/oauth/oauth.go | 237 +
 backend/internal/pkg/openai/constants.go | 43 +
 backend/internal/pkg/openai/instructions.txt | 118 +
 backend/internal/pkg/openai/oauth.go | 366 +
 backend/internal/pkg/openai/request.go | 18 +
 backend/internal/pkg/pagination/pagination.go | 43 +
 backend/internal/pkg/proxyutil/dialer.go | 62 +
 backend/internal/pkg/proxyutil/dialer_test.go | 204 +
 backend/internal/pkg/response/response.go | 186 +
 .../internal/pkg/response/response_test.go | 171 +
 backend/internal/pkg/sysutil/restart.go | 48 +
 backend/internal/pkg/timezone/timezone.go | 161 +
 .../internal/pkg/timezone/timezone_test.go | 137 +
 .../internal/pkg/usagestats/account_stats.go | 14 +
 .../pkg/usagestats/usage_log_types.go | 228 +
 backend/internal/repository/account_repo.go | 1425 +
 .../account_repo_integration_test.go | 587 +
 ...llowed_groups_contract_integration_test.go | 145 +
 backend/internal/repository/api_key_cache.go | 93 +
 .../api_key_cache_integration_test.go | 127 +
 .../internal/repository/api_key_cache_test.go | 46 +
 backend/internal/repository/api_key_repo.go | 435 +
 .../api_key_repo_integration_test.go | 385 +
 backend/internal/repository/billing_cache.go | 183 +
 .../billing_cache_integration_test.go | 283 +
 .../internal/repository/billing_cache_test.go | 87 +
 .../repository/claude_oauth_service.go | 248 +
 .../repository/claude_oauth_service_test.go | 396 +
 .../repository/claude_usage_service.go | 62 +
 .../repository/claude_usage_service_test.go | 117 +
 .../internal/repository/concurrency_cache.go | 391 +
 .../concurrency_cache_benchmark_test.go | 135 +
 .../concurrency_cache_integration_test.go | 412 +
 .../repository/dashboard_aggregation_repo.go | 392 +
 .../internal/repository/dashboard_cache.go | 58 +
 .../repository/dashboard_cache_test.go | 28 +
 backend/internal/repository/db_pool.go | 32 +
 backend/internal/repository/db_pool_test.go | 50 +
 backend/internal/repository/email_cache.go | 52 +
 .../email_cache_integration_test.go | 92 +
 .../internal/repository/email_cache_test.go | 45 +
 backend/internal/repository/ent.go | 69 +
 .../internal/repository/error_translate.go | 97 +
 .../repository/fixtures_integration_test.go | 391 +
 backend/internal/repository/gateway_cache.go | 41 +
 .../gateway_cache_integration_test.go | 96 +
 .../gateway_routing_integration_test.go | 250 +
 .../repository/gemini_oauth_client.go | 119 +
 .../internal/repository/gemini_token_cache.go | 49 +
 .../gemini_token_cache_integration_test.go | 47 +
 .../repository/gemini_token_cache_test.go | 28 +
 .../repository/geminicli_codeassist_client.go | 104 +
 .../repository/github_release_service.go | 136 +
 .../repository/github_release_service_test.go | 317 +
 backend/internal/repository/group_repo.go | 413 +
 .../repository/group_repo_integration_test.go | 677 +
 backend/internal/repository/http_upstream.go | 653 +
 .../http_upstream_benchmark_test.go | 70 +
 .../internal/repository/http_upstream_test.go | 291 +
 backend/internal/repository/identity_cache.go | 51 +
 .../identity_cache_integration_test.go | 67 +
 .../repository/identity_cache_test.go | 46 +
 .../repository/inprocess_transport_test.go | 63 +
 .../repository/integration_harness_test.go | 408 +
 .../internal/repository/migrations_runner.go | 302 +
 .../migrations_schema_integration_test.go | 103 +
 .../repository/openai_oauth_service.go | 89 +
 .../repository/openai_oauth_service_test.go | 249 +
 backend/internal/repository/ops_repo.go | 1098 +
 .../internal/repository/ops_repo_alerts.go | 853 +
 .../internal/repository/ops_repo_dashboard.go | 1015 +
 .../repository/ops_repo_histograms.go | 79 +
 .../ops_repo_latency_histogram_buckets.go | 64 +
 ...ops_repo_latency_histogram_buckets_test.go | 14 +
 .../internal/repository/ops_repo_metrics.go | 422 +
 .../internal/repository/ops_repo_preagg.go | 363 +
 .../repository/ops_repo_realtime_traffic.go | 129 +
 .../repository/ops_repo_request_details.go | 286 +
 .../internal/repository/ops_repo_trends.go | 573 +
 .../repository/ops_repo_window_stats.go | 50 +
 backend/internal/repository/pagination.go | 16 +
 .../internal/repository/pricing_service.go | 81 +
 .../repository/pricing_service_test.go | 145 +
 .../internal/repository/promo_code_repo.go | 273 +
 .../repository/proxy_latency_cache.go | 74 +
 .../repository/proxy_probe_service.go | 118 +
 .../repository/proxy_probe_service_test.go | 119 +
 backend/internal/repository/proxy_repo.go | 359 +
 .../repository/proxy_repo_integration_test.go | 329 +
 backend/internal/repository/redeem_cache.go | 62 +
 .../redeem_cache_integration_test.go | 103 +
 .../internal/repository/redeem_cache_test.go | 77 +
 .../internal/repository/redeem_code_repo.go | 239 +
 .../redeem_code_repo_integration_test.go | 390 +
 backend/internal/repository/redis.go | 39 +
 backend/internal/repository/redis_test.go | 35 +
 .../internal/repository/req_client_pool.go | 64 +
 .../internal/repository/scheduler_cache.go | 276 +
 .../repository/scheduler_outbox_repo.go | 96 +
 ...eduler_snapshot_outbox_integration_test.go | 68 +
 backend/internal/repository/setting_repo.go | 105 +
 .../setting_repo_integration_test.go | 163 +
 .../soft_delete_ent_integration_test.go | 216 +
 backend/internal/repository/sql_scan.go | 42 +
 .../internal/repository/temp_unsched_cache.go | 91 +
 .../repository/timeout_counter_cache.go | 80 +
 .../internal/repository/turnstile_service.go | 63 +
 .../repository/turnstile_service_test.go | 141 +
 backend/internal/repository/update_cache.go | 27 +
 .../update_cache_integration_test.go | 73 +
 backend/internal/repository/usage_log_repo.go | 2271 ++
 .../usage_log_repo_integration_test.go | 1215 +
 .../repository/user_attribute_repo.go | 385 +
 backend/internal/repository/user_repo.go | 468 +
 .../repository/user_repo_integration_test.go | 537 +
 .../repository/user_subscription_repo.go | 435 +
 ...user_subscription_repo_integration_test.go | 747 +
 backend/internal/repository/wire.go | 139 +
 backend/internal/server/api_contract_test.go | 1484 +
 backend/internal/server/http.go | 69 +
 .../internal/server/middleware/admin_auth.go | 193 +
 .../internal/server/middleware/admin_only.go | 27 +
 .../server/middleware/api_key_auth.go | 202 +
 .../server/middleware/api_key_auth_google.go | 146 +
 .../middleware/api_key_auth_google_test.go | 353 +
 .../server/middleware/api_key_auth_test.go | 427 +
 .../server/middleware/auth_subject.go | 28 +
 .../server/middleware/client_request_id.go | 30 +
 backend/internal/server/middleware/cors.go | 103 +
 .../internal/server/middleware/jwt_auth.go | 81 +
 backend/internal/server/middleware/logger.go | 52 +
 .../internal/server/middleware/middleware.go | 73 +
 .../internal/server/middleware/recovery.go | 64 +
 .../server/middleware/recovery_test.go | 81 +
 .../server/middleware/request_body_limit.go | 15 +
 .../server/middleware/security_headers.go | 26 +
 backend/internal/server/middleware/wire.go | 22 +
 backend/internal/server/router.go | 79 +
 backend/internal/server/routes/admin.go | 370 +
 backend/internal/server/routes/auth.go | 50 +
 backend/internal/server/routes/common.go | 32 +
 backend/internal/server/routes/gateway.go | 85 +
 backend/internal/server/routes/user.go | 72 +
 backend/internal/service/account.go | 559 +
 .../account_billing_rate_multiplier_test.go | 27 +
 .../service/account_expiry_service.go | 71 +
 backend/internal/service/account_group.go | 13 +
 backend/internal/service/account_service.go | 351 +
 .../service/account_service_delete_test.go | 239 +
 .../internal/service/account_test_service.go | 847 +
 .../internal/service/account_usage_service.go | 577 +
 backend/internal/service/admin_service.go | 1512 +
 .../service/admin_service_bulk_update_test.go | 80 +
 .../service/admin_service_create_user_test.go | 67 +
 .../service/admin_service_delete_test.go | 489 +
 .../service/admin_service_group_test.go | 380 +
 .../service/admin_service_search_test.go | 238 +
 .../admin_service_update_balance_test.go | 97 +
 .../service/antigravity_gateway_service.go | 2474 ++
 .../antigravity_gateway_service_test.go | 83 +
 .../service/antigravity_image_test.go | 123 +
 .../service/antigravity_model_mapping_test.go | 269 +
 .../service/antigravity_oauth_service.go | 276 +
 .../service/antigravity_quota_fetcher.go | 111 +
 .../service/antigravity_quota_scope.go | 88 +
 .../service/antigravity_token_provider.go | 130 +
 .../service/antigravity_token_refresher.go | 65 +
 backend/internal/service/api_key.go | 22 +
 .../internal/service/api_key_auth_cache.go | 46 +
 .../service/api_key_auth_cache_impl.go | 269 +
 .../service/api_key_auth_cache_invalidate.go | 48 +
 backend/internal/service/api_key_service.go | 570 +
 .../service/api_key_service_cache_test.go | 417 +
 .../service/api_key_service_delete_test.go | 252 +
 .../service/auth_cache_invalidation_test.go | 33 +
 backend/internal/service/auth_service.go | 582 +
 .../service/auth_service_register_test.go | 295 +
 .../internal/service/billing_cache_port.go | 15 +
 .../internal/service/billing_cache_service.go | 661 +
 .../service/billing_cache_service_test.go | 75 +
 backend/internal/service/billing_service.go | 382 +
 .../service/billing_service_image_test.go | 149 +
 .../internal/service/claude_code_validator.go | 265 +
 .../internal/service/concurrency_service.go | 314 +
 backend/internal/service/crs_sync_service.go | 1255 +
 .../service/dashboard_aggregation_service.go | 258 +
 .../dashboard_aggregation_service_test.go | 106 +
 backend/internal/service/dashboard_service.go | 336 +
 .../service/dashboard_service_test.go | 387 +
 backend/internal/service/deferred_service.go | 76 +
 backend/internal/service/domain_constants.go | 159 +
 .../internal/service/email_queue_service.go | 109 +
 backend/internal/service/email_service.go | 359 +
 .../service/gateway_multiplatform_test.go | 1467 +
 .../internal/service/gateway_prompt_test.go | 233 +
 backend/internal/service/gateway_request.go | 505 +
 .../internal/service/gateway_request_test.go | 298 +
 backend/internal/service/gateway_service.go | 3031 ++
 .../service/gateway_service_benchmark_test.go | 50 +
 .../service/gemini_messages_compat_service.go | 2818 ++
 .../gemini_messages_compat_service_test.go | 128 +
 .../service/gemini_multiplatform_test.go | 609 +
 backend/internal/service/gemini_oauth.go | 13 +
 .../internal/service/gemini_oauth_service.go | 1074 +
 .../service/gemini_oauth_service_test.go | 130 +
 backend/internal/service/gemini_quota.go | 448 +
 .../internal/service/gemini_token_cache.go | 17 +
 .../internal/service/gemini_token_provider.go | 160 +
 .../service/gemini_token_refresher.go | 45 +
 .../internal/service/geminicli_codeassist.go | 13 +
 backend/internal/service/group.go | 92 +
 backend/internal/service/group_service.go | 208 +
 backend/internal/service/group_test.go | 92 +
 .../internal/service/http_upstream_port.go | 30 +
 backend/internal/service/identity_service.go | 271 +
 backend/internal/service/oauth_service.go | 301 +
 .../service/openai_codex_transform.go | 528 +
 .../service/openai_codex_transform_test.go | 167 +
 .../service/openai_gateway_service.go | 1736 +
 .../service/openai_gateway_service_test.go | 410 +
 .../internal/service/openai_oauth_service.go | 255 +
 .../service/openai_tool_continuation.go | 213 +
 .../service/openai_tool_continuation_test.go | 98 +
 .../service/ops_account_availability.go | 194 +
 backend/internal/service/ops_advisory_lock.go | 46 +
 .../service/ops_aggregation_service.go | 443 +
 .../service/ops_alert_evaluator_service.go | 922 +
 .../ops_alert_evaluator_service_test.go | 210 +
 backend/internal/service/ops_alert_models.go | 95 +
 backend/internal/service/ops_alerts.go | 232 +
 .../internal/service/ops_cleanup_service.go | 365 +
 backend/internal/service/ops_concurrency.go | 257 +
 backend/internal/service/ops_dashboard.go | 90 +
 .../internal/service/ops_dashboard_models.go | 87 +
 backend/internal/service/ops_errors.go | 45 +
 backend/internal/service/ops_health_score.go | 143 +
 .../internal/service/ops_health_score_test.go | 442 +
 backend/internal/service/ops_histograms.go | 26 +
 .../internal/service/ops_metrics_collector.go | 920 +
 backend/internal/service/ops_models.go | 169 +
 backend/internal/service/ops_port.go | 259 +
 backend/internal/service/ops_query_mode.go | 40 +
 backend/internal/service/ops_realtime.go | 36 +
 .../internal/service/ops_realtime_models.go | 81 +
 .../internal/service/ops_realtime_traffic.go | 36 +
 .../service/ops_realtime_traffic_models.go | 19 +
 .../internal/service/ops_request_details.go | 151 +
 backend/internal/service/ops_retry.go | 720 +
 .../service/ops_scheduled_report_service.go | 705 +
 backend/internal/service/ops_service.go | 613 +
 backend/internal/service/ops_settings.go | 562 +
 .../internal/service/ops_settings_models.go | 100 +
 backend/internal/service/ops_trend_models.go | 65 +
 backend/internal/service/ops_trends.go | 26 +
 .../internal/service/ops_upstream_context.go | 131 +
 backend/internal/service/ops_window_stats.go | 24 +
 backend/internal/service/pricing_service.go | 745 +
 backend/internal/service/promo_code.go | 73 +
 .../internal/service/promo_code_repository.go | 30 +
 backend/internal/service/promo_service.go | 268 +
 .../service/prompts/codex_cli_instructions.md | 275 +
 .../service/prompts/codex_opencode_bridge.txt | 122 +
 .../service/prompts/tool_remap_message.txt | 63 +
 backend/internal/service/proxy.go | 51 +
 .../internal/service/proxy_latency_cache.go | 23 +
 backend/internal/service/proxy_service.go | 193 +
 backend/internal/service/quota_fetcher.go | 19 +
 backend/internal/service/ratelimit_service.go | 725 +
 .../service/ratelimit_service_401_test.go | 121 +
 backend/internal/service/redeem_code.go | 41 +
 backend/internal/service/redeem_service.go | 438 +
 backend/internal/service/scheduler_cache.go | 68 +
 backend/internal/service/scheduler_events.go | 10 +
 backend/internal/service/scheduler_outbox.go | 21 +
 .../service/scheduler_snapshot_service.go | 786 +
 backend/internal/service/setting.go | 10 +
 backend/internal/service/setting_service.go | 759 +
 backend/internal/service/settings_view.go | 103 +
 .../internal/service/subscription_service.go | 669 +
 backend/internal/service/temp_unsched.go | 36 +
 .../internal/service/timing_wheel_service.go | 63 +
 .../service/token_cache_invalidator.go | 35 +
 .../service/token_cache_invalidator_test.go | 97 +
 .../internal/service/token_cache_key_test.go | 153 +
 .../internal/service/token_refresh_service.go | 240 +
 .../service/token_refresh_service_test.go | 361 +
 backend/internal/service/token_refresher.go | 132 +
 .../internal/service/token_refresher_test.go | 228 +
 backend/internal/service/turnstile_service.go | 105 +
 backend/internal/service/update_service.go | 540 +
 backend/internal/service/usage_log.go | 61 +
 backend/internal/service/usage_service.go | 343 +
 backend/internal/service/user.go | 63 +
 backend/internal/service/user_attribute.go | 125 +
 .../service/user_attribute_service.go | 323 +
 backend/internal/service/user_service.go | 223 +
 backend/internal/service/user_subscription.go | 124 +
 .../service/user_subscription_port.go | 35 +
 backend/internal/service/wire.go | 249 +
 backend/internal/setup/cli.go | 295 +
 backend/internal/setup/handler.go | 354 +
 backend/internal/setup/setup.go | 573 +
 backend/internal/util/logredact/redact.go | 100 +
 .../util/responseheaders/responseheaders.go | 99 +
 .../responseheaders/responseheaders_test.go | 67 +
 .../internal/util/urlvalidator/validator.go | 154 +
 .../util/urlvalidator/validator_test.go | 24 +
 backend/internal/web/embed_off.go | 48 +
 backend/internal/web/embed_on.go | 238 +
 backend/internal/web/html_cache.go | 77 +
 backend/migrations/001_init.sql | 172 +
 .../migrations/002_account_type_migration.sql | 33 +
 backend/migrations/003_subscription.sql | 65 +
 .../migrations/004_add_redeem_code_notes.sql | 6 +
 backend/migrations/005_schema_parity.sql | 42 +
 ...06_fix_invalid_subscription_expires_at.sql | 10 +
 .../007_add_user_allowed_groups.sql | 20 +
 backend/migrations/008_seed_default_group.sql | 4 +
 .../009_fix_usage_logs_cache_columns.sql | 37 +
 .../010_add_usage_logs_aggregated_indexes.sql | 4 +
 .../011_remove_duplicate_unique_indexes.sql | 39 +
 .../012_add_user_subscription_soft_delete.sql | 13 +
 .../013_log_orphan_allowed_groups.sql | 32 +
 .../014_drop_legacy_allowed_groups.sql | 15 +
 .../015_fix_settings_unique_constraint.sql | 19 +
 ...016_soft_delete_partial_unique_indexes.sql | 51 +
 backend/migrations/018_user_attributes.sql | 48 +
 .../019_migrate_wechat_to_attributes.sql | 83 +
 .../migrations/020_add_temp_unschedulable.sql | 15 +
 backend/migrations/024_add_gemini_tier_id.sql | 30 +
 .../026_ops_metrics_aggregation_tables.sql | 104 +
 .../027_usage_billing_consistency.sql | 58 +
 backend/migrations/028_add_account_notes.sql | 7 +
 .../028_add_usage_logs_user_agent.sql | 10 +
 .../migrations/028_group_image_pricing.sql | 10 +
 .../029_add_group_claude_code_restriction.sql | 21 +
 .../migrations/029_usage_log_image_fields.sql | 5 +
 .../migrations/030_add_account_expires_at.sql | 10 +
 backend/migrations/031_add_ip_address.sql | 5 +
 .../032_add_api_key_ip_restriction.sql | 9 +
 backend/migrations/033_add_promo_codes.sql | 34 +
 .../migrations/033_ops_monitoring_vnext.sql | 717 +
 .../034_ops_upstream_error_events.sql | 9 +
 ...034_usage_dashboard_aggregation_tables.sql | 77 +
 .../035_usage_logs_partitioning.sql | 54 +
 ...036_ops_error_logs_add_is_count_tokens.sql | 16 +
 backend/migrations/036_scheduler_outbox.sql | 10 +
 .../037_add_account_rate_multiplier.sql | 14 +
 backend/migrations/037_ops_alert_silences.sql | 28 +
 ...results_and_standardize_classification.sql | 111 +
 backend/migrations/README.md | 178 +
 backend/migrations/migrations.go | 34 +
 backend/resources/model-pricing/README.md | 37 +
 .../model_prices_and_context_window.json | 31356 ++++++++++++++++
 backend/tools.go | 9 +
 build_image.sh | 8 +
 config.yaml | 506 +
 deploy/.env.example | 218 +
 deploy/Caddyfile | 188 +
 deploy/DOCKER.md | 76 +
 deploy/Makefile | 41 +
 deploy/README.md | 403 +
 deploy/config.example.yaml | 563 +
 deploy/docker-compose-test.yml | 197 +
 deploy/docker-compose.override.yml.example | 137 +
 deploy/docker-compose.standalone.yml | 93 +
 deploy/docker-compose.yml | 211 +
 deploy/flow.md | 222 +
 deploy/install.sh | 1169 +
 deploy/sub2api.service | 33 +
 frontend/.eslintignore | 14 +
 frontend/.eslintrc.cjs | 36 +
 frontend/.npmrc | 4 +
 frontend/audit.json | 118 +
 frontend/index.html | 13 +
 frontend/package-lock.json | 5304 +++
 frontend/package.json | 45 +
 frontend/pnpm-lock.yaml | 8419 +++++
 frontend/postcss.config.js | 6 +
 frontend/public/logo.png | Bin 0 -> 149928 bytes
 frontend/src/App.vue | 89 +
 frontend/src/api/admin/accounts.ts | 376 +
 frontend/src/api/admin/antigravity.ts | 56 +
 frontend/src/api/admin/dashboard.ts | 207 +
 frontend/src/api/admin/gemini.ts | 72 +
 frontend/src/api/admin/groups.ts | 169 +
 frontend/src/api/admin/index.ts | 61 +
 frontend/src/api/admin/ops.ts | 1209 +
 frontend/src/api/admin/promo.ts | 69 +
 frontend/src/api/admin/proxies.ts | 227 +
 frontend/src/api/admin/redeem.ts | 174 +
 frontend/src/api/admin/settings.ts | 251 +
 frontend/src/api/admin/subscriptions.ts | 175 +
 frontend/src/api/admin/system.ts | 81 +
 frontend/src/api/admin/usage.ts | 118 +
 frontend/src/api/admin/userAttributes.ts | 131 +
 frontend/src/api/admin/users.ts | 191 +
 frontend/src/api/auth.ts | 150 +
 frontend/src/api/client.ts | 165 +
 frontend/src/api/groups.ts | 25 +
 frontend/src/api/index.ts | 23 +
 frontend/src/api/keys.ts | 114 +
 frontend/src/api/redeem.ts | 65 +
 frontend/src/api/setup.ts | 87 +
 frontend/src/api/subscriptions.ts | 76 +
 frontend/src/api/usage.ts | 274 +
 frontend/src/api/user.ts | 54 +
 frontend/src/components/Guide/steps.ts | 309 +
 frontend/src/components/TurnstileWidget.vue | 182 +
 .../components/account/AccountGroupsCell.vue | 158 +
 .../components/account/AccountQuotaInfo.vue | 198 +
 .../components/account/AccountStatsModal.vue | 736 +
 .../account/AccountStatusIndicator.vue | 171 +
 .../components/account/AccountTestModal.vue | 469 +
 .../account/AccountTodayStatsCell.vue | 100 +
 .../components/account/AccountUsageCell.vue | 845 +
 .../account/BulkEditAccountModal.vue | 1038 +
 .../components/account/CreateAccountModal.vue | 2538 ++
 .../components/account/EditAccountModal.vue | 1227 +
 .../account/ModelWhitelistSelector.vue | 200 +
 .../account/OAuthAuthorizationFlow.vue | 582 +
 .../components/account/ReAuthAccountModal.vue | 533 +
 .../components/account/SyncFromCrsModal.vue | 189 +
 .../account/TempUnschedStatusModal.vue | 249 +
 .../components/account/UsageProgressBar.vue | 167 +
 frontend/src/components/account/index.ts | 13 +
 .../admin/account/AccountActionMenu.vue | 50 +
 .../admin/account/AccountBulkActionsBar.vue | 33 +
 .../admin/account/AccountStatsModal.vue | 700 +
 .../admin/account/AccountTableActions.vue | 19 +
 .../admin/account/AccountTableFilters.vue | 25 +
 .../admin/account/AccountTestModal.vue | 409 +
 .../admin/account/ReAuthAccountModal.vue | 533 +
 .../admin/usage/UsageExportProgress.vue | 16 +
 .../components/admin/usage/UsageFilters.vue | 433 +
 .../admin/usage/UsageStatsCards.vue | 72 +
 .../src/components/admin/usage/UsageTable.vue | 327 +
 .../admin/user/UserAllowedGroupsModal.vue | 59 +
 .../admin/user/UserApiKeysModal.vue | 47 +
 .../admin/user/UserBalanceModal.vue | 86 +
 .../components/admin/user/UserCreateModal.vue | 78 +
 .../components/admin/user/UserEditModal.vue | 110 +
 .../components/auth/LinuxDoOAuthSection.vue | 61 +
 .../charts/ModelDistributionChart.vue | 152 +
 .../src/components/charts/TokenUsageTrend.vue | 187 +
 frontend/src/components/common/BaseDialog.vue | 142 +
 .../src/components/common/ConfirmDialog.vue | 70 +
 frontend/src/components/common/DataTable.vue | 538 +
 .../src/components/common/DateRangePicker.vue | 425 +
 frontend/src/components/common/EmptyState.vue | 80 +
 .../common/ExportProgressDialog.vue | 68 +
 frontend/src/components/common/GroupBadge.vue | 126 +
 .../src/components/common/GroupOptionItem.vue | 52 +
 .../src/components/common/GroupSelector.vue | 82 +
 .../src/components/common/HelpTooltip.vue | 44 +
 frontend/src/components/common/Input.vue | 103 +
 .../src/components/common/LoadingSpinner.vue | 65 +
 .../src/components/common/LocaleSwitcher.vue | 91 +
 frontend/src/components/common/ModelIcon.vue | 278 +
 frontend/src/components/common/Pagination.vue | 205 +
 .../src/components/common/PlatformIcon.vue | 52 +
 .../components/common/PlatformTypeBadge.vue | 92 +
 .../src/components/common/ProxySelector.vue | 426 +
 frontend/src/components/common/README.md | 268 +
 .../src/components/common/SearchInput.vue | 43 +
 frontend/src/components/common/Select.vue | 514 +
 frontend/src/components/common/Skeleton.vue | 46 +
 frontend/src/components/common/StatCard.vue | 81 +
 .../src/components/common/StatusBadge.vue | 39 +
 .../common/SubscriptionProgressMini.vue | 320 +
 frontend/src/components/common/TextArea.vue | 81 +
 frontend/src/components/common/Toast.vue | 164 +
 frontend/src/components/common/Toggle.vue | 29 +
 .../src/components/common/VersionBadge.vue | 555 +
 frontend/src/components/common/index.ts | 14 +
 frontend/src/components/common/types.ts | 10 +
 frontend/src/components/icons/Icon.vue | 140 +
 frontend/src/components/icons/index.ts | 1 +
 frontend/src/components/keys/UseKeyModal.vue | 625 +
 frontend/src/components/layout/AppHeader.vue | 304 +
 frontend/src/components/layout/AppLayout.vue | 52 +
 frontend/src/components/layout/AppSidebar.vue | 547 +
 frontend/src/components/layout/AuthLayout.vue | 90 +
 frontend/src/components/layout/EXAMPLES.md | 424 +
 frontend/src/components/layout/INTEGRATION.md | 484 +
 frontend/src/components/layout/README.md | 218 +
 .../src/components/layout/TablePageLayout.vue | 114 +
 frontend/src/components/layout/index.ts | 9 +
 .../src/components/user/UserAttributeForm.vue | 199 +
 .../user/UserAttributesConfigModal.vue | 405 +
 .../user/dashboard/UserDashboardCharts.vue | 151 +
 .../dashboard/UserDashboardQuickActions.vue | 61 +
 .../dashboard/UserDashboardRecentUsage.vue | 57 +
 .../user/dashboard/UserDashboardStats.vue | 162 +
 .../user/profile/ProfileEditForm.vue | 74 +
 .../user/profile/ProfileInfoCard.vue | 58 +
 .../user/profile/ProfilePasswordForm.vue | 109 +
 frontend/src/composables/useAccountOAuth.ts | 182 +
 .../src/composables/useAntigravityOAuth.ts | 115 +
 frontend/src/composables/useClipboard.ts | 68 +
 frontend/src/composables/useForm.ts | 43 +
 frontend/src/composables/useGeminiOAuth.ts | 165 +
 frontend/src/composables/useModelWhitelist.ts | 312 +
 frontend/src/composables/useOnboardingTour.ts | 569 +
 frontend/src/composables/useOpenAIOAuth.ts | 158 +
 frontend/src/composables/useTableLoader.ts | 108 +
 frontend/src/i18n/index.ts | 53 +
 frontend/src/i18n/locales/en.ts | 3005 ++
 frontend/src/i18n/locales/zh.ts | 3149 ++
 frontend/src/main.ts | 29 +
 frontend/src/router/README.md | 276 +
 frontend/src/router/index.ts | 408 +
 frontend/src/router/meta.d.ts | 46 +
 frontend/src/stores/README.md | 197 +
 frontend/src/stores/adminSettings.ts | 130 +
 frontend/src/stores/app.ts | 425 +
 frontend/src/stores/auth.ts | 262 +
 frontend/src/stores/index.ts | 14 +
 frontend/src/stores/onboarding.ts | 88 +
 frontend/src/stores/subscriptions.ts | 140 +
 frontend/src/style.css | 725 +
 frontend/src/styles/onboarding.css | 228 +
 frontend/src/types/global.d.ts | 9 +
 frontend/src/types/index.ts | 1036 +
 frontend/src/utils/format.ts | 218 +
 frontend/src/utils/url.ts | 37 +
 frontend/src/views/HomeView.vue | 644 +
 frontend/src/views/NotFoundView.vue | 91 +
 frontend/src/views/admin/AccountsView.vue | 422 +
 frontend/src/views/admin/DashboardView.vue | 560 +
 frontend/src/views/admin/GroupsView.vue | 1172 +
 frontend/src/views/admin/PromoCodesView.vue | 718 +
 frontend/src/views/admin/ProxiesView.vue | 1324 +
 frontend/src/views/admin/RedeemView.vue | 701 +
 frontend/src/views/admin/SettingsView.vue | 1320 +
 .../src/views/admin/SubscriptionsView.vue | 992 +
 frontend/src/views/admin/UsageView.vue | 146 +
 frontend/src/views/admin/UsersView.vue | 1101 +
 frontend/src/views/admin/ops/OpsDashboard.vue | 730 +
 .../ops/components/OpsAlertEventsCard.vue | 648 +
 .../ops/components/OpsAlertRulesCard.vue | 591 +
 .../ops/components/OpsConcurrencyCard.vue | 525 +
 .../ops/components/OpsDashboardHeader.vue | 1526 +
 .../ops/components/OpsDashboardSkeleton.vue | 99 +
 .../components/OpsEmailNotificationCard.vue | 441 +
 .../ops/components/OpsErrorDetailModal.vue | 309 +
 .../ops/components/OpsErrorDetailsModal.vue | 270 +
 .../components/OpsErrorDistributionChart.vue | 157 +
 .../admin/ops/components/OpsErrorLogTable.vue | 266 +
 .../ops/components/OpsErrorTrendChart.vue | 200 +
 .../admin/ops/components/OpsLatencyChart.vue | 101 +
 .../ops/components/OpsRequestDetailsModal.vue | 284 +
 .../ops/components/OpsRuntimeSettingsCard.vue | 536 +
 .../ops/components/OpsSettingsDialog.vue | 549 +
 .../components/OpsThroughputTrendChart.vue | 255 +
 frontend/src/views/admin/ops/types.ts | 21 +
 .../views/admin/ops/utils/opsFormatters.ts | 75 +
 frontend/src/views/auth/EmailVerifyView.vue | 435 +
 .../src/views/auth/LinuxDoCallbackView.vue | 119 +
 frontend/src/views/auth/LoginView.vue | 342 +
 frontend/src/views/auth/OAuthCallbackView.vue | 89 +
 frontend/src/views/auth/README.md | 360 +
 frontend/src/views/auth/RegisterView.vue | 554 +
 frontend/src/views/auth/USAGE_EXAMPLES.md | 610 +
 frontend/src/views/auth/VISUAL_GUIDE.md | 642 +
 frontend/src/views/auth/index.ts | 7 +
 frontend/src/views/setup/SetupWizardView.vue | 646 +
 frontend/src/views/user/DashboardView.vue | 36 +
 frontend/src/views/user/KeysView.vue | 997 +
 frontend/src/views/user/ProfileView.vue | 41 +
 frontend/src/views/user/RedeemView.vue | 493 +
 frontend/src/views/user/SubscriptionsView.vue | 342 +
 frontend/src/views/user/UsageView.vue | 815 +
 frontend/src/vite-env.d.ts | 16 +
 frontend/tailwind.config.js | 131 +
 frontend/tsconfig.json | 25 +
 frontend/tsconfig.node.json | 10 +
 frontend/vite.config.ts | 77 +
 tools/check_pnpm_audit_exceptions.py | 247 +
 820 files changed, 320002 insertions(+)
 create mode 100644 .dockerignore
 create mode 100644 .github/audit-exceptions.yml
 create mode 100644 .github/workflows/backend-ci.yml
 create mode 100644 .github/workflows/release.yml
 create mode 100644 .github/workflows/security-scan.yml
 create mode 100644 .gitignore
 create mode 100644 .goreleaser.simple.yaml
 create mode 100644 .goreleaser.yaml
 create mode 100644 Dockerfile
 create mode 100644 Dockerfile.goreleaser
 create mode 100644 LICENSE
 create mode 100644 Linux DO Connect.md
 create mode 100644 Makefile
 create mode 100644 PR_DESCRIPTION.md
 create mode 100644 README.md
 create mode 100644 README_CN.md
 create mode 100644 backend/.dockerignore
 create mode 100644 backend/.golangci.yml
 create mode 100644 backend/Dockerfile
 create mode 100644 backend/Makefile
 create mode 100644 backend/cmd/jwtgen/main.go
 create mode 100644 backend/cmd/server/VERSION
 create mode 100644 backend/cmd/server/main.go
 create mode 100644 backend/cmd/server/wire.go
 create mode 100644 backend/cmd/server/wire_gen.go
 create mode 100644 backend/ent/account.go
 create mode 100644 backend/ent/account/account.go
 create mode 100644 backend/ent/account/where.go
 create mode 100644 backend/ent/account_create.go
 create mode 100644 backend/ent/account_delete.go
 create mode 100644 backend/ent/account_query.go
 create mode 100644 backend/ent/account_update.go
 create mode 100644 backend/ent/accountgroup.go
 create mode 100644 backend/ent/accountgroup/accountgroup.go
 create mode 100644 backend/ent/accountgroup/where.go
 create mode 100644 backend/ent/accountgroup_create.go
 create mode 100644 backend/ent/accountgroup_delete.go
 create mode 100644 backend/ent/accountgroup_query.go
 create mode 100644 backend/ent/accountgroup_update.go
 create mode 100644 backend/ent/apikey.go
 create mode 100644 backend/ent/apikey/apikey.go
 create mode 100644 backend/ent/apikey/where.go
 create mode 100644 backend/ent/apikey_create.go
 create mode 100644 backend/ent/apikey_delete.go
 create mode 100644 backend/ent/apikey_query.go
 create mode 100644 backend/ent/apikey_update.go
 create mode 100644 backend/ent/client.go
 create mode 100644 backend/ent/driver_access.go
 create mode 100644 backend/ent/ent.go
 create mode 100644 backend/ent/enttest/enttest.go
 create mode 100644 backend/ent/generate.go
 create mode 100644 backend/ent/group.go
 create mode 100644 backend/ent/group/group.go
 create mode 100644 backend/ent/group/where.go
 create mode 100644 backend/ent/group_create.go
 create mode 100644 backend/ent/group_delete.go
 create mode 100644 backend/ent/group_query.go
 create mode 100644 backend/ent/group_update.go
 create mode 100644 backend/ent/hook/hook.go
 create mode 100644 backend/ent/intercept/intercept.go
 create mode 100644 backend/ent/migrate/migrate.go
 create mode 100644 backend/ent/migrate/schema.go
 create mode 100644 backend/ent/mutation.go
 create mode 100644 backend/ent/predicate/predicate.go
 create mode 100644 backend/ent/promocode.go
 create mode 100644 backend/ent/promocode/promocode.go
 create mode 100644 backend/ent/promocode/where.go
 create mode 100644 backend/ent/promocode_create.go
 create mode 100644 backend/ent/promocode_delete.go
 create mode 100644 backend/ent/promocode_query.go
 create mode 100644 backend/ent/promocode_update.go
 create mode 100644 backend/ent/promocodeusage.go
 create mode 100644 backend/ent/promocodeusage/promocodeusage.go
 create mode 100644 backend/ent/promocodeusage/where.go
 create mode 100644 backend/ent/promocodeusage_create.go
 create mode 100644 backend/ent/promocodeusage_delete.go
 create mode 100644 backend/ent/promocodeusage_query.go
 create mode 100644 backend/ent/promocodeusage_update.go
 create mode 100644 backend/ent/proxy.go
 create mode 100644 backend/ent/proxy/proxy.go
 create mode 100644 backend/ent/proxy/where.go
 create mode 100644 backend/ent/proxy_create.go
 create mode 100644 backend/ent/proxy_delete.go
 create mode 100644 backend/ent/proxy_query.go
 create mode 100644 backend/ent/proxy_update.go
 create mode 100644 backend/ent/redeemcode.go
 create mode 100644 backend/ent/redeemcode/redeemcode.go
 create mode 100644 backend/ent/redeemcode/where.go
 create mode 100644 backend/ent/redeemcode_create.go
 create mode 100644 backend/ent/redeemcode_delete.go
 create mode 100644 backend/ent/redeemcode_query.go
 create mode 100644 backend/ent/redeemcode_update.go
 create mode 100644 backend/ent/runtime.go
 create mode 100644 backend/ent/runtime/runtime.go
 create mode 100644 backend/ent/schema/account.go
 create mode 100644 backend/ent/schema/account_group.go
 create mode 100644 backend/ent/schema/api_key.go
 create mode 100644 backend/ent/schema/group.go
 create mode 100644 backend/ent/schema/mixins/soft_delete.go
 create mode 100644 backend/ent/schema/mixins/time.go
 create mode 100644 backend/ent/schema/promo_code.go
 create mode 100644 backend/ent/schema/promo_code_usage.go
 create mode 100644 backend/ent/schema/proxy.go
 create mode 100644 backend/ent/schema/redeem_code.go
 create mode 100644 backend/ent/schema/setting.go
 create mode 100644 backend/ent/schema/usage_log.go
 create mode 100644 backend/ent/schema/user.go
 create mode 100644 backend/ent/schema/user_allowed_group.go
 create mode 100644 backend/ent/schema/user_attribute_definition.go
 create mode 100644 backend/ent/schema/user_attribute_value.go
 create mode 100644 backend/ent/schema/user_subscription.go
 create mode 100644 backend/ent/setting.go
 create mode 100644 backend/ent/setting/setting.go
 create mode 100644 backend/ent/setting/where.go
 create mode 100644 backend/ent/setting_create.go
 create mode 100644 backend/ent/setting_delete.go
 create mode 100644 backend/ent/setting_query.go
 create mode 100644 backend/ent/setting_update.go
 create mode 100644 backend/ent/tx.go
 create mode 100644 backend/ent/usagelog.go
 create mode 100644 backend/ent/usagelog/usagelog.go
 create mode 100644 backend/ent/usagelog/where.go
 create mode 100644 backend/ent/usagelog_create.go
 create mode 100644 backend/ent/usagelog_delete.go
 create mode 100644 backend/ent/usagelog_query.go
 create mode 100644 backend/ent/usagelog_update.go
 create mode 100644 backend/ent/user.go
 create mode 100644 backend/ent/user/user.go
 create mode 100644 backend/ent/user/where.go
 create mode 100644 backend/ent/user_create.go
 create mode 100644 backend/ent/user_delete.go
 create mode 100644 backend/ent/user_query.go
 create mode 100644 backend/ent/user_update.go
 create mode 100644 backend/ent/userallowedgroup.go
 create mode 100644 backend/ent/userallowedgroup/userallowedgroup.go
 create mode 100644 backend/ent/userallowedgroup/where.go
 create mode 100644 backend/ent/userallowedgroup_create.go
 create mode 100644 backend/ent/userallowedgroup_delete.go
 create mode 100644 backend/ent/userallowedgroup_query.go
 create mode 100644 backend/ent/userallowedgroup_update.go
 create mode 100644 backend/ent/userattributedefinition.go
 create mode 100644 backend/ent/userattributedefinition/userattributedefinition.go
 create mode 100644 backend/ent/userattributedefinition/where.go
 create mode 100644 backend/ent/userattributedefinition_create.go
 create mode 100644 backend/ent/userattributedefinition_delete.go
 create mode 100644 backend/ent/userattributedefinition_query.go
 create mode 100644 backend/ent/userattributedefinition_update.go
 create mode 100644 backend/ent/userattributevalue.go
 create mode 100644 backend/ent/userattributevalue/userattributevalue.go
 create mode 100644 backend/ent/userattributevalue/where.go
 create mode 100644 backend/ent/userattributevalue_create.go
 create mode 100644 backend/ent/userattributevalue_delete.go
 create mode 100644 backend/ent/userattributevalue_query.go
 create mode 100644 backend/ent/userattributevalue_update.go
 create mode 100644 backend/ent/usersubscription.go
 create mode 100644 backend/ent/usersubscription/usersubscription.go
 create mode 100644 backend/ent/usersubscription/where.go
 create mode 100644 backend/ent/usersubscription_create.go
 create mode 100644 backend/ent/usersubscription_delete.go
 create mode 100644 backend/ent/usersubscription_query.go
 create mode 100644 backend/ent/usersubscription_update.go
 create mode 100644 backend/go.mod
 create mode 100644 backend/go.sum
 create mode 100644 backend/internal/config/config.go
 create mode 100644 backend/internal/config/config_test.go
 create mode 100644 backend/internal/config/wire.go
 create mode 100644 backend/internal/handler/admin/account_handler.go
 create mode 100644 backend/internal/handler/admin/antigravity_oauth_handler.go
 create mode 100644 backend/internal/handler/admin/dashboard_handler.go
 create mode 100644 backend/internal/handler/admin/gemini_oauth_handler.go
 create mode 100644 backend/internal/handler/admin/group_handler.go
 create mode 100644 backend/internal/handler/admin/openai_oauth_handler.go
 create mode 100644 backend/internal/handler/admin/ops_alerts_handler.go
 create mode 100644 backend/internal/handler/admin/ops_dashboard_handler.go
 create mode 100644 backend/internal/handler/admin/ops_handler.go
 create mode 100644 backend/internal/handler/admin/ops_realtime_handler.go
 create mode 100644 backend/internal/handler/admin/ops_settings_handler.go
 create mode 100644 backend/internal/handler/admin/ops_ws_handler.go
 create mode 100644 backend/internal/handler/admin/promo_handler.go
 create mode 100644 backend/internal/handler/admin/proxy_handler.go
 create mode 100644 backend/internal/handler/admin/redeem_handler.go
 create mode 100644 backend/internal/handler/admin/setting_handler.go
 create mode 100644 backend/internal/handler/admin/subscription_handler.go
 create mode 100644 backend/internal/handler/admin/system_handler.go
 create mode 100644 backend/internal/handler/admin/usage_handler.go
 create mode 100644 backend/internal/handler/admin/user_attribute_handler.go
 create mode 100644 backend/internal/handler/admin/user_handler.go
 create mode 100644 backend/internal/handler/api_key_handler.go
 create mode 100644 backend/internal/handler/auth_handler.go
 create mode 100644 backend/internal/handler/auth_linuxdo_oauth.go
 create mode 100644 backend/internal/handler/auth_linuxdo_oauth_test.go
 create mode 100644 backend/internal/handler/dto/mappers.go
 create mode 100644 backend/internal/handler/dto/settings.go
 create mode 100644 backend/internal/handler/dto/types.go
 create mode 100644 backend/internal/handler/gateway_handler.go
 create mode 100644 backend/internal/handler/gateway_helper.go
 create mode 100644 backend/internal/handler/gateway_helper_test.go
 create mode 100644 backend/internal/handler/gemini_v1beta_handler.go
 create mode 100644 backend/internal/handler/gemini_v1beta_handler_test.go
 create mode 100644 backend/internal/handler/handler.go
 create mode 100644 backend/internal/handler/openai_gateway_handler.go
 create mode 100644 backend/internal/handler/ops_error_logger.go
 create mode 100644 backend/internal/handler/redeem_handler.go
 create mode 100644 backend/internal/handler/request_body_limit.go
 create mode 100644 backend/internal/handler/request_body_limit_test.go
 create mode 100644 backend/internal/handler/setting_handler.go
 create mode 100644 backend/internal/handler/subscription_handler.go
 create mode 100644 backend/internal/handler/usage_handler.go
 create mode 100644 backend/internal/handler/user_handler.go
 create mode 100644 backend/internal/handler/wire.go
 create mode 100644 backend/internal/integration/e2e_gateway_test.go
 create mode 100644 backend/internal/middleware/rate_limiter.go
 create mode 100644 backend/internal/middleware/rate_limiter_integration_test.go
 create mode 100644 backend/internal/middleware/rate_limiter_test.go
 create mode 100644 backend/internal/pkg/antigravity/claude_types.go
 create mode 100644 backend/internal/pkg/antigravity/client.go
 create mode 100644 backend/internal/pkg/antigravity/gemini_types.go
 create mode 100644 backend/internal/pkg/antigravity/oauth.go
 create mode 100644 backend/internal/pkg/antigravity/request_transformer.go
 create mode 100644 backend/internal/pkg/antigravity/request_transformer_test.go
 create mode 100644 backend/internal/pkg/antigravity/response_transformer.go
 create mode 100644 backend/internal/pkg/antigravity/stream_transformer.go
 create mode 100644 backend/internal/pkg/claude/constants.go
 create mode 100644 backend/internal/pkg/ctxkey/ctxkey.go
 create mode 100644 backend/internal/pkg/errors/errors.go
 create mode 100644 backend/internal/pkg/errors/errors_test.go
 create mode 100644 backend/internal/pkg/errors/http.go
 create mode 100644 backend/internal/pkg/errors/types.go
 create mode 100644 backend/internal/pkg/gemini/models.go
 create mode 100644 backend/internal/pkg/geminicli/codeassist_types.go
 create mode 100644 backend/internal/pkg/geminicli/constants.go
 create mode 100644 backend/internal/pkg/geminicli/drive_client.go
 create mode 100644 backend/internal/pkg/geminicli/drive_client_test.go
 create mode 100644 backend/internal/pkg/geminicli/models.go
 create mode 100644 backend/internal/pkg/geminicli/oauth.go
 create mode 100644 backend/internal/pkg/geminicli/oauth_test.go
 create mode 100644 backend/internal/pkg/geminicli/sanitize.go
 create mode 100644 backend/internal/pkg/geminicli/token_types.go
 create mode 100644 backend/internal/pkg/googleapi/status.go
 create mode 100644 backend/internal/pkg/httpclient/pool.go
 create mode 100644 backend/internal/pkg/ip/ip.go
 create mode 100644 backend/internal/pkg/oauth/oauth.go
 create mode 100644 backend/internal/pkg/openai/constants.go
 create mode 100644 backend/internal/pkg/openai/instructions.txt
 create mode 100644 backend/internal/pkg/openai/oauth.go
 create mode 100644 backend/internal/pkg/openai/request.go
 create mode 100644 backend/internal/pkg/pagination/pagination.go
 create mode 100644 backend/internal/pkg/proxyutil/dialer.go
 create mode 100644 backend/internal/pkg/proxyutil/dialer_test.go
 create mode 100644 backend/internal/pkg/response/response.go
 create mode 100644 backend/internal/pkg/response/response_test.go
 create mode 100644 backend/internal/pkg/sysutil/restart.go
 create mode 100644 backend/internal/pkg/timezone/timezone.go
 create mode 100644 backend/internal/pkg/timezone/timezone_test.go
 create mode 100644 backend/internal/pkg/usagestats/account_stats.go
 create mode 100644 backend/internal/pkg/usagestats/usage_log_types.go
 create mode 100644 backend/internal/repository/account_repo.go
 create mode 100644 backend/internal/repository/account_repo_integration_test.go
 create mode 100644 backend/internal/repository/allowed_groups_contract_integration_test.go
 create mode 100644 backend/internal/repository/api_key_cache.go
 create mode 100644 backend/internal/repository/api_key_cache_integration_test.go
 create mode 100644 backend/internal/repository/api_key_cache_test.go
 create mode 100644 backend/internal/repository/api_key_repo.go
 create mode 100644 backend/internal/repository/api_key_repo_integration_test.go
 create mode 100644 backend/internal/repository/billing_cache.go
 create mode 100644 backend/internal/repository/billing_cache_integration_test.go
 create mode 100644 backend/internal/repository/billing_cache_test.go
 create mode 100644 backend/internal/repository/claude_oauth_service.go
 create mode 100644 backend/internal/repository/claude_oauth_service_test.go
 create mode 100644 backend/internal/repository/claude_usage_service.go
 create mode 100644 backend/internal/repository/claude_usage_service_test.go
 create mode 100644 backend/internal/repository/concurrency_cache.go
 create mode 100644 backend/internal/repository/concurrency_cache_benchmark_test.go
 create mode 100644 backend/internal/repository/concurrency_cache_integration_test.go
 create mode 100644 backend/internal/repository/dashboard_aggregation_repo.go
 create mode 100644 backend/internal/repository/dashboard_cache.go
 create mode 100644 backend/internal/repository/dashboard_cache_test.go
 create mode 100644 backend/internal/repository/db_pool.go
 create mode 100644 backend/internal/repository/db_pool_test.go
 create mode 100644 backend/internal/repository/email_cache.go
 create mode 100644 backend/internal/repository/email_cache_integration_test.go
 create mode 100644 backend/internal/repository/email_cache_test.go
 create mode 100644 backend/internal/repository/ent.go
 create mode 100644 backend/internal/repository/error_translate.go
 create mode 100644 backend/internal/repository/fixtures_integration_test.go
 create mode 100644 backend/internal/repository/gateway_cache.go
 create mode 100644 backend/internal/repository/gateway_cache_integration_test.go
 create mode 100644 backend/internal/repository/gateway_routing_integration_test.go
 create mode 100644 backend/internal/repository/gemini_oauth_client.go
 create mode 100644 backend/internal/repository/gemini_token_cache.go
 create mode 100644 backend/internal/repository/gemini_token_cache_integration_test.go
 create mode 100644 backend/internal/repository/gemini_token_cache_test.go
 create mode 100644 backend/internal/repository/geminicli_codeassist_client.go
 create mode 100644 backend/internal/repository/github_release_service.go
 create mode 100644 backend/internal/repository/github_release_service_test.go
 create mode 100644 backend/internal/repository/group_repo.go
 create mode 100644 backend/internal/repository/group_repo_integration_test.go
 create mode 100644 backend/internal/repository/http_upstream.go
 create mode 100644 backend/internal/repository/http_upstream_benchmark_test.go
 create mode 100644 backend/internal/repository/http_upstream_test.go
 create mode 100644 backend/internal/repository/identity_cache.go
 create mode 100644 backend/internal/repository/identity_cache_integration_test.go
 create mode 100644 backend/internal/repository/identity_cache_test.go
 create mode 100644 backend/internal/repository/inprocess_transport_test.go
 create mode 100644 backend/internal/repository/integration_harness_test.go
 create mode 100644 backend/internal/repository/migrations_runner.go
 create mode 100644 backend/internal/repository/migrations_schema_integration_test.go
 create mode 100644 backend/internal/repository/openai_oauth_service.go
 create mode 100644 backend/internal/repository/openai_oauth_service_test.go
 create mode 100644 backend/internal/repository/ops_repo.go
 create mode 100644 backend/internal/repository/ops_repo_alerts.go
 create mode 100644 backend/internal/repository/ops_repo_dashboard.go
 create mode 100644 backend/internal/repository/ops_repo_histograms.go
 create mode 100644 backend/internal/repository/ops_repo_latency_histogram_buckets.go
 create mode 100644 backend/internal/repository/ops_repo_latency_histogram_buckets_test.go
 create mode 100644 backend/internal/repository/ops_repo_metrics.go
 create mode 100644 backend/internal/repository/ops_repo_preagg.go
 create mode 100644 backend/internal/repository/ops_repo_realtime_traffic.go
 create mode 100644 backend/internal/repository/ops_repo_request_details.go
 create mode 100644 backend/internal/repository/ops_repo_trends.go
 create mode 100644 backend/internal/repository/ops_repo_window_stats.go
 create mode 100644 backend/internal/repository/pagination.go
 create mode 100644 backend/internal/repository/pricing_service.go
 create mode 100644 backend/internal/repository/pricing_service_test.go
 create mode 100644 backend/internal/repository/promo_code_repo.go
 create mode 100644 backend/internal/repository/proxy_latency_cache.go
 create mode 100644 backend/internal/repository/proxy_probe_service.go
 create mode 100644 backend/internal/repository/proxy_probe_service_test.go
 create mode 100644 backend/internal/repository/proxy_repo.go
 create mode 100644 backend/internal/repository/proxy_repo_integration_test.go
 create mode 100644 backend/internal/repository/redeem_cache.go
 create mode 100644 backend/internal/repository/redeem_cache_integration_test.go
 create mode 100644 backend/internal/repository/redeem_cache_test.go
 create mode 100644 backend/internal/repository/redeem_code_repo.go
 create mode 100644 backend/internal/repository/redeem_code_repo_integration_test.go
 create mode 100644 backend/internal/repository/redis.go
 create mode 100644 backend/internal/repository/redis_test.go
 create mode 100644 backend/internal/repository/req_client_pool.go
 create mode 100644 backend/internal/repository/scheduler_cache.go
 create mode 100644 backend/internal/repository/scheduler_outbox_repo.go
 create mode 100644 backend/internal/repository/scheduler_snapshot_outbox_integration_test.go
 create mode 100644 backend/internal/repository/setting_repo.go
 create mode 100644 backend/internal/repository/setting_repo_integration_test.go
 create mode 100644 backend/internal/repository/soft_delete_ent_integration_test.go
 create mode 100644 backend/internal/repository/sql_scan.go
 create mode 100644 backend/internal/repository/temp_unsched_cache.go
 create mode 100644 backend/internal/repository/timeout_counter_cache.go
 create mode 100644 backend/internal/repository/turnstile_service.go
 create mode 100644 backend/internal/repository/turnstile_service_test.go
 create mode 100644 backend/internal/repository/update_cache.go
 create mode 100644 backend/internal/repository/update_cache_integration_test.go
 create mode 100644 backend/internal/repository/usage_log_repo.go
 create mode 100644 backend/internal/repository/usage_log_repo_integration_test.go
 create mode 100644 backend/internal/repository/user_attribute_repo.go
 create mode 100644 backend/internal/repository/user_repo.go
 create mode 100644 backend/internal/repository/user_repo_integration_test.go
 create mode 100644 backend/internal/repository/user_subscription_repo.go
 create mode 100644 backend/internal/repository/user_subscription_repo_integration_test.go
 create mode 100644 backend/internal/repository/wire.go
 create mode 100644 backend/internal/server/api_contract_test.go
 create mode 100644 backend/internal/server/http.go
 create mode 100644 backend/internal/server/middleware/admin_auth.go
 create mode 100644 backend/internal/server/middleware/admin_only.go
 create mode 100644 backend/internal/server/middleware/api_key_auth.go
 create mode 100644 backend/internal/server/middleware/api_key_auth_google.go
 create mode 100644 backend/internal/server/middleware/api_key_auth_google_test.go
 create mode 100644 backend/internal/server/middleware/api_key_auth_test.go
 create mode 100644 backend/internal/server/middleware/auth_subject.go
 create mode 100644 backend/internal/server/middleware/client_request_id.go
backend/internal/server/middleware/cors.go create mode 100644 backend/internal/server/middleware/jwt_auth.go create mode 100644 backend/internal/server/middleware/logger.go create mode 100644 backend/internal/server/middleware/middleware.go create mode 100644 backend/internal/server/middleware/recovery.go create mode 100644 backend/internal/server/middleware/recovery_test.go create mode 100644 backend/internal/server/middleware/request_body_limit.go create mode 100644 backend/internal/server/middleware/security_headers.go create mode 100644 backend/internal/server/middleware/wire.go create mode 100644 backend/internal/server/router.go create mode 100644 backend/internal/server/routes/admin.go create mode 100644 backend/internal/server/routes/auth.go create mode 100644 backend/internal/server/routes/common.go create mode 100644 backend/internal/server/routes/gateway.go create mode 100644 backend/internal/server/routes/user.go create mode 100644 backend/internal/service/account.go create mode 100644 backend/internal/service/account_billing_rate_multiplier_test.go create mode 100644 backend/internal/service/account_expiry_service.go create mode 100644 backend/internal/service/account_group.go create mode 100644 backend/internal/service/account_service.go create mode 100644 backend/internal/service/account_service_delete_test.go create mode 100644 backend/internal/service/account_test_service.go create mode 100644 backend/internal/service/account_usage_service.go create mode 100644 backend/internal/service/admin_service.go create mode 100644 backend/internal/service/admin_service_bulk_update_test.go create mode 100644 backend/internal/service/admin_service_create_user_test.go create mode 100644 backend/internal/service/admin_service_delete_test.go create mode 100644 backend/internal/service/admin_service_group_test.go create mode 100644 backend/internal/service/admin_service_search_test.go create mode 100644 backend/internal/service/admin_service_update_balance_test.go create mode 100644 backend/internal/service/antigravity_gateway_service.go create mode 100644 backend/internal/service/antigravity_gateway_service_test.go create mode 100644 backend/internal/service/antigravity_image_test.go create mode 100644 backend/internal/service/antigravity_model_mapping_test.go create mode 100644 backend/internal/service/antigravity_oauth_service.go create mode 100644 backend/internal/service/antigravity_quota_fetcher.go create mode 100644 backend/internal/service/antigravity_quota_scope.go create mode 100644 backend/internal/service/antigravity_token_provider.go create mode 100644 backend/internal/service/antigravity_token_refresher.go create mode 100644 backend/internal/service/api_key.go create mode 100644 backend/internal/service/api_key_auth_cache.go create mode 100644 backend/internal/service/api_key_auth_cache_impl.go create mode 100644 backend/internal/service/api_key_auth_cache_invalidate.go create mode 100644 backend/internal/service/api_key_service.go create mode 100644 backend/internal/service/api_key_service_cache_test.go create mode 100644 backend/internal/service/api_key_service_delete_test.go create mode 100644 backend/internal/service/auth_cache_invalidation_test.go create mode 100644 backend/internal/service/auth_service.go create mode 100644 backend/internal/service/auth_service_register_test.go create mode 100644 backend/internal/service/billing_cache_port.go create mode 100644 backend/internal/service/billing_cache_service.go create mode 100644 
backend/internal/service/billing_cache_service_test.go create mode 100644 backend/internal/service/billing_service.go create mode 100644 backend/internal/service/billing_service_image_test.go create mode 100644 backend/internal/service/claude_code_validator.go create mode 100644 backend/internal/service/concurrency_service.go create mode 100644 backend/internal/service/crs_sync_service.go create mode 100644 backend/internal/service/dashboard_aggregation_service.go create mode 100644 backend/internal/service/dashboard_aggregation_service_test.go create mode 100644 backend/internal/service/dashboard_service.go create mode 100644 backend/internal/service/dashboard_service_test.go create mode 100644 backend/internal/service/deferred_service.go create mode 100644 backend/internal/service/domain_constants.go create mode 100644 backend/internal/service/email_queue_service.go create mode 100644 backend/internal/service/email_service.go create mode 100644 backend/internal/service/gateway_multiplatform_test.go create mode 100644 backend/internal/service/gateway_prompt_test.go create mode 100644 backend/internal/service/gateway_request.go create mode 100644 backend/internal/service/gateway_request_test.go create mode 100644 backend/internal/service/gateway_service.go create mode 100644 backend/internal/service/gateway_service_benchmark_test.go create mode 100644 backend/internal/service/gemini_messages_compat_service.go create mode 100644 backend/internal/service/gemini_messages_compat_service_test.go create mode 100644 backend/internal/service/gemini_multiplatform_test.go create mode 100644 backend/internal/service/gemini_oauth.go create mode 100644 backend/internal/service/gemini_oauth_service.go create mode 100644 backend/internal/service/gemini_oauth_service_test.go create mode 100644 backend/internal/service/gemini_quota.go create mode 100644 backend/internal/service/gemini_token_cache.go create mode 100644 backend/internal/service/gemini_token_provider.go create mode 100644 backend/internal/service/gemini_token_refresher.go create mode 100644 backend/internal/service/geminicli_codeassist.go create mode 100644 backend/internal/service/group.go create mode 100644 backend/internal/service/group_service.go create mode 100644 backend/internal/service/group_test.go create mode 100644 backend/internal/service/http_upstream_port.go create mode 100644 backend/internal/service/identity_service.go create mode 100644 backend/internal/service/oauth_service.go create mode 100644 backend/internal/service/openai_codex_transform.go create mode 100644 backend/internal/service/openai_codex_transform_test.go create mode 100644 backend/internal/service/openai_gateway_service.go create mode 100644 backend/internal/service/openai_gateway_service_test.go create mode 100644 backend/internal/service/openai_oauth_service.go create mode 100644 backend/internal/service/openai_tool_continuation.go create mode 100644 backend/internal/service/openai_tool_continuation_test.go create mode 100644 backend/internal/service/ops_account_availability.go create mode 100644 backend/internal/service/ops_advisory_lock.go create mode 100644 backend/internal/service/ops_aggregation_service.go create mode 100644 backend/internal/service/ops_alert_evaluator_service.go create mode 100644 backend/internal/service/ops_alert_evaluator_service_test.go create mode 100644 backend/internal/service/ops_alert_models.go create mode 100644 backend/internal/service/ops_alerts.go create mode 100644 backend/internal/service/ops_cleanup_service.go create 
mode 100644 backend/internal/service/ops_concurrency.go create mode 100644 backend/internal/service/ops_dashboard.go create mode 100644 backend/internal/service/ops_dashboard_models.go create mode 100644 backend/internal/service/ops_errors.go create mode 100644 backend/internal/service/ops_health_score.go create mode 100644 backend/internal/service/ops_health_score_test.go create mode 100644 backend/internal/service/ops_histograms.go create mode 100644 backend/internal/service/ops_metrics_collector.go create mode 100644 backend/internal/service/ops_models.go create mode 100644 backend/internal/service/ops_port.go create mode 100644 backend/internal/service/ops_query_mode.go create mode 100644 backend/internal/service/ops_realtime.go create mode 100644 backend/internal/service/ops_realtime_models.go create mode 100644 backend/internal/service/ops_realtime_traffic.go create mode 100644 backend/internal/service/ops_realtime_traffic_models.go create mode 100644 backend/internal/service/ops_request_details.go create mode 100644 backend/internal/service/ops_retry.go create mode 100644 backend/internal/service/ops_scheduled_report_service.go create mode 100644 backend/internal/service/ops_service.go create mode 100644 backend/internal/service/ops_settings.go create mode 100644 backend/internal/service/ops_settings_models.go create mode 100644 backend/internal/service/ops_trend_models.go create mode 100644 backend/internal/service/ops_trends.go create mode 100644 backend/internal/service/ops_upstream_context.go create mode 100644 backend/internal/service/ops_window_stats.go create mode 100644 backend/internal/service/pricing_service.go create mode 100644 backend/internal/service/promo_code.go create mode 100644 backend/internal/service/promo_code_repository.go create mode 100644 backend/internal/service/promo_service.go create mode 100644 backend/internal/service/prompts/codex_cli_instructions.md create mode 100644 backend/internal/service/prompts/codex_opencode_bridge.txt create mode 100644 backend/internal/service/prompts/tool_remap_message.txt create mode 100644 backend/internal/service/proxy.go create mode 100644 backend/internal/service/proxy_latency_cache.go create mode 100644 backend/internal/service/proxy_service.go create mode 100644 backend/internal/service/quota_fetcher.go create mode 100644 backend/internal/service/ratelimit_service.go create mode 100644 backend/internal/service/ratelimit_service_401_test.go create mode 100644 backend/internal/service/redeem_code.go create mode 100644 backend/internal/service/redeem_service.go create mode 100644 backend/internal/service/scheduler_cache.go create mode 100644 backend/internal/service/scheduler_events.go create mode 100644 backend/internal/service/scheduler_outbox.go create mode 100644 backend/internal/service/scheduler_snapshot_service.go create mode 100644 backend/internal/service/setting.go create mode 100644 backend/internal/service/setting_service.go create mode 100644 backend/internal/service/settings_view.go create mode 100644 backend/internal/service/subscription_service.go create mode 100644 backend/internal/service/temp_unsched.go create mode 100644 backend/internal/service/timing_wheel_service.go create mode 100644 backend/internal/service/token_cache_invalidator.go create mode 100644 backend/internal/service/token_cache_invalidator_test.go create mode 100644 backend/internal/service/token_cache_key_test.go create mode 100644 backend/internal/service/token_refresh_service.go create mode 100644 
backend/internal/service/token_refresh_service_test.go create mode 100644 backend/internal/service/token_refresher.go create mode 100644 backend/internal/service/token_refresher_test.go create mode 100644 backend/internal/service/turnstile_service.go create mode 100644 backend/internal/service/update_service.go create mode 100644 backend/internal/service/usage_log.go create mode 100644 backend/internal/service/usage_service.go create mode 100644 backend/internal/service/user.go create mode 100644 backend/internal/service/user_attribute.go create mode 100644 backend/internal/service/user_attribute_service.go create mode 100644 backend/internal/service/user_service.go create mode 100644 backend/internal/service/user_subscription.go create mode 100644 backend/internal/service/user_subscription_port.go create mode 100644 backend/internal/service/wire.go create mode 100644 backend/internal/setup/cli.go create mode 100644 backend/internal/setup/handler.go create mode 100644 backend/internal/setup/setup.go create mode 100644 backend/internal/util/logredact/redact.go create mode 100644 backend/internal/util/responseheaders/responseheaders.go create mode 100644 backend/internal/util/responseheaders/responseheaders_test.go create mode 100644 backend/internal/util/urlvalidator/validator.go create mode 100644 backend/internal/util/urlvalidator/validator_test.go create mode 100644 backend/internal/web/embed_off.go create mode 100644 backend/internal/web/embed_on.go create mode 100644 backend/internal/web/html_cache.go create mode 100644 backend/migrations/001_init.sql create mode 100644 backend/migrations/002_account_type_migration.sql create mode 100644 backend/migrations/003_subscription.sql create mode 100644 backend/migrations/004_add_redeem_code_notes.sql create mode 100644 backend/migrations/005_schema_parity.sql create mode 100644 backend/migrations/006_fix_invalid_subscription_expires_at.sql create mode 100644 backend/migrations/007_add_user_allowed_groups.sql create mode 100644 backend/migrations/008_seed_default_group.sql create mode 100644 backend/migrations/009_fix_usage_logs_cache_columns.sql create mode 100644 backend/migrations/010_add_usage_logs_aggregated_indexes.sql create mode 100644 backend/migrations/011_remove_duplicate_unique_indexes.sql create mode 100644 backend/migrations/012_add_user_subscription_soft_delete.sql create mode 100644 backend/migrations/013_log_orphan_allowed_groups.sql create mode 100644 backend/migrations/014_drop_legacy_allowed_groups.sql create mode 100644 backend/migrations/015_fix_settings_unique_constraint.sql create mode 100644 backend/migrations/016_soft_delete_partial_unique_indexes.sql create mode 100644 backend/migrations/018_user_attributes.sql create mode 100644 backend/migrations/019_migrate_wechat_to_attributes.sql create mode 100644 backend/migrations/020_add_temp_unschedulable.sql create mode 100644 backend/migrations/024_add_gemini_tier_id.sql create mode 100644 backend/migrations/026_ops_metrics_aggregation_tables.sql create mode 100644 backend/migrations/027_usage_billing_consistency.sql create mode 100644 backend/migrations/028_add_account_notes.sql create mode 100644 backend/migrations/028_add_usage_logs_user_agent.sql create mode 100644 backend/migrations/028_group_image_pricing.sql create mode 100644 backend/migrations/029_add_group_claude_code_restriction.sql create mode 100644 backend/migrations/029_usage_log_image_fields.sql create mode 100644 backend/migrations/030_add_account_expires_at.sql create mode 100644 
backend/migrations/031_add_ip_address.sql create mode 100644 backend/migrations/032_add_api_key_ip_restriction.sql create mode 100644 backend/migrations/033_add_promo_codes.sql create mode 100644 backend/migrations/033_ops_monitoring_vnext.sql create mode 100644 backend/migrations/034_ops_upstream_error_events.sql create mode 100644 backend/migrations/034_usage_dashboard_aggregation_tables.sql create mode 100644 backend/migrations/035_usage_logs_partitioning.sql create mode 100644 backend/migrations/036_ops_error_logs_add_is_count_tokens.sql create mode 100644 backend/migrations/036_scheduler_outbox.sql create mode 100644 backend/migrations/037_add_account_rate_multiplier.sql create mode 100644 backend/migrations/037_ops_alert_silences.sql create mode 100644 backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql create mode 100644 backend/migrations/README.md create mode 100644 backend/migrations/migrations.go create mode 100644 backend/resources/model-pricing/README.md create mode 100644 backend/resources/model-pricing/model_prices_and_context_window.json create mode 100644 backend/tools.go create mode 100644 build_image.sh create mode 100644 config.yaml create mode 100644 deploy/.env.example create mode 100644 deploy/Caddyfile create mode 100644 deploy/DOCKER.md create mode 100644 deploy/Makefile create mode 100644 deploy/README.md create mode 100644 deploy/config.example.yaml create mode 100644 deploy/docker-compose-test.yml create mode 100644 deploy/docker-compose.override.yml.example create mode 100644 deploy/docker-compose.standalone.yml create mode 100644 deploy/docker-compose.yml create mode 100644 deploy/flow.md create mode 100644 deploy/install.sh create mode 100644 deploy/sub2api.service create mode 100644 frontend/.eslintignore create mode 100644 frontend/.eslintrc.cjs create mode 100644 frontend/.npmrc create mode 100644 frontend/audit.json create mode 100644 frontend/index.html create mode 100644 frontend/package-lock.json create mode 100644 frontend/package.json create mode 100644 frontend/pnpm-lock.yaml create mode 100644 frontend/postcss.config.js create mode 100644 frontend/public/logo.png create mode 100644 frontend/src/App.vue create mode 100644 frontend/src/api/admin/accounts.ts create mode 100644 frontend/src/api/admin/antigravity.ts create mode 100644 frontend/src/api/admin/dashboard.ts create mode 100644 frontend/src/api/admin/gemini.ts create mode 100644 frontend/src/api/admin/groups.ts create mode 100644 frontend/src/api/admin/index.ts create mode 100644 frontend/src/api/admin/ops.ts create mode 100644 frontend/src/api/admin/promo.ts create mode 100644 frontend/src/api/admin/proxies.ts create mode 100644 frontend/src/api/admin/redeem.ts create mode 100644 frontend/src/api/admin/settings.ts create mode 100644 frontend/src/api/admin/subscriptions.ts create mode 100644 frontend/src/api/admin/system.ts create mode 100644 frontend/src/api/admin/usage.ts create mode 100644 frontend/src/api/admin/userAttributes.ts create mode 100644 frontend/src/api/admin/users.ts create mode 100644 frontend/src/api/auth.ts create mode 100644 frontend/src/api/client.ts create mode 100644 frontend/src/api/groups.ts create mode 100644 frontend/src/api/index.ts create mode 100644 frontend/src/api/keys.ts create mode 100644 frontend/src/api/redeem.ts create mode 100644 frontend/src/api/setup.ts create mode 100644 frontend/src/api/subscriptions.ts create mode 100644 frontend/src/api/usage.ts create mode 100644 frontend/src/api/user.ts create mode 100644 
frontend/src/components/Guide/steps.ts create mode 100644 frontend/src/components/TurnstileWidget.vue create mode 100644 frontend/src/components/account/AccountGroupsCell.vue create mode 100644 frontend/src/components/account/AccountQuotaInfo.vue create mode 100644 frontend/src/components/account/AccountStatsModal.vue create mode 100644 frontend/src/components/account/AccountStatusIndicator.vue create mode 100644 frontend/src/components/account/AccountTestModal.vue create mode 100644 frontend/src/components/account/AccountTodayStatsCell.vue create mode 100644 frontend/src/components/account/AccountUsageCell.vue create mode 100644 frontend/src/components/account/BulkEditAccountModal.vue create mode 100644 frontend/src/components/account/CreateAccountModal.vue create mode 100644 frontend/src/components/account/EditAccountModal.vue create mode 100644 frontend/src/components/account/ModelWhitelistSelector.vue create mode 100644 frontend/src/components/account/OAuthAuthorizationFlow.vue create mode 100644 frontend/src/components/account/ReAuthAccountModal.vue create mode 100644 frontend/src/components/account/SyncFromCrsModal.vue create mode 100644 frontend/src/components/account/TempUnschedStatusModal.vue create mode 100644 frontend/src/components/account/UsageProgressBar.vue create mode 100644 frontend/src/components/account/index.ts create mode 100644 frontend/src/components/admin/account/AccountActionMenu.vue create mode 100644 frontend/src/components/admin/account/AccountBulkActionsBar.vue create mode 100644 frontend/src/components/admin/account/AccountStatsModal.vue create mode 100644 frontend/src/components/admin/account/AccountTableActions.vue create mode 100644 frontend/src/components/admin/account/AccountTableFilters.vue create mode 100644 frontend/src/components/admin/account/AccountTestModal.vue create mode 100644 frontend/src/components/admin/account/ReAuthAccountModal.vue create mode 100644 frontend/src/components/admin/usage/UsageExportProgress.vue create mode 100644 frontend/src/components/admin/usage/UsageFilters.vue create mode 100644 frontend/src/components/admin/usage/UsageStatsCards.vue create mode 100644 frontend/src/components/admin/usage/UsageTable.vue create mode 100644 frontend/src/components/admin/user/UserAllowedGroupsModal.vue create mode 100644 frontend/src/components/admin/user/UserApiKeysModal.vue create mode 100644 frontend/src/components/admin/user/UserBalanceModal.vue create mode 100644 frontend/src/components/admin/user/UserCreateModal.vue create mode 100644 frontend/src/components/admin/user/UserEditModal.vue create mode 100644 frontend/src/components/auth/LinuxDoOAuthSection.vue create mode 100644 frontend/src/components/charts/ModelDistributionChart.vue create mode 100644 frontend/src/components/charts/TokenUsageTrend.vue create mode 100644 frontend/src/components/common/BaseDialog.vue create mode 100644 frontend/src/components/common/ConfirmDialog.vue create mode 100644 frontend/src/components/common/DataTable.vue create mode 100644 frontend/src/components/common/DateRangePicker.vue create mode 100644 frontend/src/components/common/EmptyState.vue create mode 100644 frontend/src/components/common/ExportProgressDialog.vue create mode 100644 frontend/src/components/common/GroupBadge.vue create mode 100644 frontend/src/components/common/GroupOptionItem.vue create mode 100644 frontend/src/components/common/GroupSelector.vue create mode 100644 frontend/src/components/common/HelpTooltip.vue create mode 100644 frontend/src/components/common/Input.vue create mode 
100644 frontend/src/components/common/LoadingSpinner.vue create mode 100644 frontend/src/components/common/LocaleSwitcher.vue create mode 100644 frontend/src/components/common/ModelIcon.vue create mode 100644 frontend/src/components/common/Pagination.vue create mode 100644 frontend/src/components/common/PlatformIcon.vue create mode 100644 frontend/src/components/common/PlatformTypeBadge.vue create mode 100644 frontend/src/components/common/ProxySelector.vue create mode 100644 frontend/src/components/common/README.md create mode 100644 frontend/src/components/common/SearchInput.vue create mode 100644 frontend/src/components/common/Select.vue create mode 100644 frontend/src/components/common/Skeleton.vue create mode 100644 frontend/src/components/common/StatCard.vue create mode 100644 frontend/src/components/common/StatusBadge.vue create mode 100644 frontend/src/components/common/SubscriptionProgressMini.vue create mode 100644 frontend/src/components/common/TextArea.vue create mode 100644 frontend/src/components/common/Toast.vue create mode 100644 frontend/src/components/common/Toggle.vue create mode 100644 frontend/src/components/common/VersionBadge.vue create mode 100644 frontend/src/components/common/index.ts create mode 100644 frontend/src/components/common/types.ts create mode 100644 frontend/src/components/icons/Icon.vue create mode 100644 frontend/src/components/icons/index.ts create mode 100644 frontend/src/components/keys/UseKeyModal.vue create mode 100644 frontend/src/components/layout/AppHeader.vue create mode 100644 frontend/src/components/layout/AppLayout.vue create mode 100644 frontend/src/components/layout/AppSidebar.vue create mode 100644 frontend/src/components/layout/AuthLayout.vue create mode 100644 frontend/src/components/layout/EXAMPLES.md create mode 100644 frontend/src/components/layout/INTEGRATION.md create mode 100644 frontend/src/components/layout/README.md create mode 100644 frontend/src/components/layout/TablePageLayout.vue create mode 100644 frontend/src/components/layout/index.ts create mode 100644 frontend/src/components/user/UserAttributeForm.vue create mode 100644 frontend/src/components/user/UserAttributesConfigModal.vue create mode 100644 frontend/src/components/user/dashboard/UserDashboardCharts.vue create mode 100644 frontend/src/components/user/dashboard/UserDashboardQuickActions.vue create mode 100644 frontend/src/components/user/dashboard/UserDashboardRecentUsage.vue create mode 100644 frontend/src/components/user/dashboard/UserDashboardStats.vue create mode 100644 frontend/src/components/user/profile/ProfileEditForm.vue create mode 100644 frontend/src/components/user/profile/ProfileInfoCard.vue create mode 100644 frontend/src/components/user/profile/ProfilePasswordForm.vue create mode 100644 frontend/src/composables/useAccountOAuth.ts create mode 100644 frontend/src/composables/useAntigravityOAuth.ts create mode 100644 frontend/src/composables/useClipboard.ts create mode 100644 frontend/src/composables/useForm.ts create mode 100644 frontend/src/composables/useGeminiOAuth.ts create mode 100644 frontend/src/composables/useModelWhitelist.ts create mode 100644 frontend/src/composables/useOnboardingTour.ts create mode 100644 frontend/src/composables/useOpenAIOAuth.ts create mode 100644 frontend/src/composables/useTableLoader.ts create mode 100644 frontend/src/i18n/index.ts create mode 100644 frontend/src/i18n/locales/en.ts create mode 100644 frontend/src/i18n/locales/zh.ts create mode 100644 frontend/src/main.ts create mode 100644 
frontend/src/router/README.md create mode 100644 frontend/src/router/index.ts create mode 100644 frontend/src/router/meta.d.ts create mode 100644 frontend/src/stores/README.md create mode 100644 frontend/src/stores/adminSettings.ts create mode 100644 frontend/src/stores/app.ts create mode 100644 frontend/src/stores/auth.ts create mode 100644 frontend/src/stores/index.ts create mode 100644 frontend/src/stores/onboarding.ts create mode 100644 frontend/src/stores/subscriptions.ts create mode 100644 frontend/src/style.css create mode 100644 frontend/src/styles/onboarding.css create mode 100644 frontend/src/types/global.d.ts create mode 100644 frontend/src/types/index.ts create mode 100644 frontend/src/utils/format.ts create mode 100644 frontend/src/utils/url.ts create mode 100644 frontend/src/views/HomeView.vue create mode 100644 frontend/src/views/NotFoundView.vue create mode 100644 frontend/src/views/admin/AccountsView.vue create mode 100644 frontend/src/views/admin/DashboardView.vue create mode 100644 frontend/src/views/admin/GroupsView.vue create mode 100644 frontend/src/views/admin/PromoCodesView.vue create mode 100644 frontend/src/views/admin/ProxiesView.vue create mode 100644 frontend/src/views/admin/RedeemView.vue create mode 100644 frontend/src/views/admin/SettingsView.vue create mode 100644 frontend/src/views/admin/SubscriptionsView.vue create mode 100644 frontend/src/views/admin/UsageView.vue create mode 100644 frontend/src/views/admin/UsersView.vue create mode 100644 frontend/src/views/admin/ops/OpsDashboard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsAlertEventsCard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsAlertRulesCard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsDashboardHeader.vue create mode 100644 frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue create mode 100644 frontend/src/views/admin/ops/components/OpsEmailNotificationCard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsErrorDetailModal.vue create mode 100644 frontend/src/views/admin/ops/components/OpsErrorDetailsModal.vue create mode 100644 frontend/src/views/admin/ops/components/OpsErrorDistributionChart.vue create mode 100644 frontend/src/views/admin/ops/components/OpsErrorLogTable.vue create mode 100644 frontend/src/views/admin/ops/components/OpsErrorTrendChart.vue create mode 100644 frontend/src/views/admin/ops/components/OpsLatencyChart.vue create mode 100644 frontend/src/views/admin/ops/components/OpsRequestDetailsModal.vue create mode 100644 frontend/src/views/admin/ops/components/OpsRuntimeSettingsCard.vue create mode 100644 frontend/src/views/admin/ops/components/OpsSettingsDialog.vue create mode 100644 frontend/src/views/admin/ops/components/OpsThroughputTrendChart.vue create mode 100644 frontend/src/views/admin/ops/types.ts create mode 100644 frontend/src/views/admin/ops/utils/opsFormatters.ts create mode 100644 frontend/src/views/auth/EmailVerifyView.vue create mode 100644 frontend/src/views/auth/LinuxDoCallbackView.vue create mode 100644 frontend/src/views/auth/LoginView.vue create mode 100644 frontend/src/views/auth/OAuthCallbackView.vue create mode 100644 frontend/src/views/auth/README.md create mode 100644 frontend/src/views/auth/RegisterView.vue create mode 100644 frontend/src/views/auth/USAGE_EXAMPLES.md create mode 100644 frontend/src/views/auth/VISUAL_GUIDE.md create mode 100644 frontend/src/views/auth/index.ts create 
mode 100644 frontend/src/views/setup/SetupWizardView.vue create mode 100644 frontend/src/views/user/DashboardView.vue create mode 100644 frontend/src/views/user/KeysView.vue create mode 100644 frontend/src/views/user/ProfileView.vue create mode 100644 frontend/src/views/user/RedeemView.vue create mode 100644 frontend/src/views/user/SubscriptionsView.vue create mode 100644 frontend/src/views/user/UsageView.vue create mode 100644 frontend/src/vite-env.d.ts create mode 100644 frontend/tailwind.config.js create mode 100644 frontend/tsconfig.json create mode 100644 frontend/tsconfig.node.json create mode 100644 frontend/vite.config.ts create mode 100644 tools/check_pnpm_audit_exceptions.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..ab803d44 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,74 @@ +# ============================================================================= +# Docker Ignore File for Sub2API +# ============================================================================= + +# Git +.git +.gitignore +.gitattributes + +# Documentation +*.md +!deploy/DOCKER.md +docs/ + +# IDE +.idea/ +.vscode/ +*.swp +*.swo + +# OS files +.DS_Store +Thumbs.db + +# Build artifacts +dist/ +build/ + +# Node modules (will be installed in container) +frontend/node_modules/ +node_modules/ + +# Go build cache (will be built in container) +backend/vendor/ + +# Test files +*_test.go +**/*.test.js +coverage/ +.nyc_output/ + +# Environment files +.env +.env.* +!.env.example + +# Local config +config.yaml +config.local.yaml + +# Logs +*.log +logs/ + +# Temporary files +tmp/ +temp/ +*.tmp + +# Deploy files (not needed in image) +deploy/install.sh +deploy/sub2api.service +deploy/sub2api-sudoers + +# GoReleaser +.goreleaser.yaml + +# GitHub +.github/ + +# Claude files +.claude/ +issues/ +CLAUDE.md diff --git a/.github/audit-exceptions.yml b/.github/audit-exceptions.yml new file mode 100644 index 00000000..a1d8411c --- /dev/null +++ b/.github/audit-exceptions.yml @@ -0,0 +1,16 @@ +version: 1 +exceptions: + - package: xlsx + advisory: "GHSA-4r6h-8v6p-xvw6" + severity: high + reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2023-30533)" + mitigation: "Load only on export; restrict export permissions and data scope" + expires_on: "2026-04-05" + owner: "security@your-domain" + - package: xlsx + advisory: "GHSA-5pgg-2g8v-p4x9" + severity: high + reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2024-22363)" + mitigation: "Load only on export; restrict export permissions and data scope" + expires_on: "2026-04-05" + owner: "security@your-domain" diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml new file mode 100644 index 00000000..3ea8860a --- /dev/null +++ b/.github/workflows/backend-ci.yml @@ -0,0 +1,47 @@ +name: CI + +on: + push: + pull_request: + +permissions: + contents: read + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: backend/go.mod + check-latest: false + cache: true + - name: Verify Go version + run: | + go version | grep -q 'go1.25.5' + - name: Unit tests + working-directory: backend + run: make test-unit + - name: Integration tests + working-directory: backend + run: make test-integration + + golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version-file: backend/go.mod + check-latest: false + cache: true + - name: Verify Go 
version + run: | + go version | grep -q 'go1.25.5' + - name: golangci-lint + uses: golangci/golangci-lint-action@v9 + with: + version: v2.7 + args: --timeout=5m + working-directory: backend diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..73ca35d9 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,272 @@ +name: Release + +on: + push: + tags: + - 'v*' + workflow_dispatch: + inputs: + tag: + description: 'Tag to release (e.g., v1.0.0)' + required: true + type: string + simple_release: + description: 'Simple release: only x86_64 GHCR image, skip other artifacts' + required: false + type: boolean + default: false + +# Environment variable: merges the workflow_dispatch input with the repository variable +# On tag pushes, read vars.SIMPLE_RELEASE; on workflow_dispatch, use the input parameter +env: + SIMPLE_RELEASE: ${{ github.event.inputs.simple_release == 'true' || vars.SIMPLE_RELEASE == 'true' }} + +permissions: + contents: write + packages: write + +jobs: + # Update VERSION file with tag version + update-version: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Update VERSION file + run: | + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + VERSION=${{ github.event.inputs.tag }} + VERSION=${VERSION#v} + else + VERSION=${GITHUB_REF#refs/tags/v} + fi + echo "$VERSION" > backend/cmd/server/VERSION + echo "Updated VERSION file to: $VERSION" + + - name: Upload VERSION artifact + uses: actions/upload-artifact@v4 + with: + name: version-file + path: backend/cmd/server/VERSION + retention-days: 1 + + build-frontend: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 9 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'pnpm' + cache-dependency-path: frontend/pnpm-lock.yaml + + - name: Install dependencies + run: pnpm install --frozen-lockfile + working-directory: frontend + + - name: Build frontend + run: pnpm run build + working-directory: frontend + + - name: Upload frontend artifact + uses: actions/upload-artifact@v4 + with: + name: frontend-dist + path: backend/internal/web/dist/ + retention-days: 1 + + release: + needs: [update-version, build-frontend] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.tag || github.ref }} + + - name: Download VERSION artifact + uses: actions/download-artifact@v4 + with: + name: version-file + path: backend/cmd/server/ + + - name: Download frontend artifact + uses: actions/download-artifact@v4 + with: + name: frontend-dist + path: backend/internal/web/dist/ + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version-file: backend/go.mod + check-latest: false + cache-dependency-path: backend/go.sum + + - name: Verify Go version + run: | + go version | grep -q 'go1.25.5' + + # Docker setup for GoReleaser + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to DockerHub + if: ${{ env.DOCKERHUB_USERNAME != '' }} + uses: docker/login-action@v3 + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{
secrets.GITHUB_TOKEN }} + + - name: Fetch tags with annotations + run: | + # Make sure the full annotated tag information is fetched + git fetch --tags --force + + - name: Get tag message + id: tag_message + run: | + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + TAG_NAME=${{ github.event.inputs.tag }} + else + TAG_NAME=${GITHUB_REF#refs/tags/} + fi + echo "Processing tag: $TAG_NAME" + + # Get the full tag message (skipping the first-line subject) + TAG_MESSAGE=$(git tag -l --format='%(contents:body)' "$TAG_NAME") + + # Debug output + echo "Tag message length: ${#TAG_MESSAGE}" + echo "Tag message preview:" + echo "$TAG_MESSAGE" | head -10 + + # Use an EOF delimiter to pass the multi-line content + echo "message<<EOF" >> $GITHUB_OUTPUT + echo "$TAG_MESSAGE" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Set lowercase owner for GHCR + id: lowercase + run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v6 + with: + version: '~> v2' + args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TAG_MESSAGE: ${{ steps.tag_message.outputs.message }} + GITHUB_REPO_OWNER: ${{ github.repository_owner }} + GITHUB_REPO_OWNER_LOWER: ${{ steps.lowercase.outputs.owner }} + GITHUB_REPO_NAME: ${{ github.event.repository.name }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME || 'skip' }} + + # Update DockerHub description + - name: Update DockerHub description + if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }} + uses: peter-evans/dockerhub-description@v4 + env: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + repository: ${{ secrets.DOCKERHUB_USERNAME }}/sub2api + short-description: "Sub2API - AI API Gateway Platform" + readme-filepath: ./deploy/DOCKER.md + + # Send Telegram notification + - name: Send Telegram Notification + if: ${{ env.SIMPLE_RELEASE != 'true' }} + env: + TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }} + TELEGRAM_CHAT_ID: ${{ secrets.TELEGRAM_CHAT_ID }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + continue-on-error: true + run: | + # Check that the required environment variables are set + if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then + echo "Telegram credentials not configured, skipping notification" + exit 0 + fi + + if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then + TAG_NAME=${{ github.event.inputs.tag }} + else + TAG_NAME=${GITHUB_REF#refs/tags/} + fi + VERSION=${TAG_NAME#v} + REPO="${{ github.repository }}" + GHCR_IMAGE="ghcr.io/${REPO,,}" # ${,,} converts to lowercase + + # Get the tag message content + TAG_MESSAGE='${{ steps.tag_message.outputs.message }}' + + # Limit the message length (Telegram caps messages at 4096 characters; leave room for the fixed header and footer) + if [ ${#TAG_MESSAGE} -gt 3500 ]; then + TAG_MESSAGE="${TAG_MESSAGE:0:3500}..."
+ fi + + # Build the message body + MESSAGE="🚀 *New Sub2API release!*"$'\n'$'\n' + MESSAGE+="📦 Version: \`${VERSION}\`"$'\n'$'\n' + + # Append the release notes + if [ -n "$TAG_MESSAGE" ]; then + MESSAGE+="${TAG_MESSAGE}"$'\n'$'\n' + fi + + MESSAGE+="🐳 *Docker deployment:*"$'\n' + MESSAGE+="\`\`\`bash"$'\n' + # Generated dynamically depending on whether DockerHub is configured + if [ -n "$DOCKERHUB_USERNAME" ]; then + DOCKER_IMAGE="${DOCKERHUB_USERNAME}/sub2api" + MESSAGE+="# Docker Hub"$'\n' + MESSAGE+="docker pull ${DOCKER_IMAGE}:${TAG_NAME}"$'\n' + MESSAGE+="# GitHub Container Registry"$'\n' + fi + MESSAGE+="docker pull ${GHCR_IMAGE}:${TAG_NAME}"$'\n' + MESSAGE+="\`\`\`"$'\n'$'\n' + MESSAGE+="🔗 *Related links:*"$'\n' + MESSAGE+="• [GitHub Release](https://github.com/${REPO}/releases/tag/${TAG_NAME})"$'\n' + if [ -n "$DOCKERHUB_USERNAME" ]; then + MESSAGE+="• [Docker Hub](https://hub.docker.com/r/${DOCKER_IMAGE})"$'\n' + fi + MESSAGE+="• [GitHub Packages](https://github.com/${REPO}/pkgs/container/sub2api)"$'\n'$'\n' + MESSAGE+="#Sub2API #Release #${TAG_NAME//./_}" + + # Send the message + curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \ -H "Content-Type: application/json" \ -d "$(jq -n \ --arg chat_id "${TELEGRAM_CHAT_ID}" \ --arg text "${MESSAGE}" \ '{ chat_id: $chat_id, text: $text, parse_mode: "Markdown", disable_web_page_preview: true }')" diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml new file mode 100644 index 00000000..160a0df9 --- /dev/null +++ b/.github/workflows/security-scan.yml @@ -0,0 +1,62 @@ +name: Security Scan + +on: + push: + pull_request: + schedule: + - cron: '0 3 * * 1' + +permissions: + contents: read + +jobs: + backend-security: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: backend/go.mod + check-latest: false + cache-dependency-path: backend/go.sum + - name: Verify Go version + run: | + go version | grep -q 'go1.25.5' + - name: Run govulncheck + working-directory: backend + run: | + go install golang.org/x/vuln/cmd/govulncheck@latest + govulncheck ./... + - name: Run gosec + working-directory: backend + run: | + go install github.com/securego/gosec/v2/cmd/gosec@latest + gosec -severity high -confidence high ./...
+ + frontend-security: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Set up pnpm + uses: pnpm/action-setup@v4 + with: + version: 9 + - name: Set up Node.js + uses: actions/setup-node@v4 + with: + node-version: '20' + cache: 'pnpm' + cache-dependency-path: frontend/pnpm-lock.yaml + - name: Install dependencies + working-directory: frontend + run: pnpm install --frozen-lockfile + - name: Run pnpm audit + working-directory: frontend + run: | + pnpm audit --prod --audit-level=high --json > audit.json || true + - name: Check audit exceptions + run: | + python tools/check_pnpm_audit_exceptions.py \ --audit frontend/audit.json \ --exceptions .github/audit-exceptions.yml
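To make the exception policy concrete: the workflow stores the pnpm audit JSON, and the checker fails the build for any high-severity advisory that is not covered by an unexpired entry in `.github/audit-exceptions.yml`. The sketch below only illustrates that contract; it is not the shipped `tools/check_pnpm_audit_exceptions.py`, and the `advisories` / `github_advisory_id` field names are assumptions about the pnpm audit JSON shape rather than a verified schema.

```python
# Illustrative sketch only; the real checker is tools/check_pnpm_audit_exceptions.py.
import json
from datetime import date

import yaml  # assumes PyYAML is available


def unexcepted_advisories(audit_path: str, exceptions_path: str) -> list[str]:
    with open(audit_path) as f:
        audit = json.load(f)
    with open(exceptions_path) as f:
        exceptions = yaml.safe_load(f).get("exceptions", [])

    # An exception only counts while its expires_on date has not passed.
    today = date.today()
    allowed = {
        e["advisory"]
        for e in exceptions
        if date.fromisoformat(str(e["expires_on"])) >= today
    }

    failures = []
    # Assumed shape: {"advisories": {"<id>": {"severity": ..., "github_advisory_id": ...}}}
    for adv in audit.get("advisories", {}).values():
        ghsa = adv.get("github_advisory_id") or adv.get("url", "unknown advisory")
        if adv.get("severity") in ("high", "critical") and ghsa not in allowed:
            failures.append(ghsa)
    return failures


if __name__ == "__main__":
    bad = unexcepted_advisories("frontend/audit.json", ".github/audit-exceptions.yml")
    if bad:
        raise SystemExit("Unexcepted advisories: " + ", ".join(bad))
```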
diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..f317ed1a --- /dev/null +++ b/.gitignore @@ -0,0 +1,131 @@ +docs/claude-relay-service/ + +# =================== +# Go backend +# =================== +# Binaries +*.exe +*.exe~ +*.dll +*.so +*.dylib +backend/bin/ +backend/server +backend/sub2api +backend/main + +# Go test binaries +*.test + +# Test coverage +*.out +coverage.html + +# Dependencies (managed with go mod) +vendor/ + +# Go build cache +backend/.gocache/ + +# =================== +# Node.js / Vue frontend +# =================== +node_modules/ +frontend/node_modules/ +frontend/dist/ +*.local +*.tsbuildinfo +vite.config.d.ts +vite.config.js.timestamp-* + +# Logs +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* + +# =================== +# Environment config +# =================== +.env +.env.local +.env.*.local +*.env +!.env.example +docker-compose.override.yml + +# =================== +# IDE / editors +# =================== +.idea/ +.vscode/ +*.swp +*.swo +*~ +.project +.settings/ +.classpath + +# =================== +# Operating system +# =================== +.DS_Store +Thumbs.db +Desktop.ini + +# =================== +# Temporary files +# =================== +tmp/ +temp/ +*.tmp +*.temp +*.log +*.bak +.cache/ +.dev/ +.serena/ + +# =================== +# Build artifacts +# =================== +dist/ +build/ +release/ + +# Frontend build output embedded in the backend +# Keep a placeholder file so `//go:embed all:dist` always has a match in CI/lint, +# while still ignoring generated frontend build outputs. +backend/internal/web/dist/ +!backend/internal/web/dist/ +backend/internal/web/dist/* +!backend/internal/web/dist/.keep + +# Backend runtime cache data +backend/data/ + +# =================== +# Local config files (contain sensitive information) +# =================== +backend/config.yaml +deploy/config.yaml +backend/.installed + +# =================== +# Miscellaneous +# =================== +tests +CLAUDE.md +AGENTS.md +.claude +scripts +.code-review-state +openspec/ +docs/ +code-reviews/ +backend/cmd/server/server +deploy/docker-compose.override.yml +.gocache/ +vite.config.js +docs/* diff --git a/.goreleaser.simple.yaml b/.goreleaser.simple.yaml new file mode 100644 index 00000000..2155ed9d --- /dev/null +++ b/.goreleaser.simple.yaml @@ -0,0 +1,86 @@ +# Simplified GoReleaser config - publishes only the x86_64 GHCR image +version: 2 + +project_name: sub2api + +before: + hooks: + - go mod tidy -C backend + +builds: + - id: sub2api + dir: backend + main: ./cmd/server + binary: sub2api + flags: + - -tags=embed + env: + - CGO_ENABLED=0 + goos: + - linux + goarch: + - amd64 + ldflags: + - -s -w + - -X main.Commit={{.Commit}} + - -X main.Date={{.Date}} + - -X main.BuildType=release + +# Skip archives +archives: [] + +# Skip checksums +checksum: + disable: true + +changelog: + disable: true + +# GHCR x86_64 image only +dockers: + - id: ghcr-amd64 + goos: linux + goarch: amd64 + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest" + dockerfile: Dockerfile.goreleaser + use: buildx + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .Commit }}" + - "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}" + +# Skip manifests (not needed for a single architecture) +docker_manifests: [] + +release: + github: + owner: "{{ .Env.GITHUB_REPO_OWNER }}" + name: "{{ .Env.GITHUB_REPO_NAME }}" + draft: false + prerelease: auto + name_template: "Sub2API {{.Version}} (Simple)" + # Skip uploading binary archives + skip_upload: true + header: | + > AI API Gateway Platform - distribute and manage AI subscription quotas + > ⚡ Simple Release: x86_64 GHCR image only + + {{ .Env.TAG_MESSAGE }} + + footer: | + --- + + ## 📥 Installation + + **Docker (x86_64 only):** + ```bash + docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }} + ``` + + ## 📚 Documentation + + - [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}) diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 00000000..da2f9aa5 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,200 @@ +version: 2 + +project_name: sub2api + +before: + hooks: + - go mod tidy -C backend + +builds: + - id: sub2api + dir: backend + main: ./cmd/server + binary: sub2api + flags: + - -tags=embed + env: + - CGO_ENABLED=0 + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + ignore: + - goos: windows + goarch: arm64 + ldflags: + - -s -w + - -X main.Commit={{.Commit}} + - -X main.Date={{.Date}} + - -X main.BuildType=release + +archives: + - id: default + format: tar.gz + name_template: >- + {{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }} + format_overrides: + - goos: windows + format: zip + files: + - LICENSE* + - README* + - deploy/* + +checksum: + name_template: 'checksums.txt' + algorithm: sha256 + +changelog: + # Disable the auto-generated changelog; use the tag message exclusively + disable: true + +# Docker images +dockers: + # DockerHub images
(skipped if DOCKERHUB_USERNAME is 'skip') + - id: amd64 + goos: linux + goarch: amd64 + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64" + dockerfile: Dockerfile.goreleaser + use: buildx + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .Commit }}" + + - id: arm64 + goos: linux + goarch: arm64 + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64" + dockerfile: Dockerfile.goreleaser + use: buildx + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .Commit }}" + + # GHCR images (owner must be lowercase) + - id: ghcr-amd64 + goos: linux + goarch: amd64 + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64" + dockerfile: Dockerfile.goreleaser + use: buildx + build_flag_templates: + - "--platform=linux/amd64" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .Commit }}" + - "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}" + + - id: ghcr-arm64 + goos: linux + goarch: arm64 + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64" + dockerfile: Dockerfile.goreleaser + use: buildx + build_flag_templates: + - "--platform=linux/arm64" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.revision={{ .Commit }}" + - "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}" + +# Docker manifests for multi-arch support +docker_manifests: + # DockerHub manifests (skipped if DOCKERHUB_USERNAME is 'skip') + - name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}" + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64" + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64" + + - name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:latest" + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64" + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64" + + - name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}.{{ .Minor }}" + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64" + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64" + + - name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}" + skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}' + image_templates: + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64" + - "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64" + + # GHCR manifests (owner must be lowercase) + - name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}" + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ 
.Version }}-amd64" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64" + + - name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest" + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64" + + - name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}.{{ .Minor }}" + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64" + + - name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}" + image_templates: + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64" + - "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64" + +release: + github: + owner: "{{ .Env.GITHUB_REPO_OWNER }}" + name: "{{ .Env.GITHUB_REPO_NAME }}" + draft: false + prerelease: auto + name_template: "Sub2API {{.Version}}" + # 完全使用 tag 消息作为 release 内容(通过环境变量传入) + header: | + > AI API Gateway Platform - 将 AI 订阅配额分发和管理 + + {{ .Env.TAG_MESSAGE }} + + footer: | + + --- + + ## 📥 Installation + + **Docker:** + ```bash + {{ if ne .Env.DOCKERHUB_USERNAME "skip" -}} + # Docker Hub + docker pull {{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }} + + {{ end -}} + # GitHub Container Registry + docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }} + ``` + + **One-line install (Linux):** + ```bash + curl -sSL https://raw.githubusercontent.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/main/deploy/install.sh | sudo bash + ``` + + **Manual download:** + Download the appropriate archive for your platform from the assets below. 
+ + ## 📚 Documentation + + - [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}) + - [Installation Guide](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/blob/main/deploy/README.md) diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..b3320300 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,111 @@ +# ============================================================================= +# Sub2API Multi-Stage Dockerfile +# ============================================================================= +# Stage 1: Build frontend +# Stage 2: Build Go backend with embedded frontend +# Stage 3: Final minimal image +# ============================================================================= + +ARG NODE_IMAGE=node:24-alpine +ARG GOLANG_IMAGE=golang:1.25.5-alpine +ARG ALPINE_IMAGE=alpine:3.20 +ARG GOPROXY=https://goproxy.cn,direct +ARG GOSUMDB=sum.golang.google.cn + +# ----------------------------------------------------------------------------- +# Stage 1: Frontend Builder +# ----------------------------------------------------------------------------- +FROM ${NODE_IMAGE} AS frontend-builder + +WORKDIR /app/frontend + +# Install pnpm +RUN corepack enable && corepack prepare pnpm@latest --activate + +# Install dependencies first (better caching) +COPY frontend/package.json frontend/pnpm-lock.yaml ./ +RUN pnpm install --frozen-lockfile + +# Copy frontend source and build +COPY frontend/ ./ +RUN pnpm run build + +# ----------------------------------------------------------------------------- +# Stage 2: Backend Builder +# ----------------------------------------------------------------------------- +FROM ${GOLANG_IMAGE} AS backend-builder + +# Build arguments for version info (set by CI) +ARG VERSION=docker +ARG COMMIT=docker +ARG DATE +ARG GOPROXY +ARG GOSUMDB + +ENV GOPROXY=${GOPROXY} +ENV GOSUMDB=${GOSUMDB} + +# Install build dependencies +RUN apk add --no-cache git ca-certificates tzdata + +WORKDIR /app/backend + +# Copy go mod files first (better caching) +COPY backend/go.mod backend/go.sum ./ +RUN go mod download + +# Copy backend source first +COPY backend/ ./ + +# Copy frontend dist from previous stage (must be after backend copy to avoid being overwritten) +COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist + +# Build the binary (BuildType=release for CI builds, embed frontend) +RUN CGO_ENABLED=0 GOOS=linux go build \ + -tags embed \ + -ldflags="-s -w -X main.Commit=${COMMIT} -X main.Date=${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)} -X main.BuildType=release" \ + -o /app/sub2api \ + ./cmd/server + +# ----------------------------------------------------------------------------- +# Stage 3: Final Runtime Image +# ----------------------------------------------------------------------------- +FROM ${ALPINE_IMAGE} + +# Labels +LABEL maintainer="Wei-Shaw " +LABEL description="Sub2API - AI API Gateway Platform" +LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api" + +# Install runtime dependencies +RUN apk add --no-cache \ + ca-certificates \ + tzdata \ + curl \ + && rm -rf /var/cache/apk/* + +# Create non-root user +RUN addgroup -g 1000 sub2api && \ + adduser -u 1000 -G sub2api -s /bin/sh -D sub2api + +# Set working directory +WORKDIR /app + +# Copy binary from builder +COPY --from=backend-builder /app/sub2api /app/sub2api + +# Create data directory +RUN mkdir -p /app/data && chown -R sub2api:sub2api /app + +# Switch to non-root user +USER sub2api + +# Expose port (can 
be overridden by SERVER_PORT env var) +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \ + CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1 + +# Run the application +ENTRYPOINT ["/app/sub2api"] diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser new file mode 100644 index 00000000..2242c162 --- /dev/null +++ b/Dockerfile.goreleaser @@ -0,0 +1,40 @@ +# ============================================================================= +# Sub2API Dockerfile for GoReleaser +# ============================================================================= +# This Dockerfile is used by GoReleaser to build Docker images. +# It only packages the pre-built binary, no compilation needed. +# ============================================================================= + +FROM alpine:3.19 + +LABEL maintainer="Wei-Shaw " +LABEL description="Sub2API - AI API Gateway Platform" +LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api" + +# Install runtime dependencies +RUN apk add --no-cache \ + ca-certificates \ + tzdata \ + curl \ + && rm -rf /var/cache/apk/* + +# Create non-root user +RUN addgroup -g 1000 sub2api && \ + adduser -u 1000 -G sub2api -s /bin/sh -D sub2api + +WORKDIR /app + +# Copy pre-built binary from GoReleaser +COPY sub2api /app/sub2api + +# Create data directory +RUN mkdir -p /app/data && chown -R sub2api:sub2api /app + +USER sub2api + +EXPOSE 8080 + +HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \ + CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1 + +ENTRYPOINT ["/app/sub2api"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..7a94ca9d --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Wesley Liddick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Linux DO Connect.md b/Linux DO Connect.md new file mode 100644 index 00000000..7ca1260f --- /dev/null +++ b/Linux DO Connect.md @@ -0,0 +1,368 @@ +# Linux DO Connect + +OAuth(Open Authorization)是一个开放的网络授权标准,目前最新版本为 OAuth 2.0。我们日常使用的第三方登录(如 Google 账号登录)就采用了该标准。OAuth 允许用户授权第三方应用访问存储在其他服务提供商(如 Google)上的信息,无需在不同平台上重复填写注册信息。用户授权后,平台可以直接访问用户的账户信息进行身份验证,而用户无需向第三方应用提供密码。 + +目前系统已实现完整的 OAuth2 授权码(code)方式鉴权,但界面等配套功能还在持续完善中。让我们一起打造一个更完善的共享方案。 + +## 基本介绍 + +这是一套标准的 OAuth2 鉴权系统,可以让开发者共享论坛的用户基本信息。 + +- 可获取字段: + +| 参数 | 说明 | +| ----------------- | ------------------------------- | +| `id` | 用户唯一标识(不可变) | +| `username` | 论坛用户名 | +| `name` | 论坛用户昵称(可变) | +| `avatar_template` | 用户头像模板URL(支持多种尺寸) | +| `active` | 账号活跃状态 | +| `trust_level` | 信任等级(0-4) | +| `silenced` | 禁言状态 | +| `external_ids` | 外部ID关联信息 | +| `api_key` | API访问密钥 | + +通过这些信息,公益网站/接口可以实现: + +1. 基于 `id` 的服务频率限制 +2. 基于 `trust_level` 的服务额度分配 +3. 基于用户信息的滥用举报机制 + +## 相关端点 + +- Authorize 端点: `https://connect.linux.do/oauth2/authorize` +- Token 端点:`https://connect.linux.do/oauth2/token` +- 用户信息 端点:`https://connect.linux.do/api/user` + +## 申请使用 + +- 访问 [Connect.Linux.Do](https://connect.linux.do/) 申请接入你的应用。 + +![linuxdoconnect_1](https://wiki.linux.do/_next/image?url=%2Flinuxdoconnect_1.png&w=1080&q=75) + +- 点击 **`我的应用接入`** - **`申请新接入`**,填写相关信息。其中 **`回调地址`** 是你的应用接收用户信息的地址。 + +![linuxdoconnect_2](https://wiki.linux.do/_next/image?url=%2Flinuxdoconnect_2.png&w=1080&q=75) + +- 申请成功后,你将获得 **`Client Id`** 和 **`Client Secret`**,这是你应用的唯一身份凭证。 + +![linuxdoconnect_3](https://wiki.linux.do/_next/image?url=%2Flinuxdoconnect_3.png&w=1080&q=75) + +## 接入 Linux Do + +JavaScript +```JavaScript +// 安装第三方请求库(或使用原生的 Fetch API),本例中使用 axios +// npm install axios + +// 通过 OAuth2 获取 Linux Do 用户信息的参考流程 +const axios = require('axios'); +const readline = require('readline'); + +// 配置信息(建议通过环境变量配置,避免使用硬编码) +const CLIENT_ID = '你的 Client ID'; +const CLIENT_SECRET = '你的 Client Secret'; +const REDIRECT_URI = '你的回调地址'; +const AUTH_URL = 'https://connect.linux.do/oauth2/authorize'; +const TOKEN_URL = 'https://connect.linux.do/oauth2/token'; +const USER_INFO_URL = 'https://connect.linux.do/api/user'; + +// 第一步:生成授权 URL +function getAuthUrl() { + const params = new URLSearchParams({ + client_id: CLIENT_ID, + redirect_uri: REDIRECT_URI, + response_type: 'code', + scope: 'user' + }); + + return `${AUTH_URL}?${params.toString()}`; +} + +// 第二步:获取 code 参数 +function getCode() { + return new Promise((resolve) => { + // 本例中使用终端输入来模拟流程,仅供本地测试 + // 请在实际应用中替换为真实的处理逻辑 + const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); + rl.question('从回调 URL 中提取出 code,粘贴到此处并按回车:', (answer) => { + rl.close(); + resolve(answer.trim()); + }); + }); +} + +// 第三步:使用 code 参数获取访问令牌 +async function getAccessToken(code) { + try { + const form = new URLSearchParams({ + client_id: CLIENT_ID, + client_secret: CLIENT_SECRET, + code: code, + redirect_uri: REDIRECT_URI, + grant_type: 'authorization_code' + }).toString(); + + const response = await axios.post(TOKEN_URL, form, { + // 提醒:需正确配置请求头,否则无法正常获取访问令牌 + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + 'Accept': 'application/json' + } + }); + + return response.data; + } catch (error) { + console.error(`获取访问令牌失败:${error.response ? 
JSON.stringify(error.response.data) : error.message}`);
+    throw error;
+  }
+}
+
+// 第四步:使用访问令牌获取用户信息
+async function getUserInfo(accessToken) {
+  try {
+    const response = await axios.get(USER_INFO_URL, {
+      headers: {
+        Authorization: `Bearer ${accessToken}`
+      }
+    });
+
+    return response.data;
+  } catch (error) {
+    console.error(`获取用户信息失败:${error.response ? JSON.stringify(error.response.data) : error.message}`);
+    throw error;
+  }
+}
+
+// 主流程
+async function main() {
+  // 1. 生成授权 URL,前端引导用户访问授权页
+  const authUrl = getAuthUrl();
+  console.log(`请访问此 URL 授权:${authUrl}\n`);
+
+  // 2. 用户授权后,从回调 URL 获取 code 参数
+  const code = await getCode();
+
+  try {
+    // 3. 使用 code 参数获取访问令牌
+    const tokenData = await getAccessToken(code);
+    const accessToken = tokenData.access_token;
+
+    // 4. 使用访问令牌获取用户信息
+    if (accessToken) {
+      const userInfo = await getUserInfo(accessToken);
+      console.log(`\n获取用户信息成功:${JSON.stringify(userInfo, null, 2)}`);
+    } else {
+      console.log(`\n获取访问令牌失败:${JSON.stringify(tokenData)}`);
+    }
+  } catch (error) {
+    console.error('发生错误:', error);
+  }
+}
+
+// 运行主流程
+main();
+```
+Python
+```python
+# 安装第三方请求库,本例中使用 requests
+# pip install requests
+
+# 通过 OAuth2 获取 Linux Do 用户信息的参考流程
+import requests
+import json
+from urllib.parse import urlencode
+
+# 配置信息(建议通过环境变量配置,避免使用硬编码)
+CLIENT_ID = '你的 Client ID'
+CLIENT_SECRET = '你的 Client Secret'
+REDIRECT_URI = '你的回调地址'
+AUTH_URL = 'https://connect.linux.do/oauth2/authorize'
+TOKEN_URL = 'https://connect.linux.do/oauth2/token'
+USER_INFO_URL = 'https://connect.linux.do/api/user'
+
+# 第一步:生成授权 URL
+def get_auth_url():
+    params = {
+        'client_id': CLIENT_ID,
+        'redirect_uri': REDIRECT_URI,
+        'response_type': 'code',
+        'scope': 'user'
+    }
+    # 使用 urlencode 对参数做 URL 编码,避免回调地址中的特殊字符破坏 URL
+    return f"{AUTH_URL}?{urlencode(params)}"
+
+# 第二步:获取 code 参数
+def get_code():
+    # 本例中使用终端输入来模拟流程,仅供本地测试
+    # 请在实际应用中替换为真实的处理逻辑
+    return input('从回调 URL 中提取出 code,粘贴到此处并按回车:').strip()
+
+# 第三步:使用 code 参数获取访问令牌
+def get_access_token(code):
+    try:
+        data = {
+            'client_id': CLIENT_ID,
+            'client_secret': CLIENT_SECRET,
+            'code': code,
+            'redirect_uri': REDIRECT_URI,
+            'grant_type': 'authorization_code'
+        }
+        # 提醒:需正确配置请求头,否则无法正常获取访问令牌
+        headers = {
+            'Content-Type': 'application/x-www-form-urlencoded',
+            'Accept': 'application/json'
+        }
+        response = requests.post(TOKEN_URL, data=data, headers=headers)
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        print(f"获取访问令牌失败:{e}")
+        return None
+
+# 第四步:使用访问令牌获取用户信息
+def get_user_info(access_token):
+    try:
+        headers = {
+            'Authorization': f'Bearer {access_token}'
+        }
+        response = requests.get(USER_INFO_URL, headers=headers)
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.RequestException as e:
+        print(f"获取用户信息失败:{e}")
+        return None
+
+# 主流程
+if __name__ == '__main__':
+    # 1. 生成授权 URL,前端引导用户访问授权页
+    auth_url = get_auth_url()
+    print(f'请访问此 URL 授权:{auth_url}\n')
+
+    # 2. 用户授权后,从回调 URL 获取 code 参数
+    code = get_code()
+
+    # 3. 使用 code 参数获取访问令牌
+    token_data = get_access_token(code)
+    if token_data:
+        access_token = token_data.get('access_token')
+
+        # 4. 使用访问令牌获取用户信息
+        if access_token:
+            user_info = get_user_info(access_token)
+            if user_info:
+                print(f"\n获取用户信息成功:{json.dumps(user_info, indent=2)}")
+            else:
+                print("\n获取用户信息失败")
+        else:
+            print(f"\n获取访问令牌失败:{json.dumps(token_data, indent=2)}")
+    else:
+        print("\n获取访问令牌失败")
+```
+PHP
+```php
+<?php
+// 通过 OAuth2 获取 Linux Do 用户信息的参考流程
+
+// 配置信息
+$CLIENT_ID = '你的 Client ID';
+$CLIENT_SECRET = '你的 Client Secret';
+$REDIRECT_URI = '你的回调地址';
+$AUTH_URL = 'https://connect.linux.do/oauth2/authorize';
+$TOKEN_URL = 'https://connect.linux.do/oauth2/token';
+$USER_INFO_URL = 'https://connect.linux.do/api/user';
+
+// 生成授权 URL
+function getAuthUrl($clientId, $redirectUri) {
+    global $AUTH_URL;
+    return $AUTH_URL . '?' . http_build_query([
+        'client_id' => $clientId,
+        'redirect_uri' => $redirectUri,
+        'response_type' => 'code',
+        'scope' => 'user'
+    ]);
+}
+
+// 使用 code 参数获取用户信息(合并获取令牌和获取用户信息的步骤)
+function getUserInfoWithCode($code, $clientId, $clientSecret, $redirectUri) {
+    global $TOKEN_URL, $USER_INFO_URL;
+
+    // 1. 获取访问令牌
+    $ch = curl_init($TOKEN_URL);
+    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+    curl_setopt($ch, CURLOPT_POST, true);
+    curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query([
+        'client_id' => $clientId,
+        'client_secret' => $clientSecret,
+        'code' => $code,
+        'redirect_uri' => $redirectUri,
+        'grant_type' => 'authorization_code'
+    ]));
+    curl_setopt($ch, CURLOPT_HTTPHEADER, [
+        'Content-Type: application/x-www-form-urlencoded',
+        'Accept: application/json'
+    ]);
+
+    $tokenResponse = curl_exec($ch);
+    curl_close($ch);
+
+    $tokenData = json_decode($tokenResponse, true);
+    if (!isset($tokenData['access_token'])) {
+        return ['error' => '获取访问令牌失败', 'details' => $tokenData];
+    }
+
+    // 2. 获取用户信息
+    $ch = curl_init($USER_INFO_URL);
+    curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+    curl_setopt($ch, CURLOPT_HTTPHEADER, [
+        'Authorization: Bearer ' . $tokenData['access_token']
+    ]);
+
+    $userResponse = curl_exec($ch);
+    curl_close($ch);
+
+    return json_decode($userResponse, true);
+}
+
+// 主流程
+// 1. 生成授权 URL,输出登录链接
+$authUrl = getAuthUrl($CLIENT_ID, $REDIRECT_URI);
+echo "<a href='$authUrl'>使用 Linux Do 登录</a>";
+
+// 2. 处理回调并获取用户信息
+if (isset($_GET['code'])) {
+    $userInfo = getUserInfoWithCode(
+        $_GET['code'],
+        $CLIENT_ID,
+        $CLIENT_SECRET,
+        $REDIRECT_URI
+    );
+
+    if (isset($userInfo['error'])) {
+        echo '错误: ' . $userInfo['error'];
+    } else {
+        echo '欢迎, ' . $userInfo['name'] . '!';
+        // 处理用户登录逻辑...
+    }
+}
+```
+
+## 使用说明
+
+### 授权流程
+
+1. 用户点击应用中的“使用 Linux Do 登录”按钮
+2. 系统将用户重定向至 Linux Do 的授权页面
+3. 用户完成授权后,系统自动重定向回应用并携带授权码
+4. 应用使用授权码获取访问令牌
+5.
使用访问令牌获取用户信息 + +### 安全建议 + +- 切勿在前端代码中暴露 Client Secret +- 对所有用户输入数据进行严格验证 +- 确保使用 HTTPS 协议传输数据 +- 定期更新并妥善保管 Client Secret \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..a5e18a37 --- /dev/null +++ b/Makefile @@ -0,0 +1,22 @@ +.PHONY: build build-backend build-frontend test test-backend test-frontend + +# 一键编译前后端 +build: build-backend build-frontend + +# 编译后端(复用 backend/Makefile) +build-backend: + @$(MAKE) -C backend build + +# 编译前端(需要已安装依赖) +build-frontend: + @pnpm --dir frontend run build + +# 运行测试(后端 + 前端) +test: test-backend test-frontend + +test-backend: + @$(MAKE) -C backend test + +test-frontend: + @pnpm --dir frontend run lint:check + @pnpm --dir frontend run typecheck diff --git a/PR_DESCRIPTION.md b/PR_DESCRIPTION.md new file mode 100644 index 00000000..b240f45c --- /dev/null +++ b/PR_DESCRIPTION.md @@ -0,0 +1,164 @@ +## 概述 + +全面增强运维监控系统(Ops)的错误日志管理和告警静默功能,优化前端 UI 组件代码质量和用户体验。本次更新重构了核心服务层和数据访问层,提升系统可维护性和运维效率。 + +## 主要改动 + +### 1. 错误日志查询优化 + +**功能特性:** +- 新增 GetErrorLogByID 接口,支持按 ID 精确查询错误详情 +- 优化错误日志过滤逻辑,支持多维度筛选(平台、阶段、来源、所有者等) +- 改进查询参数处理,简化代码结构 +- 增强错误分类和标准化处理 +- 支持错误解决状态追踪(resolved 字段) + +**技术实现:** +- `ops_handler.go` - 新增单条错误日志查询接口 +- `ops_repo.go` - 优化数据查询和过滤条件构建 +- `ops_models.go` - 扩展错误日志数据模型 +- 前端 API 接口同步更新 + +### 2. 告警静默功能 + +**功能特性:** +- 支持按规则、平台、分组、区域等维度静默告警 +- 可设置静默时长和原因说明 +- 静默记录可追溯,记录创建人和创建时间 +- 自动过期机制,避免永久静默 + +**技术实现:** +- `037_ops_alert_silences.sql` - 新增告警静默表 +- `ops_alerts.go` - 告警静默逻辑实现 +- `ops_alerts_handler.go` - 告警静默 API 接口 +- `OpsAlertEventsCard.vue` - 前端告警静默操作界面 + +**数据库结构:** + +| 字段 | 类型 | 说明 | +|------|------|------| +| rule_id | BIGINT | 告警规则 ID | +| platform | VARCHAR(64) | 平台标识 | +| group_id | BIGINT | 分组 ID(可选) | +| region | VARCHAR(64) | 区域(可选) | +| until | TIMESTAMPTZ | 静默截止时间 | +| reason | TEXT | 静默原因 | +| created_by | BIGINT | 创建人 ID | + +### 3. 错误分类标准化 + +**功能特性:** +- 统一错误阶段分类(request|auth|routing|upstream|network|internal) +- 规范错误归属分类(client|provider|platform) +- 标准化错误来源分类(client_request|upstream_http|gateway) +- 自动迁移历史数据到新分类体系 + +**技术实现:** +- `038_ops_errors_resolution_retry_results_and_standardize_classification.sql` - 分类标准化迁移 +- 自动映射历史遗留分类到新标准 +- 自动解决已恢复的上游错误(客户端状态码 < 400) + +### 4. Gateway 服务集成 + +**功能特性:** +- 完善各 Gateway 服务的 Ops 集成 +- 统一错误日志记录接口 +- 增强上游错误追踪能力 + +**涉及服务:** +- `antigravity_gateway_service.go` - Antigravity 网关集成 +- `gateway_service.go` - 通用网关集成 +- `gemini_messages_compat_service.go` - Gemini 兼容层集成 +- `openai_gateway_service.go` - OpenAI 网关集成 + +### 5. 前端 UI 优化 + +**代码重构:** +- 大幅简化错误详情模态框代码(从 828 行优化到 450 行) +- 优化错误日志表格组件,提升可读性 +- 清理未使用的 i18n 翻译,减少冗余 +- 统一组件代码风格和格式 +- 优化骨架屏组件,更好匹配实际看板布局 + +**布局改进:** +- 修复模态框内容溢出和滚动问题 +- 优化表格布局,使用 flex 布局确保正确显示 +- 改进看板头部布局和交互 +- 提升响应式体验 +- 骨架屏支持全屏模式适配 + +**交互优化:** +- 优化告警事件卡片功能和展示 +- 改进错误详情展示逻辑 +- 增强请求详情模态框 +- 完善运行时设置卡片 +- 改进加载动画效果 + +### 6. 
国际化完善 + +**文案补充:** +- 补充错误日志相关的英文翻译 +- 添加告警静默功能的中英文文案 +- 完善提示文本和错误信息 +- 统一术语翻译标准 + +## 文件变更 + +**后端(26 个文件):** +- `backend/internal/handler/admin/ops_alerts_handler.go` - 告警接口增强 +- `backend/internal/handler/admin/ops_handler.go` - 错误日志接口优化 +- `backend/internal/handler/ops_error_logger.go` - 错误记录器增强 +- `backend/internal/repository/ops_repo.go` - 数据访问层重构 +- `backend/internal/repository/ops_repo_alerts.go` - 告警数据访问增强 +- `backend/internal/service/ops_*.go` - 核心服务层重构(10 个文件) +- `backend/internal/service/*_gateway_service.go` - Gateway 集成(4 个文件) +- `backend/internal/server/routes/admin.go` - 路由配置更新 +- `backend/migrations/*.sql` - 数据库迁移(2 个文件) +- 测试文件更新(5 个文件) + +**前端(13 个文件):** +- `frontend/src/views/admin/ops/OpsDashboard.vue` - 看板主页优化 +- `frontend/src/views/admin/ops/components/*.vue` - 组件重构(10 个文件) +- `frontend/src/api/admin/ops.ts` - API 接口扩展 +- `frontend/src/i18n/locales/*.ts` - 国际化文本(2 个文件) + +## 代码统计 + +- 44 个文件修改 +- 3733 行新增 +- 995 行删除 +- 净增加 2738 行 + +## 核心改进 + +**可维护性提升:** +- 重构核心服务层,职责更清晰 +- 简化前端组件代码,降低复杂度 +- 统一代码风格和命名规范 +- 清理冗余代码和未使用的翻译 +- 标准化错误分类体系 + +**功能完善:** +- 告警静默功能,减少告警噪音 +- 错误日志查询优化,提升运维效率 +- Gateway 服务集成完善,统一监控能力 +- 错误解决状态追踪,便于问题管理 + +**用户体验优化:** +- 修复多个 UI 布局问题 +- 优化交互流程 +- 完善国际化支持 +- 提升响应式体验 +- 改进加载状态展示 + +## 测试验证 + +- ✅ 错误日志查询和过滤功能 +- ✅ 告警静默创建和自动过期 +- ✅ 错误分类标准化迁移 +- ✅ Gateway 服务错误日志记录 +- ✅ 前端组件布局和交互 +- ✅ 骨架屏全屏模式适配 +- ✅ 国际化文本完整性 +- ✅ API 接口功能正确性 +- ✅ 数据库迁移执行成功 diff --git a/README.md b/README.md new file mode 100644 index 00000000..fa965e6f --- /dev/null +++ b/README.md @@ -0,0 +1,458 @@ +# Sub2API + +
+ +[![Go](https://img.shields.io/badge/Go-1.25.5-00ADD8.svg)](https://golang.org/) +[![Vue](https://img.shields.io/badge/Vue-3.4+-4FC08D.svg)](https://vuejs.org/) +[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-15+-336791.svg)](https://www.postgresql.org/) +[![Redis](https://img.shields.io/badge/Redis-7+-DC382D.svg)](https://redis.io/) +[![Docker](https://img.shields.io/badge/Docker-Ready-2496ED.svg)](https://www.docker.com/) + +**AI API Gateway Platform for Subscription Quota Distribution** + +English | [中文](README_CN.md) + +
+ +--- + +## Demo + +Try Sub2API online: **https://v2.pincc.ai/** + +Demo credentials (shared demo environment; **not** created automatically for self-hosted installs): + +| Email | Password | +|-------|----------| +| admin@sub2api.com | admin123 | + +## Overview + +Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions (like Claude Code $200/month). Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding. + +## Features + +- **Multi-Account Management** - Support multiple upstream account types (OAuth, API Key) +- **API Key Distribution** - Generate and manage API Keys for users +- **Precise Billing** - Token-level usage tracking and cost calculation +- **Smart Scheduling** - Intelligent account selection with sticky sessions +- **Concurrency Control** - Per-user and per-account concurrency limits +- **Rate Limiting** - Configurable request and token rate limits +- **Admin Dashboard** - Web interface for monitoring and management + +## Tech Stack + +| Component | Technology | +|-----------|------------| +| Backend | Go 1.25.5, Gin, Ent | +| Frontend | Vue 3.4+, Vite 5+, TailwindCSS | +| Database | PostgreSQL 15+ | +| Cache/Queue | Redis 7+ | + +--- + +## Documentation + +- Dependency Security: `docs/dependency-security.md` + +--- + +## Deployment + +### Method 1: Script Installation (Recommended) + +One-click installation script that downloads pre-built binaries from GitHub Releases. + +#### Prerequisites + +- Linux server (amd64 or arm64) +- PostgreSQL 15+ (installed and running) +- Redis 7+ (installed and running) +- Root privileges + +#### Installation Steps + +```bash +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash +``` + +The script will: +1. Detect your system architecture +2. Download the latest release +3. Install binary to `/opt/sub2api` +4. Create systemd service +5. Configure system user and permissions + +#### Post-Installation + +```bash +# 1. Start the service +sudo systemctl start sub2api + +# 2. Enable auto-start on boot +sudo systemctl enable sub2api + +# 3. Open Setup Wizard in browser +# http://YOUR_SERVER_IP:8080 +``` + +The Setup Wizard will guide you through: +- Database configuration +- Redis configuration +- Admin account creation + +#### Upgrade + +You can upgrade directly from the **Admin Dashboard** by clicking the **Check for Updates** button in the top-left corner. + +The web interface will: +- Check for new versions automatically +- Download and apply updates with one click +- Support rollback if needed + +#### Useful Commands + +```bash +# Check status +sudo systemctl status sub2api + +# View logs +sudo journalctl -u sub2api -f + +# Restart service +sudo systemctl restart sub2api + +# Uninstall +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y +``` + +--- + +### Method 2: Docker Compose + +Deploy with Docker Compose, including PostgreSQL and Redis containers. + +#### Prerequisites + +- Docker 20.10+ +- Docker Compose v2+ + +#### Installation Steps + +```bash +# 1. Clone the repository +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 2. Enter the deploy directory +cd deploy + +# 3. Copy environment configuration +cp .env.example .env + +# 4. 
Edit configuration (set your passwords) +nano .env +``` + +**Required configuration in `.env`:** + +```bash +# PostgreSQL password (REQUIRED - change this!) +POSTGRES_PASSWORD=your_secure_password_here + +# Optional: Admin account +ADMIN_EMAIL=admin@example.com +ADMIN_PASSWORD=your_admin_password + +# Optional: Custom port +SERVER_PORT=8080 + +# Optional: Security configuration +# Enable URL allowlist validation (false to skip allowlist checks, only basic format validation) +SECURITY_URL_ALLOWLIST_ENABLED=false + +# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) +# ⚠️ WARNING: Enabling this allows HTTP (plaintext) URLs which can expose API keys +# Only recommended for: +# - Development/testing environments +# - Internal networks with trusted endpoints +# - When using local test servers (http://localhost) +# PRODUCTION: Keep this false or use HTTPS URLs only +SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false + +# Allow private IP addresses for upstream/pricing/CRS (for internal deployments) +SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +``` + +```bash +# 5. Start all services +docker-compose up -d + +# 6. Check status +docker-compose ps + +# 7. View logs +docker-compose logs -f sub2api +``` + +#### Access + +Open `http://YOUR_SERVER_IP:8080` in your browser. + +#### Upgrade + +```bash +# Pull latest image and recreate container +docker-compose pull +docker-compose up -d +``` + +#### Useful Commands + +```bash +# Stop all services +docker-compose down + +# Restart +docker-compose restart + +# View all logs +docker-compose logs -f +``` + +--- + +### Method 3: Build from Source + +Build and run from source code for development or customization. + +#### Prerequisites + +- Go 1.21+ +- Node.js 18+ +- PostgreSQL 15+ +- Redis 7+ + +#### Build Steps + +```bash +# 1. Clone the repository +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 2. Install pnpm (if not already installed) +npm install -g pnpm + +# 3. Build frontend +cd frontend +pnpm install +pnpm run build +# Output will be in ../backend/internal/web/dist/ + +# 4. Build backend with embedded frontend +cd ../backend +go build -tags embed -o sub2api ./cmd/server + +# 5. Create configuration file +cp ../deploy/config.example.yaml ./config.yaml + +# 6. Edit configuration +nano config.yaml +``` + +> **Note:** The `-tags embed` flag embeds the frontend into the binary. Without this flag, the binary will not serve the frontend UI. 
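+
+For reference, a rough sketch of how such tag-gated embedding is commonly wired up (a minimal illustration only; the project's actual `internal/web` package may differ):
+
+```go
+//go:build embed
+
+package web
+
+import (
+	"embed"
+	"io/fs"
+)
+
+// The dist directory is produced by `pnpm run build` (step 3 above)
+// before `go build -tags embed` runs.
+//
+//go:embed dist
+var distFS embed.FS
+
+// Frontend returns the embedded bundle rooted at dist/. A counterpart
+// file compiled without the embed tag would report that no frontend is
+// available, which is why binaries built without -tags embed cannot
+// serve the UI.
+func Frontend() (fs.FS, error) {
+	return fs.Sub(distFS, "dist")
+}
+```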
+
+**Key configuration in `config.yaml`:**
+
+```yaml
+server:
+  host: "0.0.0.0"
+  port: 8080
+  mode: "release"
+
+database:
+  host: "localhost"
+  port: 5432
+  user: "postgres"
+  password: "your_password"
+  dbname: "sub2api"
+
+redis:
+  host: "localhost"
+  port: 6379
+  password: ""
+
+jwt:
+  secret: "change-this-to-a-secure-random-string"
+  expire_hour: 24
+
+default:
+  user_concurrency: 5
+  user_balance: 0
+  api_key_prefix: "sk-"
+  rate_multiplier: 1.0
+```
+
+Additional security-related options are available in `config.yaml`:
+
+- `cors.allowed_origins` for CORS allowlist
+- `security.url_allowlist` for upstream/pricing/CRS host allowlists
+- `security.url_allowlist.enabled` to disable URL validation (use with caution)
+- `security.url_allowlist.allow_insecure_http` to allow HTTP URLs when validation is disabled
+- `security.url_allowlist.allow_private_hosts` to allow private/local IP addresses
+- `security.response_headers.enabled` to enable configurable response header filtering (disabled uses default allowlist)
+- `security.csp` to control Content-Security-Policy headers
+- `billing.circuit_breaker` to fail closed on billing errors
+- `server.trusted_proxies` to enable X-Forwarded-For parsing
+- `turnstile.required` to require Turnstile in release mode
+
+**⚠️ Security Warning: HTTP URL Configuration**
+
+When `security.url_allowlist.enabled=false`, the system performs minimal URL validation by default, **rejecting HTTP URLs** and only allowing HTTPS. To allow HTTP URLs (e.g., for development or internal testing), you must explicitly set:
+
+```yaml
+security:
+  url_allowlist:
+    enabled: false            # Disable allowlist checks
+    allow_insecure_http: true # Allow HTTP URLs (⚠️ INSECURE)
+```
+
+**Or via environment variables:**
+
+```bash
+SECURITY_URL_ALLOWLIST_ENABLED=false
+SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
+```
+
+**Risks of allowing HTTP:**
+- API keys and data transmitted in **plaintext** (vulnerable to interception)
+- Susceptible to **man-in-the-middle (MITM) attacks**
+- **NOT suitable for production** environments
+
+**When to use HTTP:**
+- ✅ Development/testing with local servers (http://localhost)
+- ✅ Internal networks with trusted endpoints
+- ✅ Testing account connectivity before obtaining HTTPS
+- ❌ Production environments (use HTTPS only)
+
+**Example error without this setting:**
+```
+Invalid base URL: invalid url scheme: http
+```
+
+If you disable URL validation or response header filtering, harden your network layer:
+- Enforce an egress allowlist for upstream domains/IPs
+- Block private/loopback/link-local ranges
+- Enforce TLS-only outbound traffic
+- Strip sensitive upstream response headers at the proxy
+
+```bash
+# 7. Run the application
+./sub2api
+```
+
+#### Development Mode
+
+```bash
+# Backend (with hot reload)
+cd backend
+go run ./cmd/server
+
+# Frontend (with hot reload)
+cd frontend
+pnpm run dev
+```
+
+#### Code Generation
+
+When editing `backend/ent/schema`, regenerate Ent + Wire:
+
+```bash
+cd backend
+go generate ./ent
+go generate ./cmd/server
+```
+
+---
+
+## Simple Mode
+
+Simple Mode is designed for individual developers or internal teams who want quick access without full SaaS features.
+
+- Enable: Set environment variable `RUN_MODE=simple`
+- Difference: Hides SaaS-related features and skips billing process
+- Security note: In production, you must also set `SIMPLE_MODE_CONFIRM=true` to allow startup
+
+---
+
+## Antigravity Support
+
+Sub2API supports [Antigravity](https://antigravity.so/) accounts.
After authorization, dedicated endpoints are available for Claude and Gemini models. + +### Dedicated Endpoints + +| Endpoint | Model | +|----------|-------| +| `/antigravity/v1/messages` | Claude models | +| `/antigravity/v1beta/` | Gemini models | + +### Claude Code Configuration + +```bash +export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity" +export ANTHROPIC_AUTH_TOKEN="sk-xxx" +``` + +### Hybrid Scheduling Mode + +Antigravity accounts support optional **hybrid scheduling**. When enabled, the general endpoints `/v1/messages` and `/v1beta/` will also route requests to Antigravity accounts. + +> **⚠️ Warning**: Anthropic Claude and Antigravity Claude **cannot be mixed within the same conversation context**. Use groups to isolate them properly. + +### Known Issues + +In Claude Code, Plan Mode cannot exit automatically. (Normally when using the native Claude API, after planning is complete, Claude Code will pop up options for users to approve or reject the plan.) + +**Workaround**: Press `Shift + Tab` to manually exit Plan Mode, then type your response to approve or reject the plan. + +--- + +## Project Structure + +``` +sub2api/ +├── backend/ # Go backend service +│ ├── cmd/server/ # Application entry +│ ├── internal/ # Internal modules +│ │ ├── config/ # Configuration +│ │ ├── model/ # Data models +│ │ ├── service/ # Business logic +│ │ ├── handler/ # HTTP handlers +│ │ └── gateway/ # API gateway core +│ └── resources/ # Static resources +│ +├── frontend/ # Vue 3 frontend +│ └── src/ +│ ├── api/ # API calls +│ ├── stores/ # State management +│ ├── views/ # Page components +│ └── components/ # Reusable components +│ +└── deploy/ # Deployment files + ├── docker-compose.yml # Docker Compose configuration + ├── .env.example # Environment variables for Docker Compose + ├── config.example.yaml # Full config file for binary deployment + └── install.sh # One-click installation script +``` + +## License + +MIT License + +--- + +
+ +**If you find this project useful, please give it a star!** + +
diff --git a/README_CN.md b/README_CN.md new file mode 100644 index 00000000..41d399d5 --- /dev/null +++ b/README_CN.md @@ -0,0 +1,463 @@ +# Sub2API + +
+ +[![Go](https://img.shields.io/badge/Go-1.25.5-00ADD8.svg)](https://golang.org/) +[![Vue](https://img.shields.io/badge/Vue-3.4+-4FC08D.svg)](https://vuejs.org/) +[![PostgreSQL](https://img.shields.io/badge/PostgreSQL-15+-336791.svg)](https://www.postgresql.org/) +[![Redis](https://img.shields.io/badge/Redis-7+-DC382D.svg)](https://redis.io/) +[![Docker](https://img.shields.io/badge/Docker-Ready-2496ED.svg)](https://www.docker.com/) + +**AI API 网关平台 - 订阅配额分发管理** + +[English](README.md) | 中文 + +
+ +--- + +## 在线体验 + +体验地址:**https://v2.pincc.ai/** + +演示账号(共享演示环境;自建部署不会自动创建该账号): + +| 邮箱 | 密码 | +|------|------| +| admin@sub2api.com | admin123 | + +## 项目概述 + +Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(如 Claude Code $200/月)的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。 + +## 核心功能 + +- **多账号管理** - 支持多种上游账号类型(OAuth、API Key) +- **API Key 分发** - 为用户生成和管理 API Key +- **精确计费** - Token 级别的用量追踪和成本计算 +- **智能调度** - 智能账号选择,支持粘性会话 +- **并发控制** - 用户级和账号级并发限制 +- **速率限制** - 可配置的请求和 Token 速率限制 +- **管理后台** - Web 界面进行监控和管理 + +## 技术栈 + +| 组件 | 技术 | +|------|------| +| 后端 | Go 1.25.5, Gin, Ent | +| 前端 | Vue 3.4+, Vite 5+, TailwindCSS | +| 数据库 | PostgreSQL 15+ | +| 缓存/队列 | Redis 7+ | + +--- + +## 文档 + +- 依赖安全:`docs/dependency-security.md` + +--- + +## OpenAI Responses 兼容注意事项 + +- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。 +- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。 + +--- + +## 部署方式 + +### 方式一:脚本安装(推荐) + +一键安装脚本,自动从 GitHub Releases 下载预编译的二进制文件。 + +#### 前置条件 + +- Linux 服务器(amd64 或 arm64) +- PostgreSQL 15+(已安装并运行) +- Redis 7+(已安装并运行) +- Root 权限 + +#### 安装步骤 + +```bash +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash +``` + +脚本会自动: +1. 检测系统架构 +2. 下载最新版本 +3. 安装二进制文件到 `/opt/sub2api` +4. 创建 systemd 服务 +5. 配置系统用户和权限 + +#### 安装后配置 + +```bash +# 1. 启动服务 +sudo systemctl start sub2api + +# 2. 设置开机自启 +sudo systemctl enable sub2api + +# 3. 在浏览器中打开设置向导 +# http://你的服务器IP:8080 +``` + +设置向导将引导你完成: +- 数据库配置 +- Redis 配置 +- 管理员账号创建 + +#### 升级 + +可以直接在 **管理后台** 左上角点击 **检测更新** 按钮进行在线升级。 + +网页升级功能支持: +- 自动检测新版本 +- 一键下载并应用更新 +- 支持回滚 + +#### 常用命令 + +```bash +# 查看状态 +sudo systemctl status sub2api + +# 查看日志 +sudo journalctl -u sub2api -f + +# 重启服务 +sudo systemctl restart sub2api + +# 卸载 +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y +``` + +--- + +### 方式二:Docker Compose + +使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。 + +#### 前置条件 + +- Docker 20.10+ +- Docker Compose v2+ + +#### 安装步骤 + +```bash +# 1. 克隆仓库 +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 2. 进入 deploy 目录 +cd deploy + +# 3. 复制环境配置文件 +cp .env.example .env + +# 4. 编辑配置(设置密码等) +nano .env +``` + +**`.env` 必须配置项:** + +```bash +# PostgreSQL 密码(必须修改!) +POSTGRES_PASSWORD=your_secure_password_here + +# 可选:管理员账号 +ADMIN_EMAIL=admin@example.com +ADMIN_PASSWORD=your_admin_password + +# 可选:自定义端口 +SERVER_PORT=8080 + +# 可选:安全配置 +# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验) +SECURITY_URL_ALLOWLIST_ENABLED=false + +# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://) +# ⚠️ 警告:允许 HTTP 会暴露 API 密钥(明文传输) +# 仅建议在以下场景使用: +# - 开发/测试环境 +# - 内部可信网络 +# - 本地测试服务器(http://localhost) +# 生产环境:保持 false 或仅使用 HTTPS URL +SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false + +# 是否允许私有 IP 地址用于上游/定价/CRS(内网部署时使用) +SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false +``` + +```bash +# 5. 启动所有服务 +docker-compose up -d + +# 6. 查看状态 +docker-compose ps + +# 7. 
查看日志 +docker-compose logs -f sub2api +``` + +#### 访问 + +在浏览器中打开 `http://你的服务器IP:8080` + +#### 升级 + +```bash +# 拉取最新镜像并重建容器 +docker-compose pull +docker-compose up -d +``` + +#### 常用命令 + +```bash +# 停止所有服务 +docker-compose down + +# 重启 +docker-compose restart + +# 查看所有日志 +docker-compose logs -f +``` + +--- + +### 方式三:源码编译 + +从源码编译安装,适合开发或定制需求。 + +#### 前置条件 + +- Go 1.21+ +- Node.js 18+ +- PostgreSQL 15+ +- Redis 7+ + +#### 编译步骤 + +```bash +# 1. 克隆仓库 +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api + +# 2. 安装 pnpm(如果还没有安装) +npm install -g pnpm + +# 3. 编译前端 +cd frontend +pnpm install +pnpm run build +# 构建产物输出到 ../backend/internal/web/dist/ + +# 4. 编译后端(嵌入前端) +cd ../backend +go build -tags embed -o sub2api ./cmd/server + +# 5. 创建配置文件 +cp ../deploy/config.example.yaml ./config.yaml + +# 6. 编辑配置 +nano config.yaml +``` + +> **注意:** `-tags embed` 参数会将前端嵌入到二进制文件中。不使用此参数编译的程序将不包含前端界面。 + +**`config.yaml` 关键配置:** + +```yaml +server: + host: "0.0.0.0" + port: 8080 + mode: "release" + +database: + host: "localhost" + port: 5432 + user: "postgres" + password: "your_password" + dbname: "sub2api" + +redis: + host: "localhost" + port: 6379 + password: "" + +jwt: + secret: "change-this-to-a-secure-random-string" + expire_hour: 24 + +default: + user_concurrency: 5 + user_balance: 0 + api_key_prefix: "sk-" + rate_multiplier: 1.0 +``` + +`config.yaml` 还支持以下安全相关配置: + +- `cors.allowed_origins` 配置 CORS 白名单 +- `security.url_allowlist` 配置上游/价格数据/CRS 主机白名单 +- `security.url_allowlist.enabled` 可关闭 URL 校验(慎用) +- `security.url_allowlist.allow_insecure_http` 关闭校验时允许 HTTP URL +- `security.url_allowlist.allow_private_hosts` 允许私有/本地 IP 地址 +- `security.response_headers.enabled` 可启用可配置响应头过滤(关闭时使用默认白名单) +- `security.csp` 配置 Content-Security-Policy +- `billing.circuit_breaker` 计费异常时 fail-closed +- `server.trusted_proxies` 启用可信代理解析 X-Forwarded-For +- `turnstile.required` 在 release 模式强制启用 Turnstile + +**⚠️ 安全警告:HTTP URL 配置** + +当 `security.url_allowlist.enabled=false` 时,系统默认执行最小 URL 校验,**拒绝 HTTP URL**,仅允许 HTTPS。要允许 HTTP URL(例如用于开发或内网测试),必须显式设置: + +```yaml +security: + url_allowlist: + enabled: false # 禁用白名单检查 + allow_insecure_http: true # 允许 HTTP URL(⚠️ 不安全) +``` + +**或通过环境变量:** + +```bash +SECURITY_URL_ALLOWLIST_ENABLED=false +SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true +``` + +**允许 HTTP 的风险:** +- API 密钥和数据以**明文传输**(可被截获) +- 易受**中间人攻击 (MITM)** +- **不适合生产环境** + +**适用场景:** +- ✅ 开发/测试环境的本地服务器(http://localhost) +- ✅ 内网可信端点 +- ✅ 获取 HTTPS 前测试账号连通性 +- ❌ 生产环境(仅使用 HTTPS) + +**未设置此项时的错误示例:** +``` +Invalid base URL: invalid url scheme: http +``` + +如关闭 URL 校验或响应头过滤,请加强网络层防护: +- 出站访问白名单限制上游域名/IP +- 阻断私网/回环/链路本地地址 +- 强制仅允许 TLS 出站 +- 在反向代理层移除敏感响应头 + +```bash +# 6. 
运行应用
+./sub2api
+```
+
+#### 开发模式
+
+```bash
+# 后端(支持热重载)
+cd backend
+go run ./cmd/server
+
+# 前端(支持热重载)
+cd frontend
+pnpm run dev
+```
+
+#### 代码生成
+
+修改 `backend/ent/schema` 后,需要重新生成 Ent + Wire:
+
+```bash
+cd backend
+go generate ./ent
+go generate ./cmd/server
+```
+
+---
+
+## 简易模式
+
+简易模式适合个人开发者或内部团队快速使用,不依赖完整 SaaS 功能。
+
+- 启用方式:设置环境变量 `RUN_MODE=simple`
+- 功能差异:隐藏 SaaS 相关功能,跳过计费流程
+- 安全注意事项:生产环境需同时设置 `SIMPLE_MODE_CONFIRM=true` 才允许启动
+
+---
+
+## Antigravity 使用说明
+
+Sub2API 支持 [Antigravity](https://antigravity.so/) 账户,授权后可通过专用端点访问 Claude 和 Gemini 模型。
+
+### 专用端点
+
+| 端点 | 模型 |
+|------|------|
+| `/antigravity/v1/messages` | Claude 模型 |
+| `/antigravity/v1beta/` | Gemini 模型 |
+
+### Claude Code 配置示例
+
+```bash
+export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
+export ANTHROPIC_AUTH_TOKEN="sk-xxx"
+```
+
+### 混合调度模式
+
+Antigravity 账户支持可选的**混合调度**功能。开启后,通用端点 `/v1/messages` 和 `/v1beta/` 也会调度该账户。
+
+> **⚠️ 注意**:Anthropic Claude 和 Antigravity Claude **不能在同一上下文中混合使用**,请通过分组功能做好隔离。
+
+### 已知问题
+
+在 Claude Code 中,无法自动退出 Plan Mode。(正常使用原生 Claude API 时,Plan 完成后,Claude Code 会弹出选项让用户同意或拒绝 Plan。)
+
+**解决办法**:按 Shift + Tab 手动退出 Plan Mode,然后输入内容告诉 Claude Code 同意或拒绝 Plan。
+
+---
+
+## 项目结构
+
+```
+sub2api/
+├── backend/                # Go 后端服务
+│   ├── cmd/server/         # 应用入口
+│   ├── internal/           # 内部模块
+│   │   ├── config/         # 配置管理
+│   │   ├── model/          # 数据模型
+│   │   ├── service/        # 业务逻辑
+│   │   ├── handler/        # HTTP 处理器
+│   │   └── gateway/        # API 网关核心
+│   └── resources/          # 静态资源
+│
+├── frontend/               # Vue 3 前端
+│   └── src/
+│       ├── api/            # API 调用
+│       ├── stores/         # 状态管理
+│       ├── views/          # 页面组件
+│       └── components/     # 通用组件
+│
+└── deploy/                 # 部署文件
+    ├── docker-compose.yml  # Docker Compose 配置
+    ├── .env.example        # Docker Compose 环境变量
+    ├── config.example.yaml # 二进制部署完整配置文件
+    └── install.sh          # 一键安装脚本
+```
+
+## 许可证
+
+MIT License
+
+---
+
+<div align="center">
+ +**如果觉得有用,请给个 Star 支持一下!** + +
diff --git a/backend/.dockerignore b/backend/.dockerignore new file mode 100644 index 00000000..c1c2a854 --- /dev/null +++ b/backend/.dockerignore @@ -0,0 +1,2 @@ +.cache/ +.DS_Store diff --git a/backend/.golangci.yml b/backend/.golangci.yml new file mode 100644 index 00000000..3ec692a8 --- /dev/null +++ b/backend/.golangci.yml @@ -0,0 +1,599 @@ +version: "2" + +linters: + default: none + enable: + - depguard + - errcheck + - govet + - ineffassign + - staticcheck + - unused + + settings: + depguard: + rules: + # Enforce: service must not depend on repository. + service-no-repository: + list-mode: original + files: + - "**/internal/service/**" + - "!**/internal/service/ops_aggregation_service.go" + - "!**/internal/service/ops_alert_evaluator_service.go" + - "!**/internal/service/ops_cleanup_service.go" + - "!**/internal/service/ops_metrics_collector.go" + - "!**/internal/service/ops_scheduled_report_service.go" + - "!**/internal/service/wire.go" + deny: + - pkg: github.com/Wei-Shaw/sub2api/internal/repository + desc: "service must not import repository" + - pkg: gorm.io/gorm + desc: "service must not import gorm" + - pkg: github.com/redis/go-redis/v9 + desc: "service must not import redis" + handler-no-repository: + list-mode: original + files: + - "**/internal/handler/**" + deny: + - pkg: github.com/Wei-Shaw/sub2api/internal/repository + desc: "handler must not import repository" + - pkg: gorm.io/gorm + desc: "handler must not import gorm" + - pkg: github.com/redis/go-redis/v9 + desc: "handler must not import redis" + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + # Such cases aren't reported by default. + # Default: false + check-blank: false + # To disable the errcheck built-in exclude list. + # See `-excludeonly` option in https://github.com/kisielk/errcheck#excluding-functions for details. + # Default: false + disable-default-exclusions: true + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - io/ioutil.ReadFile + - io.Copy(*bytes.Buffer) + - io.Copy(os.Stdout) + - fmt.Println + - fmt.Print + - fmt.Printf + - fmt.Fprint + - fmt.Fprintf + - fmt.Fprintln + # Display function signature instead of selector. + # Default: false + verbose: true + ineffassign: + # Check escaping variables of type error, may cause false positives. 
+ # Default: false + check-escaping-errors: true + staticcheck: + # https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist + # Default: ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"] + dot-import-whitelist: + - fmt + # https://staticcheck.dev/docs/configuration/options/#initialisms + # Default: ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"] + initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ] + # https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist + # Default: ["200", "400", "404", "500"] + http-status-code-whitelist: [ "200", "400", "404", "500" ] + # SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks + # Example (to disable some checks): [ "all", "-SA1000", "-SA1001"] + # Run `GL_DEBUG=staticcheck golangci-lint run --enable=staticcheck` to see all available checks and enabled by config checks. + # Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"] + # Temporarily disable style checks to allow CI to pass + checks: + - all + - -ST1000 # Package comment format + - -ST1003 # Poorly chosen identifier (ApiKey vs APIKey) + - -ST1020 # Comment on exported method format + - -ST1021 # Comment on exported type format + - -ST1022 # Comment on exported variable format + # Invalid regular expression. + # https://staticcheck.dev/docs/checks/#SA1000 + - SA1000 + # Invalid template. + # https://staticcheck.dev/docs/checks/#SA1001 + - SA1001 + # Invalid format in 'time.Parse'. + # https://staticcheck.dev/docs/checks/#SA1002 + - SA1002 + # Unsupported argument to functions in 'encoding/binary'. + # https://staticcheck.dev/docs/checks/#SA1003 + - SA1003 + # Suspiciously small untyped constant in 'time.Sleep'. + # https://staticcheck.dev/docs/checks/#SA1004 + - SA1004 + # Invalid first argument to 'exec.Command'. + # https://staticcheck.dev/docs/checks/#SA1005 + - SA1005 + # 'Printf' with dynamic first argument and no further arguments. + # https://staticcheck.dev/docs/checks/#SA1006 + - SA1006 + # Invalid URL in 'net/url.Parse'. + # https://staticcheck.dev/docs/checks/#SA1007 + - SA1007 + # Non-canonical key in 'http.Header' map. + # https://staticcheck.dev/docs/checks/#SA1008 + - SA1008 + # '(*regexp.Regexp).FindAll' called with 'n == 0', which will always return zero results. + # https://staticcheck.dev/docs/checks/#SA1010 + - SA1010 + # Various methods in the "strings" package expect valid UTF-8, but invalid input is provided. + # https://staticcheck.dev/docs/checks/#SA1011 + - SA1011 + # A nil 'context.Context' is being passed to a function, consider using 'context.TODO' instead. + # https://staticcheck.dev/docs/checks/#SA1012 + - SA1012 + # 'io.Seeker.Seek' is being called with the whence constant as the first argument, but it should be the second. + # https://staticcheck.dev/docs/checks/#SA1013 + - SA1013 + # Non-pointer value passed to 'Unmarshal' or 'Decode'. 
+ # https://staticcheck.dev/docs/checks/#SA1014 + - SA1014 + # Using 'time.Tick' in a way that will leak. Consider using 'time.NewTicker', and only use 'time.Tick' in tests, commands and endless functions. + # https://staticcheck.dev/docs/checks/#SA1015 + - SA1015 + # Trapping a signal that cannot be trapped. + # https://staticcheck.dev/docs/checks/#SA1016 + - SA1016 + # Channels used with 'os/signal.Notify' should be buffered. + # https://staticcheck.dev/docs/checks/#SA1017 + - SA1017 + # 'strings.Replace' called with 'n == 0', which does nothing. + # https://staticcheck.dev/docs/checks/#SA1018 + - SA1018 + # Using a deprecated function, variable, constant or field. + # https://staticcheck.dev/docs/checks/#SA1019 + - SA1019 + # Using an invalid host:port pair with a 'net.Listen'-related function. + # https://staticcheck.dev/docs/checks/#SA1020 + - SA1020 + # Using 'bytes.Equal' to compare two 'net.IP'. + # https://staticcheck.dev/docs/checks/#SA1021 + - SA1021 + # Modifying the buffer in an 'io.Writer' implementation. + # https://staticcheck.dev/docs/checks/#SA1023 + - SA1023 + # A string cutset contains duplicate characters. + # https://staticcheck.dev/docs/checks/#SA1024 + - SA1024 + # It is not possible to use '(*time.Timer).Reset''s return value correctly. + # https://staticcheck.dev/docs/checks/#SA1025 + - SA1025 + # Cannot marshal channels or functions. + # https://staticcheck.dev/docs/checks/#SA1026 + - SA1026 + # Atomic access to 64-bit variable must be 64-bit aligned. + # https://staticcheck.dev/docs/checks/#SA1027 + - SA1027 + # 'sort.Slice' can only be used on slices. + # https://staticcheck.dev/docs/checks/#SA1028 + - SA1028 + # Inappropriate key in call to 'context.WithValue'. + # https://staticcheck.dev/docs/checks/#SA1029 + - SA1029 + # Invalid argument in call to a 'strconv' function. + # https://staticcheck.dev/docs/checks/#SA1030 + - SA1030 + # Overlapping byte slices passed to an encoder. + # https://staticcheck.dev/docs/checks/#SA1031 + - SA1031 + # Wrong order of arguments to 'errors.Is'. + # https://staticcheck.dev/docs/checks/#SA1032 + - SA1032 + # 'sync.WaitGroup.Add' called inside the goroutine, leading to a race condition. + # https://staticcheck.dev/docs/checks/#SA2000 + - SA2000 + # Empty critical section, did you mean to defer the unlock?. + # https://staticcheck.dev/docs/checks/#SA2001 + - SA2001 + # Called 'testing.T.FailNow' or 'SkipNow' in a goroutine, which isn't allowed. + # https://staticcheck.dev/docs/checks/#SA2002 + - SA2002 + # Deferred 'Lock' right after locking, likely meant to defer 'Unlock' instead. + # https://staticcheck.dev/docs/checks/#SA2003 + - SA2003 + # 'TestMain' doesn't call 'os.Exit', hiding test failures. + # https://staticcheck.dev/docs/checks/#SA3000 + - SA3000 + # Assigning to 'b.N' in benchmarks distorts the results. + # https://staticcheck.dev/docs/checks/#SA3001 + - SA3001 + # Binary operator has identical expressions on both sides. + # https://staticcheck.dev/docs/checks/#SA4000 + - SA4000 + # '&*x' gets simplified to 'x', it does not copy 'x'. + # https://staticcheck.dev/docs/checks/#SA4001 + - SA4001 + # Comparing unsigned values against negative values is pointless. + # https://staticcheck.dev/docs/checks/#SA4003 + - SA4003 + # The loop exits unconditionally after one iteration. + # https://staticcheck.dev/docs/checks/#SA4004 + - SA4004 + # Field assignment that will never be observed. Did you mean to use a pointer receiver?. 
+ # https://staticcheck.dev/docs/checks/#SA4005 + - SA4005 + # A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?. + # https://staticcheck.dev/docs/checks/#SA4006 + - SA4006 + # The variable in the loop condition never changes, are you incrementing the wrong variable?. + # https://staticcheck.dev/docs/checks/#SA4008 + - SA4008 + # A function argument is overwritten before its first use. + # https://staticcheck.dev/docs/checks/#SA4009 + - SA4009 + # The result of 'append' will never be observed anywhere. + # https://staticcheck.dev/docs/checks/#SA4010 + - SA4010 + # Break statement with no effect. Did you mean to break out of an outer loop?. + # https://staticcheck.dev/docs/checks/#SA4011 + - SA4011 + # Comparing a value against NaN even though no value is equal to NaN. + # https://staticcheck.dev/docs/checks/#SA4012 + - SA4012 + # Negating a boolean twice ('!!b') is the same as writing 'b'. This is either redundant, or a typo. + # https://staticcheck.dev/docs/checks/#SA4013 + - SA4013 + # An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either. + # https://staticcheck.dev/docs/checks/#SA4014 + - SA4014 + # Calling functions like 'math.Ceil' on floats converted from integers doesn't do anything useful. + # https://staticcheck.dev/docs/checks/#SA4015 + - SA4015 + # Certain bitwise operations, such as 'x ^ 0', do not do anything useful. + # https://staticcheck.dev/docs/checks/#SA4016 + - SA4016 + # Discarding the return values of a function without side effects, making the call pointless. + # https://staticcheck.dev/docs/checks/#SA4017 + - SA4017 + # Self-assignment of variables. + # https://staticcheck.dev/docs/checks/#SA4018 + - SA4018 + # Multiple, identical build constraints in the same file. + # https://staticcheck.dev/docs/checks/#SA4019 + - SA4019 + # Unreachable case clause in a type switch. + # https://staticcheck.dev/docs/checks/#SA4020 + - SA4020 + # "x = append(y)" is equivalent to "x = y". + # https://staticcheck.dev/docs/checks/#SA4021 + - SA4021 + # Comparing the address of a variable against nil. + # https://staticcheck.dev/docs/checks/#SA4022 + - SA4022 + # Impossible comparison of interface value with untyped nil. + # https://staticcheck.dev/docs/checks/#SA4023 + - SA4023 + # Checking for impossible return value from a builtin function. + # https://staticcheck.dev/docs/checks/#SA4024 + - SA4024 + # Integer division of literals that results in zero. + # https://staticcheck.dev/docs/checks/#SA4025 + - SA4025 + # Go constants cannot express negative zero. + # https://staticcheck.dev/docs/checks/#SA4026 + - SA4026 + # '(*net/url.URL).Query' returns a copy, modifying it doesn't change the URL. + # https://staticcheck.dev/docs/checks/#SA4027 + - SA4027 + # 'x % 1' is always zero. + # https://staticcheck.dev/docs/checks/#SA4028 + - SA4028 + # Ineffective attempt at sorting slice. + # https://staticcheck.dev/docs/checks/#SA4029 + - SA4029 + # Ineffective attempt at generating random number. + # https://staticcheck.dev/docs/checks/#SA4030 + - SA4030 + # Checking never-nil value against nil. + # https://staticcheck.dev/docs/checks/#SA4031 + - SA4031 + # Comparing 'runtime.GOOS' or 'runtime.GOARCH' against impossible value. + # https://staticcheck.dev/docs/checks/#SA4032 + - SA4032 + # Assignment to nil map. + # https://staticcheck.dev/docs/checks/#SA5000 + - SA5000 + # Deferring 'Close' before checking for a possible error. 
+ # https://staticcheck.dev/docs/checks/#SA5001 + - SA5001 + # The empty for loop ("for {}") spins and can block the scheduler. + # https://staticcheck.dev/docs/checks/#SA5002 + - SA5002 + # Defers in infinite loops will never execute. + # https://staticcheck.dev/docs/checks/#SA5003 + - SA5003 + # "for { select { ..." with an empty default branch spins. + # https://staticcheck.dev/docs/checks/#SA5004 + - SA5004 + # The finalizer references the finalized object, preventing garbage collection. + # https://staticcheck.dev/docs/checks/#SA5005 + - SA5005 + # Infinite recursive call. + # https://staticcheck.dev/docs/checks/#SA5007 + - SA5007 + # Invalid struct tag. + # https://staticcheck.dev/docs/checks/#SA5008 + - SA5008 + # Invalid Printf call. + # https://staticcheck.dev/docs/checks/#SA5009 + - SA5009 + # Impossible type assertion. + # https://staticcheck.dev/docs/checks/#SA5010 + - SA5010 + # Possible nil pointer dereference. + # https://staticcheck.dev/docs/checks/#SA5011 + - SA5011 + # Passing odd-sized slice to function expecting even size. + # https://staticcheck.dev/docs/checks/#SA5012 + - SA5012 + # Using 'regexp.Match' or related in a loop, should use 'regexp.Compile'. + # https://staticcheck.dev/docs/checks/#SA6000 + - SA6000 + # Missing an optimization opportunity when indexing maps by byte slices. + # https://staticcheck.dev/docs/checks/#SA6001 + - SA6001 + # Storing non-pointer values in 'sync.Pool' allocates memory. + # https://staticcheck.dev/docs/checks/#SA6002 + - SA6002 + # Converting a string to a slice of runes before ranging over it. + # https://staticcheck.dev/docs/checks/#SA6003 + - SA6003 + # Inefficient string comparison with 'strings.ToLower' or 'strings.ToUpper'. + # https://staticcheck.dev/docs/checks/#SA6005 + - SA6005 + # Using io.WriteString to write '[]byte'. + # https://staticcheck.dev/docs/checks/#SA6006 + - SA6006 + # Defers in range loops may not run when you expect them to. + # https://staticcheck.dev/docs/checks/#SA9001 + - SA9001 + # Using a non-octal 'os.FileMode' that looks like it was meant to be in octal. + # https://staticcheck.dev/docs/checks/#SA9002 + - SA9002 + # Empty body in an if or else branch. + # https://staticcheck.dev/docs/checks/#SA9003 + - SA9003 + # Only the first constant has an explicit type. + # https://staticcheck.dev/docs/checks/#SA9004 + - SA9004 + # Trying to marshal a struct with no public fields nor custom marshaling. + # https://staticcheck.dev/docs/checks/#SA9005 + - SA9005 + # Dubious bit shifting of a fixed size integer value. + # https://staticcheck.dev/docs/checks/#SA9006 + - SA9006 + # Deleting a directory that shouldn't be deleted. + # https://staticcheck.dev/docs/checks/#SA9007 + - SA9007 + # 'else' branch of a type assertion is probably not reading the right value. + # https://staticcheck.dev/docs/checks/#SA9008 + - SA9008 + # Ineffectual Go compiler directive. + # https://staticcheck.dev/docs/checks/#SA9009 + - SA9009 + # NOTE: ST1000, ST1001, ST1003, ST1020, ST1021, ST1022 are disabled above + # Incorrectly formatted error string. + # https://staticcheck.dev/docs/checks/#ST1005 + - ST1005 + # Poorly chosen receiver name. + # https://staticcheck.dev/docs/checks/#ST1006 + - ST1006 + # A function's error value should be its last return value. + # https://staticcheck.dev/docs/checks/#ST1008 + - ST1008 + # Poorly chosen name for variable of type 'time.Duration'. + # https://staticcheck.dev/docs/checks/#ST1011 + - ST1011 + # Poorly chosen name for error variable. 
+ # https://staticcheck.dev/docs/checks/#ST1012 + - ST1012 + # Should use constants for HTTP error codes, not magic numbers. + # https://staticcheck.dev/docs/checks/#ST1013 + - ST1013 + # A switch's default case should be the first or last case. + # https://staticcheck.dev/docs/checks/#ST1015 + - ST1015 + # Use consistent method receiver names. + # https://staticcheck.dev/docs/checks/#ST1016 + - ST1016 + # Don't use Yoda conditions. + # https://staticcheck.dev/docs/checks/#ST1017 + - ST1017 + # Avoid zero-width and control characters in string literals. + # https://staticcheck.dev/docs/checks/#ST1018 + - ST1018 + # Importing the same package multiple times. + # https://staticcheck.dev/docs/checks/#ST1019 + - ST1019 + # NOTE: ST1020, ST1021, ST1022 removed (disabled above) + # Redundant type in variable declaration. + # https://staticcheck.dev/docs/checks/#ST1023 + - ST1023 + # Use plain channel send or receive instead of single-case select. + # https://staticcheck.dev/docs/checks/#S1000 + - S1000 + # Replace for loop with call to copy. + # https://staticcheck.dev/docs/checks/#S1001 + - S1001 + # Omit comparison with boolean constant. + # https://staticcheck.dev/docs/checks/#S1002 + - S1002 + # Replace call to 'strings.Index' with 'strings.Contains'. + # https://staticcheck.dev/docs/checks/#S1003 + - S1003 + # Replace call to 'bytes.Compare' with 'bytes.Equal'. + # https://staticcheck.dev/docs/checks/#S1004 + - S1004 + # Drop unnecessary use of the blank identifier. + # https://staticcheck.dev/docs/checks/#S1005 + - S1005 + # Use "for { ... }" for infinite loops. + # https://staticcheck.dev/docs/checks/#S1006 + - S1006 + # Simplify regular expression by using raw string literal. + # https://staticcheck.dev/docs/checks/#S1007 + - S1007 + # Simplify returning boolean expression. + # https://staticcheck.dev/docs/checks/#S1008 + - S1008 + # Omit redundant nil check on slices, maps, and channels. + # https://staticcheck.dev/docs/checks/#S1009 + - S1009 + # Omit default slice index. + # https://staticcheck.dev/docs/checks/#S1010 + - S1010 + # Use a single 'append' to concatenate two slices. + # https://staticcheck.dev/docs/checks/#S1011 + - S1011 + # Replace 'time.Now().Sub(x)' with 'time.Since(x)'. + # https://staticcheck.dev/docs/checks/#S1012 + - S1012 + # Use a type conversion instead of manually copying struct fields. + # https://staticcheck.dev/docs/checks/#S1016 + - S1016 + # Replace manual trimming with 'strings.TrimPrefix'. + # https://staticcheck.dev/docs/checks/#S1017 + - S1017 + # Use "copy" for sliding elements. + # https://staticcheck.dev/docs/checks/#S1018 + - S1018 + # Simplify "make" call by omitting redundant arguments. + # https://staticcheck.dev/docs/checks/#S1019 + - S1019 + # Omit redundant nil check in type assertion. + # https://staticcheck.dev/docs/checks/#S1020 + - S1020 + # Merge variable declaration and assignment. + # https://staticcheck.dev/docs/checks/#S1021 + - S1021 + # Omit redundant control flow. + # https://staticcheck.dev/docs/checks/#S1023 + - S1023 + # Replace 'x.Sub(time.Now())' with 'time.Until(x)'. + # https://staticcheck.dev/docs/checks/#S1024 + - S1024 + # Don't use 'fmt.Sprintf("%s", x)' unnecessarily. + # https://staticcheck.dev/docs/checks/#S1025 + - S1025 + # Simplify error construction with 'fmt.Errorf'. + # https://staticcheck.dev/docs/checks/#S1028 + - S1028 + # Range over the string directly. + # https://staticcheck.dev/docs/checks/#S1029 + - S1029 + # Use 'bytes.Buffer.String' or 'bytes.Buffer.Bytes'. 
+ # https://staticcheck.dev/docs/checks/#S1030 + - S1030 + # Omit redundant nil check around loop. + # https://staticcheck.dev/docs/checks/#S1031 + - S1031 + # Use 'sort.Ints(x)', 'sort.Float64s(x)', and 'sort.Strings(x)'. + # https://staticcheck.dev/docs/checks/#S1032 + - S1032 + # Unnecessary guard around call to "delete". + # https://staticcheck.dev/docs/checks/#S1033 + - S1033 + # Use result of type assertion to simplify cases. + # https://staticcheck.dev/docs/checks/#S1034 + - S1034 + # Redundant call to 'net/http.CanonicalHeaderKey' in method call on 'net/http.Header'. + # https://staticcheck.dev/docs/checks/#S1035 + - S1035 + # Unnecessary guard around map access. + # https://staticcheck.dev/docs/checks/#S1036 + - S1036 + # Elaborate way of sleeping. + # https://staticcheck.dev/docs/checks/#S1037 + - S1037 + # Unnecessarily complex way of printing formatted string. + # https://staticcheck.dev/docs/checks/#S1038 + - S1038 + # Unnecessary use of 'fmt.Sprint'. + # https://staticcheck.dev/docs/checks/#S1039 + - S1039 + # Type assertion to current type. + # https://staticcheck.dev/docs/checks/#S1040 + - S1040 + # Apply De Morgan's law. + # https://staticcheck.dev/docs/checks/#QF1001 + - QF1001 + # Convert untagged switch to tagged switch. + # https://staticcheck.dev/docs/checks/#QF1002 + - QF1002 + # Convert if/else-if chain to tagged switch. + # https://staticcheck.dev/docs/checks/#QF1003 + - QF1003 + # Use 'strings.ReplaceAll' instead of 'strings.Replace' with 'n == -1'. + # https://staticcheck.dev/docs/checks/#QF1004 + - QF1004 + # Expand call to 'math.Pow'. + # https://staticcheck.dev/docs/checks/#QF1005 + - QF1005 + # Lift 'if'+'break' into loop condition. + # https://staticcheck.dev/docs/checks/#QF1006 + - QF1006 + # Merge conditional assignment into variable declaration. + # https://staticcheck.dev/docs/checks/#QF1007 + - QF1007 + # Omit embedded fields from selector expression. + # https://staticcheck.dev/docs/checks/#QF1008 + - QF1008 + # Use 'time.Time.Equal' instead of '==' operator. + # https://staticcheck.dev/docs/checks/#QF1009 + - QF1009 + # Convert slice of bytes to string when printing it. + # https://staticcheck.dev/docs/checks/#QF1010 + - QF1010 + # Omit redundant type from variable declaration. + # https://staticcheck.dev/docs/checks/#QF1011 + - QF1011 + # Use 'fmt.Fprintf(x, ...)' instead of 'x.Write(fmt.Sprintf(...))'. + # https://staticcheck.dev/docs/checks/#QF1012 + - QF1012 + unused: + # Mark all struct fields that have been written to as used. + # Default: true + field-writes-are-uses: false + # Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write. + # Default: false + post-statements-are-reads: true + # Mark all exported fields as used. + # default: true + exported-fields-are-used: false + # Mark all function parameters as used. + # default: true + parameters-are-used: true + # Mark all local variables as used. + # default: true + local-variables-are-used: false + # Mark all identifiers inside generated files as used. + # Default: true + generated-is-used: false + +formatters: + enable: + - gofmt + settings: + gofmt: + # Simplify code: gofmt with `-s` option. + # Default: true + simplify: false + # Apply the rewrite rules to the source before reformatting. 
+
+formatters:
+  enable:
+    - gofmt
+  settings:
+    gofmt:
+      # Simplify code: gofmt with `-s` option.
+      # Default: true
+      simplify: false
+      # Apply the rewrite rules to the source before reformatting.
+      # https://pkg.go.dev/cmd/gofmt
+      # Default: []
+      rewrite-rules:
+        - pattern: 'interface{}'
+          replacement: 'any'
+        - pattern: 'a[b:len(a)]'
+          replacement: 'a[b:]'
diff --git a/backend/Dockerfile b/backend/Dockerfile
new file mode 100644
index 00000000..770fdedf
--- /dev/null
+++ b/backend/Dockerfile
@@ -0,0 +1,24 @@
+FROM golang:1.25.5-alpine
+
+WORKDIR /app
+
+# Install required tools
+RUN apk add --no-cache git
+
+# Copy go.mod and go.sum first so the dependency layer stays cached
+COPY go.mod go.sum ./
+
+# Download dependencies
+RUN go mod download
+
+# Copy the source code
+COPY . .
+
+# Build the application (build the package, not just main.go, so that
+# wire_gen.go and the other files in cmd/server are included)
+RUN go build -o main ./cmd/server
+
+# Expose the service port
+EXPOSE 8080
+
+# Run the application
+CMD ["./main"]
diff --git a/backend/Makefile b/backend/Makefile
new file mode 100644
index 00000000..6a5d2caa
--- /dev/null
+++ b/backend/Makefile
@@ -0,0 +1,17 @@
+.PHONY: build test test-unit test-integration test-e2e
+
+build:
+	go build -o bin/server ./cmd/server
+
+test:
+	go test ./...
+	golangci-lint run ./...
+
+test-unit:
+	go test -tags=unit ./...
+
+test-integration:
+	go test -tags=integration ./...
+
+test-e2e:
+	go test -tags=e2e ./...
diff --git a/backend/cmd/jwtgen/main.go b/backend/cmd/jwtgen/main.go
new file mode 100644
index 00000000..c461198b
--- /dev/null
+++ b/backend/cmd/jwtgen/main.go
@@ -0,0 +1,57 @@
+package main
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"log"
+	"time"
+
+	_ "github.com/Wei-Shaw/sub2api/ent/runtime"
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/repository"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+func main() {
+	email := flag.String("email", "", "Admin email to issue a JWT for (defaults to first active admin)")
+	flag.Parse()
+
+	cfg, err := config.Load()
+	if err != nil {
+		log.Fatalf("failed to load config: %v", err)
+	}
+
+	client, sqlDB, err := repository.InitEnt(cfg)
+	if err != nil {
+		log.Fatalf("failed to init db: %v", err)
+	}
+	defer func() {
+		if err := client.Close(); err != nil {
+			log.Printf("failed to close db: %v", err)
+		}
+	}()
+
+	userRepo := repository.NewUserRepository(client, sqlDB)
+	authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil, nil)
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	var user *service.User
+	if *email != "" {
+		user, err = userRepo.GetByEmail(ctx, *email)
+	} else {
+		user, err = userRepo.GetFirstAdmin(ctx)
+	}
+	if err != nil {
+		log.Fatalf("failed to resolve admin user: %v", err)
+	}
+
+	token, err := authService.GenerateToken(user)
+	if err != nil {
+		log.Fatalf("failed to generate token: %v", err)
+	}
+
+	fmt.Printf("ADMIN_EMAIL=%s\nADMIN_USER_ID=%d\nJWT=%s\n", user.Email, user.ID, token)
+}
diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION
new file mode 100644
index 00000000..79e0dd8a
--- /dev/null
+++ b/backend/cmd/server/VERSION
@@ -0,0 +1 @@
+0.1.46
diff --git a/backend/cmd/server/main.go b/backend/cmd/server/main.go
new file mode 100644
index 00000000..c9dc57bb
--- /dev/null
+++ b/backend/cmd/server/main.go
@@ -0,0 +1,155 @@
+package main
+
+//go:generate go run github.com/google/wire/cmd/wire
+
+import (
+	"context"
+	_ "embed"
+	"errors"
+	"flag"
+	"log"
+	"net/http"
+	"os"
+	"os/signal"
+	"strings"
+	"syscall"
+	"time"
+
+	_ "github.com/Wei-Shaw/sub2api/ent/runtime"
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/setup"
+	"github.com/Wei-Shaw/sub2api/internal/web"
+
+	"github.com/gin-gonic/gin"
+)
+
+//go:embed VERSION
+var embeddedVersion string
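+
+// Example (illustrative values, added commentary): the build-time variables
+// below can be stamped at link time with -X, e.g.
+//
+//	go build -ldflags "-X main.Commit=$(git rev-parse --short HEAD) -X main.Date=$(date -u +%FT%TZ) -X main.BuildType=release" ./cmd/server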
+
+// Build-time variables (can be set by ldflags)
+var (
+	Version   = ""
+	Commit    = "unknown"
+	Date      = "unknown"
+	BuildType = "source" // "source" for manual builds, "release" for CI builds (set by ldflags)
+)
+
+func init() {
+	// Read version from embedded VERSION file
+	Version = strings.TrimSpace(embeddedVersion)
+	if Version == "" {
+		Version = "0.0.0-dev"
+	}
+}
+
+func main() {
+	// Parse command line flags
+	setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
+	showVersion := flag.Bool("version", false, "Show version information")
+	flag.Parse()
+
+	if *showVersion {
+		log.Printf("Sub2API %s (commit: %s, built: %s)\n", Version, Commit, Date)
+		return
+	}
+
+	// CLI setup mode
+	if *setupMode {
+		if err := setup.RunCLI(); err != nil {
+			log.Fatalf("Setup failed: %v", err)
+		}
+		return
+	}
+
+	// Check if setup is needed
+	if setup.NeedsSetup() {
+		// Check if auto-setup is enabled (for Docker deployment)
+		if setup.AutoSetupEnabled() {
+			log.Println("Auto setup mode enabled...")
+			if err := setup.AutoSetupFromEnv(); err != nil {
+				log.Fatalf("Auto setup failed: %v", err)
+			}
+			// Continue to main server after auto-setup
+		} else {
+			log.Println("First run detected, starting setup wizard...")
+			runSetupServer()
+			return
+		}
+	}
+
+	// Normal server mode
+	runMainServer()
+}
+
+func runSetupServer() {
+	r := gin.New()
+	r.Use(middleware.Recovery())
+	r.Use(middleware.CORS(config.CORSConfig{}))
+	r.Use(middleware.SecurityHeaders(config.CSPConfig{Enabled: true, Policy: config.DefaultCSPPolicy}))
+
+	// Register setup routes
+	setup.RegisterRoutes(r)
+
+	// Serve embedded frontend if available
+	if web.HasEmbeddedFrontend() {
+		r.Use(web.ServeEmbeddedFrontend())
+	}
+
+	// Get server address from config.yaml or environment variables (SERVER_HOST, SERVER_PORT)
+	// This allows users to run setup on a different address if needed
+	addr := config.GetServerAddress()
+	log.Printf("Setup wizard available at http://%s", addr)
+	log.Println("Complete the setup wizard to configure Sub2API")
+
+	if err := r.Run(addr); err != nil {
+		log.Fatalf("Failed to start setup server: %v", err)
+	}
+}
+
+func runMainServer() {
+	cfg, err := config.Load()
+	if err != nil {
+		log.Fatalf("Failed to load config: %v", err)
+	}
+	if cfg.RunMode == config.RunModeSimple {
+		log.Println("⚠️ WARNING: Running in SIMPLE mode - billing and quota checks are DISABLED")
+	}
+
+	buildInfo := handler.BuildInfo{
+		Version:   Version,
+		BuildType: BuildType,
+	}
+
+	app, err := initializeApplication(buildInfo)
+	if err != nil {
+		log.Fatalf("Failed to initialize application: %v", err)
+	}
+	defer app.Cleanup()
+
+	// Start the HTTP server
+	go func() {
+		if err := app.Server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+			log.Fatalf("Failed to start server: %v", err)
+		}
+	}()
+
+	log.Printf("Server started on %s", app.Server.Addr)
+
+	// Wait for an interrupt signal
+	quit := make(chan os.Signal, 1)
+	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+	<-quit
+
+	log.Println("Shutting down server...")
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	if err := app.Server.Shutdown(ctx); err != nil {
+		log.Fatalf("Server forced to shutdown: %v", err)
+	}
+
+	log.Println("Server exited")
+}
diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go
new file mode 100644
index 00000000..0a5f9744
--- /dev/null
+++ b/backend/cmd/server/wire.go
@@ -0,0 +1,187 @@
+//go:build wireinject
+// +build wireinject
+
+package main
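+
+// Note (added commentary): the wireinject build tag keeps this injector
+// template out of normal builds; wire_gen.go below is what actually compiles.
+// After changing the provider set, regenerate with `go generate ./cmd/server`
+// (assuming the google/wire tool is available).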
"log" + "net/http" + "time" + + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/handler" + "github.com/Wei-Shaw/sub2api/internal/repository" + "github.com/Wei-Shaw/sub2api/internal/server" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/google/wire" + "github.com/redis/go-redis/v9" +) + +type Application struct { + Server *http.Server + Cleanup func() +} + +func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { + wire.Build( + // Infrastructure layer ProviderSets + config.ProviderSet, + + // Business layer ProviderSets + repository.ProviderSet, + service.ProviderSet, + middleware.ProviderSet, + handler.ProviderSet, + + // Server layer ProviderSet + server.ProviderSet, + + // BuildInfo provider + provideServiceBuildInfo, + + // Cleanup function provider + provideCleanup, + + // Application struct + wire.Struct(new(Application), "Server", "Cleanup"), + ) + return nil, nil +} + +func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { + return service.BuildInfo{ + Version: buildInfo.Version, + BuildType: buildInfo.BuildType, + } +} + +func provideCleanup( + entClient *ent.Client, + rdb *redis.Client, + opsMetricsCollector *service.OpsMetricsCollector, + opsAggregation *service.OpsAggregationService, + opsAlertEvaluator *service.OpsAlertEvaluatorService, + opsCleanup *service.OpsCleanupService, + opsScheduledReport *service.OpsScheduledReportService, + schedulerSnapshot *service.SchedulerSnapshotService, + tokenRefresh *service.TokenRefreshService, + accountExpiry *service.AccountExpiryService, + pricing *service.PricingService, + emailQueue *service.EmailQueueService, + billingCache *service.BillingCacheService, + oauth *service.OAuthService, + openaiOAuth *service.OpenAIOAuthService, + geminiOAuth *service.GeminiOAuthService, + antigravityOAuth *service.AntigravityOAuthService, +) func() { + return func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + // Cleanup steps in reverse dependency order + cleanupSteps := []struct { + name string + fn func() error + }{ + {"OpsScheduledReportService", func() error { + if opsScheduledReport != nil { + opsScheduledReport.Stop() + } + return nil + }}, + {"OpsCleanupService", func() error { + if opsCleanup != nil { + opsCleanup.Stop() + } + return nil + }}, + {"OpsAlertEvaluatorService", func() error { + if opsAlertEvaluator != nil { + opsAlertEvaluator.Stop() + } + return nil + }}, + {"OpsAggregationService", func() error { + if opsAggregation != nil { + opsAggregation.Stop() + } + return nil + }}, + {"OpsMetricsCollector", func() error { + if opsMetricsCollector != nil { + opsMetricsCollector.Stop() + } + return nil + }}, + {"SchedulerSnapshotService", func() error { + if schedulerSnapshot != nil { + schedulerSnapshot.Stop() + } + return nil + }}, + {"TokenRefreshService", func() error { + tokenRefresh.Stop() + return nil + }}, + {"AccountExpiryService", func() error { + accountExpiry.Stop() + return nil + }}, + {"PricingService", func() error { + pricing.Stop() + return nil + }}, + {"EmailQueueService", func() error { + emailQueue.Stop() + return nil + }}, + {"BillingCacheService", func() error { + billingCache.Stop() + return nil + }}, + {"OAuthService", func() error { + oauth.Stop() + return nil + }}, + {"OpenAIOAuthService", func() error { + openaiOAuth.Stop() + return nil + }}, + {"GeminiOAuthService", 
func() error { + geminiOAuth.Stop() + return nil + }}, + {"AntigravityOAuthService", func() error { + antigravityOAuth.Stop() + return nil + }}, + {"Redis", func() error { + return rdb.Close() + }}, + {"Ent", func() error { + return entClient.Close() + }}, + } + + for _, step := range cleanupSteps { + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + // Continue with remaining cleanup steps even if one fails + } else { + log.Printf("[Cleanup] %s succeeded", step.name) + } + } + + // Check if context timed out + select { + case <-ctx.Done(): + log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds") + default: + log.Printf("[Cleanup] All cleanup steps completed") + } + } +} diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go new file mode 100644 index 00000000..c8304831 --- /dev/null +++ b/backend/cmd/server/wire_gen.go @@ -0,0 +1,318 @@ +// Code generated by Wire. DO NOT EDIT. + +//go:generate go run -mod=mod github.com/google/wire/cmd/wire +//go:build !wireinject +// +build !wireinject + +package main + +import ( + "context" + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/handler" + "github.com/Wei-Shaw/sub2api/internal/handler/admin" + "github.com/Wei-Shaw/sub2api/internal/repository" + "github.com/Wei-Shaw/sub2api/internal/server" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "log" + "net/http" + "time" +) + +import ( + _ "embed" + _ "github.com/Wei-Shaw/sub2api/ent/runtime" +) + +// Injectors from wire.go: + +func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { + configConfig, err := config.ProvideConfig() + if err != nil { + return nil, err + } + client, err := repository.ProvideEnt(configConfig) + if err != nil { + return nil, err + } + db, err := repository.ProvideSQLDB(client) + if err != nil { + return nil, err + } + userRepository := repository.NewUserRepository(client, db) + settingRepository := repository.NewSettingRepository(client) + settingService := service.NewSettingService(settingRepository, configConfig) + redisClient := repository.ProvideRedis(configConfig) + emailCache := repository.NewEmailCache(redisClient) + emailService := service.NewEmailService(settingRepository, emailCache) + turnstileVerifier := repository.NewTurnstileVerifier() + turnstileService := service.NewTurnstileService(settingService, turnstileVerifier) + emailQueueService := service.ProvideEmailQueueService(emailService) + promoCodeRepository := repository.NewPromoCodeRepository(client) + billingCache := repository.NewBillingCache(redisClient) + userSubscriptionRepository := repository.NewUserSubscriptionRepository(client) + billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig) + apiKeyRepository := repository.NewAPIKeyRepository(client) + groupRepository := repository.NewGroupRepository(client, db) + apiKeyCache := repository.NewAPIKeyCache(redisClient) + apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig) + apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService) + promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator) + authService := 
service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService) + userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator) + authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService) + userHandler := handler.NewUserHandler(userService) + apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) + usageLogRepository := repository.NewUsageLogRepository(client, db) + usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator) + usageHandler := handler.NewUsageHandler(usageService, apiKeyService) + redeemCodeRepository := repository.NewRedeemCodeRepository(client) + subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService) + redeemCache := repository.NewRedeemCache(redisClient) + redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator) + redeemHandler := handler.NewRedeemHandler(redeemService) + subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService) + dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db) + dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig) + dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig) + timingWheelService := service.ProvideTimingWheelService() + dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig) + dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService) + accountRepository := repository.NewAccountRepository(client, db) + proxyRepository := repository.NewProxyRepository(client, db) + proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig) + proxyLatencyCache := repository.NewProxyLatencyCache(redisClient) + adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator) + adminUserHandler := admin.NewUserHandler(adminService) + groupHandler := admin.NewGroupHandler(adminService) + claudeOAuthClient := repository.NewClaudeOAuthClient() + oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient) + openAIOAuthClient := repository.NewOpenAIOAuthClient() + openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient) + geminiOAuthClient := repository.NewGeminiOAuthClient(configConfig) + geminiCliCodeAssistClient := repository.NewGeminiCliCodeAssistClient() + geminiOAuthService := service.NewGeminiOAuthService(proxyRepository, geminiOAuthClient, geminiCliCodeAssistClient, configConfig) + antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository) + geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository) + tempUnschedCache := repository.NewTempUnschedCache(redisClient) + timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient) + geminiTokenCache := repository.NewGeminiTokenCache(redisClient) + tokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache) + rateLimitService := 
service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, tokenCacheInvalidator) + claudeUsageFetcher := repository.NewClaudeUsageFetcher() + antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository) + usageCache := service.NewUsageCache() + accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache) + geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService) + gatewayCache := repository.NewGatewayCache(redisClient) + antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService) + httpUpstream := repository.NewHTTPUpstream(configConfig) + antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream, settingService) + accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig) + concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) + concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) + crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) + accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService) + oAuthHandler := admin.NewOAuthHandler(oAuthService) + openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) + geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService) + antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService) + proxyHandler := admin.NewProxyHandler(adminService) + adminRedeemHandler := admin.NewRedeemHandler(adminService) + promoHandler := admin.NewPromoHandler(promoService) + opsRepository := repository.NewOpsRepository(db) + schedulerCache := repository.NewSchedulerCache(redisClient) + schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db) + schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig) + pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig) + pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) + if err != nil { + return nil, err + } + billingService := service.NewBillingService(configConfig, pricingService) + identityCache := repository.NewIdentityCache(redisClient) + identityService := service.NewIdentityService(identityCache) + deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService) + openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, 
userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) + geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig) + opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService) + settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService) + opsHandler := admin.NewOpsHandler(opsService) + updateCache := repository.NewUpdateCache(redisClient) + gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig) + serviceBuildInfo := provideServiceBuildInfo(buildInfo) + updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo) + systemHandler := handler.ProvideSystemHandler(updateService) + adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService) + adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService) + userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client) + userAttributeValueRepository := repository.NewUserAttributeValueRepository(client) + userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) + userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) + gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig) + openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) + handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) + handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) + jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService) + adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) + apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) + engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService, opsService, settingService, redisClient) + httpServer := server.ProvideHTTPServer(configConfig, engine) + opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig) + opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig) + opsAlertEvaluatorService := 
service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) + opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) + opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) + tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, tokenCacheInvalidator, configConfig) + accountExpiryService := service.ProvideAccountExpiryService(accountRepository) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + application := &Application{ + Server: httpServer, + Cleanup: v, + } + return application, nil +} + +// wire.go: + +type Application struct { + Server *http.Server + Cleanup func() +} + +func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { + return service.BuildInfo{ + Version: buildInfo.Version, + BuildType: buildInfo.BuildType, + } +} + +func provideCleanup( + entClient *ent.Client, + rdb *redis.Client, + opsMetricsCollector *service.OpsMetricsCollector, + opsAggregation *service.OpsAggregationService, + opsAlertEvaluator *service.OpsAlertEvaluatorService, + opsCleanup *service.OpsCleanupService, + opsScheduledReport *service.OpsScheduledReportService, + schedulerSnapshot *service.SchedulerSnapshotService, + tokenRefresh *service.TokenRefreshService, + accountExpiry *service.AccountExpiryService, + pricing *service.PricingService, + emailQueue *service.EmailQueueService, + billingCache *service.BillingCacheService, + oauth *service.OAuthService, + openaiOAuth *service.OpenAIOAuthService, + geminiOAuth *service.GeminiOAuthService, + antigravityOAuth *service.AntigravityOAuthService, +) func() { + return func() { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cleanupSteps := []struct { + name string + fn func() error + }{ + {"OpsScheduledReportService", func() error { + if opsScheduledReport != nil { + opsScheduledReport.Stop() + } + return nil + }}, + {"OpsCleanupService", func() error { + if opsCleanup != nil { + opsCleanup.Stop() + } + return nil + }}, + {"OpsAlertEvaluatorService", func() error { + if opsAlertEvaluator != nil { + opsAlertEvaluator.Stop() + } + return nil + }}, + {"OpsAggregationService", func() error { + if opsAggregation != nil { + opsAggregation.Stop() + } + return nil + }}, + {"OpsMetricsCollector", func() error { + if opsMetricsCollector != nil { + opsMetricsCollector.Stop() + } + return nil + }}, + {"SchedulerSnapshotService", func() error { + if schedulerSnapshot != nil { + schedulerSnapshot.Stop() + } + return nil + }}, + {"TokenRefreshService", func() error { + tokenRefresh.Stop() + return nil + }}, + {"AccountExpiryService", func() error { + accountExpiry.Stop() + return nil + }}, + {"PricingService", func() error { + pricing.Stop() + return nil + }}, + {"EmailQueueService", func() error { + emailQueue.Stop() + return nil + }}, + {"BillingCacheService", func() error { + billingCache.Stop() + return nil + }}, + {"OAuthService", func() error { + oauth.Stop() + return nil + }}, + {"OpenAIOAuthService", func() error { + 
openaiOAuth.Stop() + return nil + }}, + {"GeminiOAuthService", func() error { + geminiOAuth.Stop() + return nil + }}, + {"AntigravityOAuthService", func() error { + antigravityOAuth.Stop() + return nil + }}, + {"Redis", func() error { + return rdb.Close() + }}, + {"Ent", func() error { + return entClient.Close() + }}, + } + + for _, step := range cleanupSteps { + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + + } else { + log.Printf("[Cleanup] %s succeeded", step.name) + } + } + + select { + case <-ctx.Done(): + log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds") + default: + log.Printf("[Cleanup] All cleanup steps completed") + } + } +} diff --git a/backend/ent/account.go b/backend/ent/account.go new file mode 100644 index 00000000..038aa7e5 --- /dev/null +++ b/backend/ent/account.go @@ -0,0 +1,494 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// Account is the model entity for the Account schema. +type Account struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Notes holds the value of the "notes" field. + Notes *string `json:"notes,omitempty"` + // Platform holds the value of the "platform" field. + Platform string `json:"platform,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Credentials holds the value of the "credentials" field. + Credentials map[string]interface{} `json:"credentials,omitempty"` + // Extra holds the value of the "extra" field. + Extra map[string]interface{} `json:"extra,omitempty"` + // ProxyID holds the value of the "proxy_id" field. + ProxyID *int64 `json:"proxy_id,omitempty"` + // Concurrency holds the value of the "concurrency" field. + Concurrency int `json:"concurrency,omitempty"` + // Priority holds the value of the "priority" field. + Priority int `json:"priority,omitempty"` + // RateMultiplier holds the value of the "rate_multiplier" field. + RateMultiplier float64 `json:"rate_multiplier,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // ErrorMessage holds the value of the "error_message" field. + ErrorMessage *string `json:"error_message,omitempty"` + // LastUsedAt holds the value of the "last_used_at" field. + LastUsedAt *time.Time `json:"last_used_at,omitempty"` + // Account expiration time (NULL means no expiration). + ExpiresAt *time.Time `json:"expires_at,omitempty"` + // Auto pause scheduling when account expires. + AutoPauseOnExpired bool `json:"auto_pause_on_expired,omitempty"` + // Schedulable holds the value of the "schedulable" field. + Schedulable bool `json:"schedulable,omitempty"` + // RateLimitedAt holds the value of the "rate_limited_at" field. + RateLimitedAt *time.Time `json:"rate_limited_at,omitempty"` + // RateLimitResetAt holds the value of the "rate_limit_reset_at" field. 
+	RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
+	// OverloadUntil holds the value of the "overload_until" field.
+	OverloadUntil *time.Time `json:"overload_until,omitempty"`
+	// SessionWindowStart holds the value of the "session_window_start" field.
+	SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
+	// SessionWindowEnd holds the value of the "session_window_end" field.
+	SessionWindowEnd *time.Time `json:"session_window_end,omitempty"`
+	// SessionWindowStatus holds the value of the "session_window_status" field.
+	SessionWindowStatus *string `json:"session_window_status,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the AccountQuery when eager-loading is set.
+	Edges        AccountEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// AccountEdges holds the relations/edges for other nodes in the graph.
+type AccountEdges struct {
+	// Groups holds the value of the groups edge.
+	Groups []*Group `json:"groups,omitempty"`
+	// Proxy holds the value of the proxy edge.
+	Proxy *Proxy `json:"proxy,omitempty"`
+	// UsageLogs holds the value of the usage_logs edge.
+	UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
+	// AccountGroups holds the value of the account_groups edge.
+	AccountGroups []*AccountGroup `json:"account_groups,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [4]bool
+}
+
+// GroupsOrErr returns the Groups value or an error if the edge
+// was not loaded in eager-loading.
+func (e AccountEdges) GroupsOrErr() ([]*Group, error) {
+	if e.loadedTypes[0] {
+		return e.Groups, nil
+	}
+	return nil, &NotLoadedError{edge: "groups"}
+}
+
+// ProxyOrErr returns the Proxy value or an error if the edge
+// was not loaded in eager-loading, or loaded but was not found.
+func (e AccountEdges) ProxyOrErr() (*Proxy, error) {
+	if e.Proxy != nil {
+		return e.Proxy, nil
+	} else if e.loadedTypes[1] {
+		return nil, &NotFoundError{label: proxy.Label}
+	}
+	return nil, &NotLoadedError{edge: "proxy"}
+}
+
+// UsageLogsOrErr returns the UsageLogs value or an error if the edge
+// was not loaded in eager-loading.
+func (e AccountEdges) UsageLogsOrErr() ([]*UsageLog, error) {
+	if e.loadedTypes[2] {
+		return e.UsageLogs, nil
+	}
+	return nil, &NotLoadedError{edge: "usage_logs"}
+}
+
+// AccountGroupsOrErr returns the AccountGroups value or an error if the edge
+// was not loaded in eager-loading.
+func (e AccountEdges) AccountGroupsOrErr() ([]*AccountGroup, error) {
+	if e.loadedTypes[3] {
+		return e.AccountGroups, nil
+	}
+	return nil, &NotLoadedError{edge: "account_groups"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Account) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case account.FieldCredentials, account.FieldExtra:
+			values[i] = new([]byte)
+		case account.FieldAutoPauseOnExpired, account.FieldSchedulable:
+			values[i] = new(sql.NullBool)
+		case account.FieldRateMultiplier:
+			values[i] = new(sql.NullFloat64)
+		case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority:
+			values[i] = new(sql.NullInt64)
+		case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus:
+			values[i] = new(sql.NullString)
+		case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
+			values[i] = new(sql.NullTime)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Account fields.
+func (_m *Account) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case account.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			_m.ID = int64(value.Int64)
+		case account.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				_m.CreatedAt = value.Time
+			}
+		case account.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				_m.UpdatedAt = value.Time
+			}
+		case account.FieldDeletedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
+			} else if value.Valid {
+				_m.DeletedAt = new(time.Time)
+				*_m.DeletedAt = value.Time
+			}
+		case account.FieldName:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field name", values[i])
+			} else if value.Valid {
+				_m.Name = value.String
+			}
+		case account.FieldNotes:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field notes", values[i])
+			} else if value.Valid {
+				_m.Notes = new(string)
+				*_m.Notes = value.String
+			}
+		case account.FieldPlatform:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field platform", values[i])
+			} else if value.Valid {
+				_m.Platform = value.String
+			}
+		case account.FieldType:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field type", values[i])
+			} else if value.Valid {
+				_m.Type = value.String
+			}
+		case account.FieldCredentials:
+			if value, ok := values[i].(*[]byte); !ok {
+				return fmt.Errorf("unexpected type %T for field credentials", values[i])
+			} else if value != nil && len(*value) > 0 {
+				if err := json.Unmarshal(*value, &_m.Credentials); err != nil {
+					return fmt.Errorf("unmarshal field credentials: %w", err)
+				}
+			}
+		case account.FieldExtra:
+			if value, ok := values[i].(*[]byte); !ok {
+				return fmt.Errorf("unexpected type %T for field extra", values[i])
+			} else if value != nil && len(*value) > 0 {
+				if err := json.Unmarshal(*value, &_m.Extra); err != nil {
+					return fmt.Errorf("unmarshal field extra: %w", err)
+				}
+			}
+		case account.FieldProxyID:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field proxy_id", values[i])
+			} else if value.Valid {
+				_m.ProxyID = new(int64)
+				*_m.ProxyID = value.Int64
+			}
+		case account.FieldConcurrency:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field concurrency", values[i])
+			} else if value.Valid {
+				_m.Concurrency = int(value.Int64)
+			}
+		case account.FieldPriority:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field priority", values[i])
+			} else if value.Valid {
+				_m.Priority = int(value.Int64)
+			}
+		case account.FieldRateMultiplier:
+			if value, ok := values[i].(*sql.NullFloat64); !ok {
+				return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
+			} else if value.Valid {
+				_m.RateMultiplier = value.Float64
+			}
+		case account.FieldStatus:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field status", values[i])
+			} else if value.Valid {
+				_m.Status = value.String
+			}
+		case account.FieldErrorMessage:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field error_message", values[i])
+			} else if value.Valid {
+				_m.ErrorMessage = new(string)
+				*_m.ErrorMessage = value.String
+			}
+		case account.FieldLastUsedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field last_used_at", values[i])
+			} else if value.Valid {
+				_m.LastUsedAt = new(time.Time)
+				*_m.LastUsedAt = value.Time
+			}
+		case account.FieldExpiresAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field expires_at", values[i])
+			} else if value.Valid {
+				_m.ExpiresAt = new(time.Time)
+				*_m.ExpiresAt = value.Time
+			}
+		case account.FieldAutoPauseOnExpired:
+			if value, ok := values[i].(*sql.NullBool); !ok {
+				return fmt.Errorf("unexpected type %T for field auto_pause_on_expired", values[i])
+			} else if value.Valid {
+				_m.AutoPauseOnExpired = value.Bool
+			}
+		case account.FieldSchedulable:
+			if value, ok := values[i].(*sql.NullBool); !ok {
+				return fmt.Errorf("unexpected type %T for field schedulable", values[i])
+			} else if value.Valid {
+				_m.Schedulable = value.Bool
+			}
+		case account.FieldRateLimitedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field rate_limited_at", values[i])
+			} else if value.Valid {
+				_m.RateLimitedAt = new(time.Time)
+				*_m.RateLimitedAt = value.Time
+			}
+		case account.FieldRateLimitResetAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field rate_limit_reset_at", values[i])
+			} else if value.Valid {
+				_m.RateLimitResetAt = new(time.Time)
+				*_m.RateLimitResetAt = value.Time
+			}
+		case account.FieldOverloadUntil:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field overload_until", values[i])
+			} else if value.Valid {
+				_m.OverloadUntil = new(time.Time)
+				*_m.OverloadUntil = value.Time
+			}
+		case account.FieldSessionWindowStart:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
+			} else if value.Valid {
+				_m.SessionWindowStart = new(time.Time)
+				*_m.SessionWindowStart = value.Time
+			}
+		case account.FieldSessionWindowEnd:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field session_window_end", values[i])
+			} else if value.Valid {
+				_m.SessionWindowEnd = new(time.Time)
+				*_m.SessionWindowEnd = value.Time
+			}
+		case account.FieldSessionWindowStatus:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field session_window_status", values[i])
+			} else if value.Valid {
+				_m.SessionWindowStatus = new(string)
+				*_m.SessionWindowStatus = value.String
+			}
+		default:
+			_m.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Account.
+// This includes values selected through modifiers, order, etc.
+func (_m *Account) Value(name string) (ent.Value, error) {
+	return _m.selectValues.Get(name)
+}
+
+// QueryGroups queries the "groups" edge of the Account entity.
+func (_m *Account) QueryGroups() *GroupQuery {
+	return NewAccountClient(_m.config).QueryGroups(_m)
+}
+
+// QueryProxy queries the "proxy" edge of the Account entity.
+func (_m *Account) QueryProxy() *ProxyQuery {
+	return NewAccountClient(_m.config).QueryProxy(_m)
+}
+
+// QueryUsageLogs queries the "usage_logs" edge of the Account entity.
+func (_m *Account) QueryUsageLogs() *UsageLogQuery {
+	return NewAccountClient(_m.config).QueryUsageLogs(_m)
+}
+
+// QueryAccountGroups queries the "account_groups" edge of the Account entity.
+func (_m *Account) QueryAccountGroups() *AccountGroupQuery {
+	return NewAccountClient(_m.config).QueryAccountGroups(_m)
+}
+
+// Update returns a builder for updating this Account.
+// Note that you need to call Account.Unwrap() before calling this method if this Account
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (_m *Account) Update() *AccountUpdateOne {
+	return NewAccountClient(_m.config).UpdateOne(_m)
+}
+
+// Unwrap unwraps the Account entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (_m *Account) Unwrap() *Account {
+	_tx, ok := _m.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: Account is not a transactional entity")
+	}
+	_m.config.driver = _tx.drv
+	return _m
+}
+
+// String implements the fmt.Stringer.
+func (_m *Account) String() string {
+	var builder strings.Builder
+	builder.WriteString("Account(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
+	builder.WriteString("created_at=")
+	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	if v := _m.DeletedAt; v != nil {
+		builder.WriteString("deleted_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	builder.WriteString("name=")
+	builder.WriteString(_m.Name)
+	builder.WriteString(", ")
+	if v := _m.Notes; v != nil {
+		builder.WriteString("notes=")
+		builder.WriteString(*v)
+	}
+	builder.WriteString(", ")
+	builder.WriteString("platform=")
+	builder.WriteString(_m.Platform)
+	builder.WriteString(", ")
+	builder.WriteString("type=")
+	builder.WriteString(_m.Type)
+	builder.WriteString(", ")
+	builder.WriteString("credentials=")
+	builder.WriteString(fmt.Sprintf("%v", _m.Credentials))
+	builder.WriteString(", ")
+	builder.WriteString("extra=")
+	builder.WriteString(fmt.Sprintf("%v", _m.Extra))
+	builder.WriteString(", ")
+	if v := _m.ProxyID; v != nil {
+		builder.WriteString("proxy_id=")
+		builder.WriteString(fmt.Sprintf("%v", *v))
+	}
+	builder.WriteString(", ")
+	builder.WriteString("concurrency=")
+	builder.WriteString(fmt.Sprintf("%v", _m.Concurrency))
+	builder.WriteString(", ")
+	builder.WriteString("priority=")
+	builder.WriteString(fmt.Sprintf("%v", _m.Priority))
+	builder.WriteString(", ")
+	builder.WriteString("rate_multiplier=")
+	builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
+	builder.WriteString(", ")
+	builder.WriteString("status=")
+	builder.WriteString(_m.Status)
+	builder.WriteString(", ")
+	if v := _m.ErrorMessage; v != nil {
+		builder.WriteString("error_message=")
+		builder.WriteString(*v)
+	}
+	builder.WriteString(", ")
+	if v := _m.LastUsedAt; v != nil {
+		builder.WriteString("last_used_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.ExpiresAt; v != nil {
+		builder.WriteString("expires_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	builder.WriteString("auto_pause_on_expired=")
+	builder.WriteString(fmt.Sprintf("%v", _m.AutoPauseOnExpired))
+	builder.WriteString(", ")
+	builder.WriteString("schedulable=")
+	builder.WriteString(fmt.Sprintf("%v", _m.Schedulable))
+	builder.WriteString(", ")
+	if v := _m.RateLimitedAt; v != nil {
+		builder.WriteString("rate_limited_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.RateLimitResetAt; v != nil {
+		builder.WriteString("rate_limit_reset_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.OverloadUntil; v != nil {
+		builder.WriteString("overload_until=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.SessionWindowStart; v != nil {
+		builder.WriteString("session_window_start=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.SessionWindowEnd; v != nil {
+		builder.WriteString("session_window_end=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.SessionWindowStatus; v != nil {
+		builder.WriteString("session_window_status=")
+		builder.WriteString(*v)
+	}
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// Accounts is a parsable slice of Account.
+type Accounts []*Account
diff --git a/backend/ent/account/account.go b/backend/ent/account/account.go
new file mode 100644
index 00000000..73c0e8c2
--- /dev/null
+++ b/backend/ent/account/account.go
@@ -0,0 +1,392 @@
+// Code generated by ent, DO NOT EDIT.
+
+package account
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the account type in the database.
+	Label = "account"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
+	FieldDeletedAt = "deleted_at"
+	// FieldName holds the string denoting the name field in the database.
+	FieldName = "name"
+	// FieldNotes holds the string denoting the notes field in the database.
+	FieldNotes = "notes"
+	// FieldPlatform holds the string denoting the platform field in the database.
+	FieldPlatform = "platform"
+	// FieldType holds the string denoting the type field in the database.
+	FieldType = "type"
+	// FieldCredentials holds the string denoting the credentials field in the database.
+	FieldCredentials = "credentials"
+	// FieldExtra holds the string denoting the extra field in the database.
+	FieldExtra = "extra"
+	// FieldProxyID holds the string denoting the proxy_id field in the database.
+	FieldProxyID = "proxy_id"
+	// FieldConcurrency holds the string denoting the concurrency field in the database.
+	FieldConcurrency = "concurrency"
+	// FieldPriority holds the string denoting the priority field in the database.
+	FieldPriority = "priority"
+	// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
+	FieldRateMultiplier = "rate_multiplier"
+	// FieldStatus holds the string denoting the status field in the database.
+	FieldStatus = "status"
+	// FieldErrorMessage holds the string denoting the error_message field in the database.
+	FieldErrorMessage = "error_message"
+	// FieldLastUsedAt holds the string denoting the last_used_at field in the database.
+	FieldLastUsedAt = "last_used_at"
+	// FieldExpiresAt holds the string denoting the expires_at field in the database.
+	FieldExpiresAt = "expires_at"
+	// FieldAutoPauseOnExpired holds the string denoting the auto_pause_on_expired field in the database.
+	FieldAutoPauseOnExpired = "auto_pause_on_expired"
+	// FieldSchedulable holds the string denoting the schedulable field in the database.
+	FieldSchedulable = "schedulable"
+	// FieldRateLimitedAt holds the string denoting the rate_limited_at field in the database.
+	FieldRateLimitedAt = "rate_limited_at"
+	// FieldRateLimitResetAt holds the string denoting the rate_limit_reset_at field in the database.
+	FieldRateLimitResetAt = "rate_limit_reset_at"
+	// FieldOverloadUntil holds the string denoting the overload_until field in the database.
+	FieldOverloadUntil = "overload_until"
+	// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
+	FieldSessionWindowStart = "session_window_start"
+	// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
+	FieldSessionWindowEnd = "session_window_end"
+	// FieldSessionWindowStatus holds the string denoting the session_window_status field in the database.
+	FieldSessionWindowStatus = "session_window_status"
+	// EdgeGroups holds the string denoting the groups edge name in mutations.
+	EdgeGroups = "groups"
+	// EdgeProxy holds the string denoting the proxy edge name in mutations.
+	EdgeProxy = "proxy"
+	// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
+	EdgeUsageLogs = "usage_logs"
+	// EdgeAccountGroups holds the string denoting the account_groups edge name in mutations.
+	EdgeAccountGroups = "account_groups"
+	// Table holds the table name of the account in the database.
+	Table = "accounts"
+	// GroupsTable is the table that holds the groups relation/edge. The primary key declared below.
+	GroupsTable = "account_groups"
+	// GroupsInverseTable is the table name for the Group entity.
+	// It exists in this package in order to avoid circular dependency with the "group" package.
+	GroupsInverseTable = "groups"
+	// ProxyTable is the table that holds the proxy relation/edge.
+	ProxyTable = "accounts"
+	// ProxyInverseTable is the table name for the Proxy entity.
+	// It exists in this package in order to avoid circular dependency with the "proxy" package.
+	ProxyInverseTable = "proxies"
+	// ProxyColumn is the table column denoting the proxy relation/edge.
+	ProxyColumn = "proxy_id"
+	// UsageLogsTable is the table that holds the usage_logs relation/edge.
+	UsageLogsTable = "usage_logs"
+	// UsageLogsInverseTable is the table name for the UsageLog entity.
+	// It exists in this package in order to avoid circular dependency with the "usagelog" package.
+	UsageLogsInverseTable = "usage_logs"
+	// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
+	UsageLogsColumn = "account_id"
+	// AccountGroupsTable is the table that holds the account_groups relation/edge.
+	AccountGroupsTable = "account_groups"
+	// AccountGroupsInverseTable is the table name for the AccountGroup entity.
+	// It exists in this package in order to avoid circular dependency with the "accountgroup" package.
+	AccountGroupsInverseTable = "account_groups"
+	// AccountGroupsColumn is the table column denoting the account_groups relation/edge.
+	AccountGroupsColumn = "account_id"
+)
+
+// Columns holds all SQL columns for account fields.
+var Columns = []string{
+	FieldID,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+	FieldDeletedAt,
+	FieldName,
+	FieldNotes,
+	FieldPlatform,
+	FieldType,
+	FieldCredentials,
+	FieldExtra,
+	FieldProxyID,
+	FieldConcurrency,
+	FieldPriority,
+	FieldRateMultiplier,
+	FieldStatus,
+	FieldErrorMessage,
+	FieldLastUsedAt,
+	FieldExpiresAt,
+	FieldAutoPauseOnExpired,
+	FieldSchedulable,
+	FieldRateLimitedAt,
+	FieldRateLimitResetAt,
+	FieldOverloadUntil,
+	FieldSessionWindowStart,
+	FieldSessionWindowEnd,
+	FieldSessionWindowStatus,
+}
+
+var (
+	// GroupsPrimaryKey and GroupsColumn2 are the table columns denoting the
+	// primary key for the groups relation (M2M).
+	GroupsPrimaryKey = []string{"account_id", "group_id"}
+)
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+// Note that the variables below are initialized by the runtime
+// package on the initialization of the application. Therefore,
+// it should be imported in the main as follows:
+//
+//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
+var (
+	Hooks        [1]ent.Hook
+	Interceptors [1]ent.Interceptor
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+	// NameValidator is a validator for the "name" field. It is called by the builders before save.
+	NameValidator func(string) error
+	// PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
+	PlatformValidator func(string) error
+	// TypeValidator is a validator for the "type" field. It is called by the builders before save.
+	TypeValidator func(string) error
+	// DefaultCredentials holds the default value on creation for the "credentials" field.
+	DefaultCredentials func() map[string]interface{}
+	// DefaultExtra holds the default value on creation for the "extra" field.
+	DefaultExtra func() map[string]interface{}
+	// DefaultConcurrency holds the default value on creation for the "concurrency" field.
+	DefaultConcurrency int
+	// DefaultPriority holds the default value on creation for the "priority" field.
+	DefaultPriority int
+	// DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field.
+	DefaultRateMultiplier float64
+	// DefaultStatus holds the default value on creation for the "status" field.
+	DefaultStatus string
+	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
+	StatusValidator func(string) error
+	// DefaultAutoPauseOnExpired holds the default value on creation for the "auto_pause_on_expired" field.
+	DefaultAutoPauseOnExpired bool
+	// DefaultSchedulable holds the default value on creation for the "schedulable" field.
+	DefaultSchedulable bool
+	// SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
+	SessionWindowStatusValidator func(string) error
+)
+
+// OrderOption defines the ordering options for the Account queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByDeletedAt orders the results by the deleted_at field.
+func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
+}
+
+// ByName orders the results by the name field.
+func ByName(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldName, opts...).ToFunc()
+}
+
+// ByNotes orders the results by the notes field.
+func ByNotes(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldNotes, opts...).ToFunc()
+}
+
+// ByPlatform orders the results by the platform field.
+func ByPlatform(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlatform, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByProxyID orders the results by the proxy_id field. +func ByProxyID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProxyID, opts...).ToFunc() +} + +// ByConcurrency orders the results by the concurrency field. +func ByConcurrency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConcurrency, opts...).ToFunc() +} + +// ByPriority orders the results by the priority field. +func ByPriority(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPriority, opts...).ToFunc() +} + +// ByRateMultiplier orders the results by the rate_multiplier field. +func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByLastUsedAt orders the results by the last_used_at field. +func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByAutoPauseOnExpired orders the results by the auto_pause_on_expired field. +func ByAutoPauseOnExpired(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAutoPauseOnExpired, opts...).ToFunc() +} + +// BySchedulable orders the results by the schedulable field. +func BySchedulable(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSchedulable, opts...).ToFunc() +} + +// ByRateLimitedAt orders the results by the rate_limited_at field. +func ByRateLimitedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateLimitedAt, opts...).ToFunc() +} + +// ByRateLimitResetAt orders the results by the rate_limit_reset_at field. +func ByRateLimitResetAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateLimitResetAt, opts...).ToFunc() +} + +// ByOverloadUntil orders the results by the overload_until field. +func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc() +} + +// BySessionWindowStart orders the results by the session_window_start field. +func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc() +} + +// BySessionWindowEnd orders the results by the session_window_end field. +func BySessionWindowEnd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowEnd, opts...).ToFunc() +} + +// BySessionWindowStatus orders the results by the session_window_status field. +func BySessionWindowStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSessionWindowStatus, opts...).ToFunc() +} + +// ByGroupsCount orders the results by groups count. 
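+// Neighbor-count ordering joins the edge table under the hood; a sketch of a
+// possible use (client and ctx are assumed, not defined in this file):
+//
+//	busiest, err := client.Account.Query().
+//		Order(account.ByGroupsCount(sql.OrderDesc())).
+//		First(ctx)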
+func ByGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newGroupsStep(), opts...) + } +} + +// ByGroups orders the results by groups terms. +func ByGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByProxyField orders the results by proxy field. +func ByProxyField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newProxyStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUsageLogsCount orders the results by usage_logs count. +func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...) + } +} + +// ByUsageLogs orders the results by usage_logs terms. +func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountGroupsCount orders the results by account_groups count. +func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...) + } +} + +// ByAccountGroups orders the results by account_groups terms. +func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...), + ) +} +func newProxyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ProxyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProxyTable, ProxyColumn), + ) +} +func newUsageLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) +} +func newAccountGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) +} diff --git a/backend/ent/account/where.go b/backend/ent/account/where.go new file mode 100644 index 00000000..dea1127a --- /dev/null +++ b/backend/ent/account/where.go @@ -0,0 +1,1413 @@ +// Code generated by ent, DO NOT EDIT. + +package account + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
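+// Field predicates compose inside Where; an illustrative lookup (client and
+// ctx are assumed to exist elsewhere):
+//
+//	accounts, err := client.Account.Query().
+//		Where(account.IDIn(1, 2, 3)).
+//		All(ctx)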
+func IDIn(ids ...int64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Account { + return predicate.Account(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Account { + return predicate.Account(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldName, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldNotes, v)) +} + +// Platform applies equality check predicate on the "platform" field. It's identical to PlatformEQ. +func Platform(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPlatform, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldType, v)) +} + +// ProxyID applies equality check predicate on the "proxy_id" field. It's identical to ProxyIDEQ. +func ProxyID(v int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldProxyID, v)) +} + +// Concurrency applies equality check predicate on the "concurrency" field. It's identical to ConcurrencyEQ. +func Concurrency(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldConcurrency, v)) +} + +// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ. +func Priority(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPriority, v)) +} + +// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ. +func RateMultiplier(v float64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldStatus, v)) +} + +// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. 
+func ErrorMessage(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldErrorMessage, v)) +} + +// LastUsedAt applies equality check predicate on the "last_used_at" field. It's identical to LastUsedAtEQ. +func LastUsedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldLastUsedAt, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldExpiresAt, v)) +} + +// AutoPauseOnExpired applies equality check predicate on the "auto_pause_on_expired" field. It's identical to AutoPauseOnExpiredEQ. +func AutoPauseOnExpired(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldAutoPauseOnExpired, v)) +} + +// Schedulable applies equality check predicate on the "schedulable" field. It's identical to SchedulableEQ. +func Schedulable(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSchedulable, v)) +} + +// RateLimitedAt applies equality check predicate on the "rate_limited_at" field. It's identical to RateLimitedAtEQ. +func RateLimitedAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitedAt, v)) +} + +// RateLimitResetAt applies equality check predicate on the "rate_limit_reset_at" field. It's identical to RateLimitResetAtEQ. +func RateLimitResetAt(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitResetAt, v)) +} + +// OverloadUntil applies equality check predicate on the "overload_until" field. It's identical to OverloadUntilEQ. +func OverloadUntil(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v)) +} + +// SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ. +func SessionWindowStart(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowEnd applies equality check predicate on the "session_window_end" field. It's identical to SessionWindowEndEQ. +func SessionWindowEnd(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowStatus applies equality check predicate on the "session_window_status" field. It's identical to SessionWindowStatusEQ. +func SessionWindowStatus(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
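+// Range filters pair the GT/LTE predicates; a sketch of selecting accounts
+// created inside a window (start and end are assumed time.Time values):
+//
+//	recent, err := client.Account.Query().
+//		Where(account.CreatedAtGT(start), account.CreatedAtLTE(end)).
+//		All(ctx)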
+func CreatedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. 
+func DeletedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldDeletedAt)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldName, v)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. 
+func NotesNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. +func NotesNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldNotes, v)) +} + +// PlatformEQ applies the EQ predicate on the "platform" field. +func PlatformEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPlatform, v)) +} + +// PlatformNEQ applies the NEQ predicate on the "platform" field. +func PlatformNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldPlatform, v)) +} + +// PlatformIn applies the In predicate on the "platform" field. +func PlatformIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldPlatform, vs...)) +} + +// PlatformNotIn applies the NotIn predicate on the "platform" field. +func PlatformNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldPlatform, vs...)) +} + +// PlatformGT applies the GT predicate on the "platform" field. +func PlatformGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldPlatform, v)) +} + +// PlatformGTE applies the GTE predicate on the "platform" field. +func PlatformGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldPlatform, v)) +} + +// PlatformLT applies the LT predicate on the "platform" field. +func PlatformLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldPlatform, v)) +} + +// PlatformLTE applies the LTE predicate on the "platform" field. 
+func PlatformLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldPlatform, v)) +} + +// PlatformContains applies the Contains predicate on the "platform" field. +func PlatformContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldPlatform, v)) +} + +// PlatformHasPrefix applies the HasPrefix predicate on the "platform" field. +func PlatformHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldPlatform, v)) +} + +// PlatformHasSuffix applies the HasSuffix predicate on the "platform" field. +func PlatformHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldPlatform, v)) +} + +// PlatformEqualFold applies the EqualFold predicate on the "platform" field. +func PlatformEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldPlatform, v)) +} + +// PlatformContainsFold applies the ContainsFold predicate on the "platform" field. +func PlatformContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldPlatform, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldType, v)) +} + +// ProxyIDEQ applies the EQ predicate on the "proxy_id" field. 
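+// Since proxy_id is nullable, equality checks are often combined with the
+// IsNil/NotNil predicates below; an illustrative query (proxyID is assumed):
+//
+//	matched, err := client.Account.Query().
+//		Where(account.Or(account.ProxyIDEQ(proxyID), account.ProxyIDIsNil())).
+//		All(ctx)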
+func ProxyIDEQ(v int64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldProxyID, v)) +} + +// ProxyIDNEQ applies the NEQ predicate on the "proxy_id" field. +func ProxyIDNEQ(v int64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldProxyID, v)) +} + +// ProxyIDIn applies the In predicate on the "proxy_id" field. +func ProxyIDIn(vs ...int64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldProxyID, vs...)) +} + +// ProxyIDNotIn applies the NotIn predicate on the "proxy_id" field. +func ProxyIDNotIn(vs ...int64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldProxyID, vs...)) +} + +// ProxyIDIsNil applies the IsNil predicate on the "proxy_id" field. +func ProxyIDIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldProxyID)) +} + +// ProxyIDNotNil applies the NotNil predicate on the "proxy_id" field. +func ProxyIDNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldProxyID)) +} + +// ConcurrencyEQ applies the EQ predicate on the "concurrency" field. +func ConcurrencyEQ(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldConcurrency, v)) +} + +// ConcurrencyNEQ applies the NEQ predicate on the "concurrency" field. +func ConcurrencyNEQ(v int) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldConcurrency, v)) +} + +// ConcurrencyIn applies the In predicate on the "concurrency" field. +func ConcurrencyIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldIn(FieldConcurrency, vs...)) +} + +// ConcurrencyNotIn applies the NotIn predicate on the "concurrency" field. +func ConcurrencyNotIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldConcurrency, vs...)) +} + +// ConcurrencyGT applies the GT predicate on the "concurrency" field. +func ConcurrencyGT(v int) predicate.Account { + return predicate.Account(sql.FieldGT(FieldConcurrency, v)) +} + +// ConcurrencyGTE applies the GTE predicate on the "concurrency" field. +func ConcurrencyGTE(v int) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldConcurrency, v)) +} + +// ConcurrencyLT applies the LT predicate on the "concurrency" field. +func ConcurrencyLT(v int) predicate.Account { + return predicate.Account(sql.FieldLT(FieldConcurrency, v)) +} + +// ConcurrencyLTE applies the LTE predicate on the "concurrency" field. +func ConcurrencyLTE(v int) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldConcurrency, v)) +} + +// PriorityEQ applies the EQ predicate on the "priority" field. +func PriorityEQ(v int) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldPriority, v)) +} + +// PriorityNEQ applies the NEQ predicate on the "priority" field. +func PriorityNEQ(v int) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldPriority, v)) +} + +// PriorityIn applies the In predicate on the "priority" field. +func PriorityIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldIn(FieldPriority, vs...)) +} + +// PriorityNotIn applies the NotIn predicate on the "priority" field. +func PriorityNotIn(vs ...int) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldPriority, vs...)) +} + +// PriorityGT applies the GT predicate on the "priority" field. +func PriorityGT(v int) predicate.Account { + return predicate.Account(sql.FieldGT(FieldPriority, v)) +} + +// PriorityGTE applies the GTE predicate on the "priority" field. 
+func PriorityGTE(v int) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldPriority, v)) +} + +// PriorityLT applies the LT predicate on the "priority" field. +func PriorityLT(v int) predicate.Account { + return predicate.Account(sql.FieldLT(FieldPriority, v)) +} + +// PriorityLTE applies the LTE predicate on the "priority" field. +func PriorityLTE(v int) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldPriority, v)) +} + +// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field. +func RateMultiplierEQ(v float64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field. +func RateMultiplierNEQ(v float64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierIn applies the In predicate on the "rate_multiplier" field. +func RateMultiplierIn(vs ...float64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field. +func RateMultiplierNotIn(vs ...float64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field. +func RateMultiplierGT(v float64) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateMultiplier, v)) +} + +// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field. +func RateMultiplierGTE(v float64) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateMultiplier, v)) +} + +// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field. +func RateMultiplierLT(v float64) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateMultiplier, v)) +} + +// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field. +func RateMultiplierLTE(v float64) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateMultiplier, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. 
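+// For exact but case-insensitive matching, StatusEqualFold below is usually
+// a better fit than Contains; an illustrative query (client/ctx assumed):
+//
+//	active, err := client.Account.Query().
+//		Where(account.StatusEqualFold("ACTIVE")).
+//		All(ctx)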
+func StatusContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldStatus, v)) +} + +// ErrorMessageEQ applies the EQ predicate on the "error_message" field. +func ErrorMessageEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. +func ErrorMessageNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldErrorMessage, v)) +} + +// ErrorMessageIn applies the In predicate on the "error_message" field. +func ErrorMessageIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. +func ErrorMessageNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageGT applies the GT predicate on the "error_message" field. +func ErrorMessageGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldErrorMessage, v)) +} + +// ErrorMessageGTE applies the GTE predicate on the "error_message" field. +func ErrorMessageGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldErrorMessage, v)) +} + +// ErrorMessageLT applies the LT predicate on the "error_message" field. +func ErrorMessageLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldErrorMessage, v)) +} + +// ErrorMessageLTE applies the LTE predicate on the "error_message" field. +func ErrorMessageLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldErrorMessage, v)) +} + +// ErrorMessageContains applies the Contains predicate on the "error_message" field. +func ErrorMessageContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldErrorMessage, v)) +} + +// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. +func ErrorMessageHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldErrorMessage, v)) +} + +// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. +func ErrorMessageHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldErrorMessage, v)) +} + +// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. +func ErrorMessageIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldErrorMessage)) +} + +// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. 
+func ErrorMessageNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldErrorMessage)) +} + +// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. +func ErrorMessageEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldErrorMessage, v)) +} + +// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. +func ErrorMessageContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldErrorMessage, v)) +} + +// LastUsedAtEQ applies the EQ predicate on the "last_used_at" field. +func LastUsedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldLastUsedAt, v)) +} + +// LastUsedAtNEQ applies the NEQ predicate on the "last_used_at" field. +func LastUsedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldLastUsedAt, v)) +} + +// LastUsedAtIn applies the In predicate on the "last_used_at" field. +func LastUsedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldLastUsedAt, vs...)) +} + +// LastUsedAtNotIn applies the NotIn predicate on the "last_used_at" field. +func LastUsedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldLastUsedAt, vs...)) +} + +// LastUsedAtGT applies the GT predicate on the "last_used_at" field. +func LastUsedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldLastUsedAt, v)) +} + +// LastUsedAtGTE applies the GTE predicate on the "last_used_at" field. +func LastUsedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldLastUsedAt, v)) +} + +// LastUsedAtLT applies the LT predicate on the "last_used_at" field. +func LastUsedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldLastUsedAt, v)) +} + +// LastUsedAtLTE applies the LTE predicate on the "last_used_at" field. +func LastUsedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldLastUsedAt, v)) +} + +// LastUsedAtIsNil applies the IsNil predicate on the "last_used_at" field. +func LastUsedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldLastUsedAt)) +} + +// LastUsedAtNotNil applies the NotNil predicate on the "last_used_at" field. +func LastUsedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldLastUsedAt)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. 
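+// Expiry sweeps typically pair a bound with the NotNil check below, because
+// expires_at is nullable; a sketch (deadline is an assumed time.Time):
+//
+//	expiring, err := client.Account.Query().
+//		Where(account.ExpiresAtNotNil(), account.ExpiresAtLTE(deadline)).
+//		All(ctx)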
+func ExpiresAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldExpiresAt, v)) +} + +// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field. +func ExpiresAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldExpiresAt)) +} + +// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field. +func ExpiresAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldExpiresAt)) +} + +// AutoPauseOnExpiredEQ applies the EQ predicate on the "auto_pause_on_expired" field. +func AutoPauseOnExpiredEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldAutoPauseOnExpired, v)) +} + +// AutoPauseOnExpiredNEQ applies the NEQ predicate on the "auto_pause_on_expired" field. +func AutoPauseOnExpiredNEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldAutoPauseOnExpired, v)) +} + +// SchedulableEQ applies the EQ predicate on the "schedulable" field. +func SchedulableEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSchedulable, v)) +} + +// SchedulableNEQ applies the NEQ predicate on the "schedulable" field. +func SchedulableNEQ(v bool) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSchedulable, v)) +} + +// RateLimitedAtEQ applies the EQ predicate on the "rate_limited_at" field. +func RateLimitedAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitedAt, v)) +} + +// RateLimitedAtNEQ applies the NEQ predicate on the "rate_limited_at" field. +func RateLimitedAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateLimitedAt, v)) +} + +// RateLimitedAtIn applies the In predicate on the "rate_limited_at" field. +func RateLimitedAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateLimitedAt, vs...)) +} + +// RateLimitedAtNotIn applies the NotIn predicate on the "rate_limited_at" field. +func RateLimitedAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateLimitedAt, vs...)) +} + +// RateLimitedAtGT applies the GT predicate on the "rate_limited_at" field. +func RateLimitedAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateLimitedAt, v)) +} + +// RateLimitedAtGTE applies the GTE predicate on the "rate_limited_at" field. +func RateLimitedAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateLimitedAt, v)) +} + +// RateLimitedAtLT applies the LT predicate on the "rate_limited_at" field. +func RateLimitedAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateLimitedAt, v)) +} + +// RateLimitedAtLTE applies the LTE predicate on the "rate_limited_at" field. +func RateLimitedAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateLimitedAt, v)) +} + +// RateLimitedAtIsNil applies the IsNil predicate on the "rate_limited_at" field. 
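+// A scheduler-style filter can treat a nil rate_limited_at as "never
+// limited"; an illustrative query (client and ctx assumed):
+//
+//	usable, err := client.Account.Query().
+//		Where(account.RateLimitedAtIsNil(), account.SchedulableEQ(true)).
+//		All(ctx)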
+func RateLimitedAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldRateLimitedAt)) +} + +// RateLimitedAtNotNil applies the NotNil predicate on the "rate_limited_at" field. +func RateLimitedAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldRateLimitedAt)) +} + +// RateLimitResetAtEQ applies the EQ predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtNEQ applies the NEQ predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtIn applies the In predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateLimitResetAt, vs...)) +} + +// RateLimitResetAtNotIn applies the NotIn predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateLimitResetAt, vs...)) +} + +// RateLimitResetAtGT applies the GT predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtGTE applies the GTE predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtLT applies the LT predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtLTE applies the LTE predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateLimitResetAt, v)) +} + +// RateLimitResetAtIsNil applies the IsNil predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldRateLimitResetAt)) +} + +// RateLimitResetAtNotNil applies the NotNil predicate on the "rate_limit_reset_at" field. +func RateLimitResetAtNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldRateLimitResetAt)) +} + +// OverloadUntilEQ applies the EQ predicate on the "overload_until" field. +func OverloadUntilEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v)) +} + +// OverloadUntilNEQ applies the NEQ predicate on the "overload_until" field. +func OverloadUntilNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldOverloadUntil, v)) +} + +// OverloadUntilIn applies the In predicate on the "overload_until" field. +func OverloadUntilIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldOverloadUntil, vs...)) +} + +// OverloadUntilNotIn applies the NotIn predicate on the "overload_until" field. +func OverloadUntilNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldOverloadUntil, vs...)) +} + +// OverloadUntilGT applies the GT predicate on the "overload_until" field. 
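+// Comparing against the current time selects accounts still in backoff; a
+// sketch (the time is evaluated when the query is built):
+//
+//	overloaded, err := client.Account.Query().
+//		Where(account.OverloadUntilGT(time.Now())).
+//		All(ctx)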
+func OverloadUntilGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldOverloadUntil, v)) +} + +// OverloadUntilGTE applies the GTE predicate on the "overload_until" field. +func OverloadUntilGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldOverloadUntil, v)) +} + +// OverloadUntilLT applies the LT predicate on the "overload_until" field. +func OverloadUntilLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldOverloadUntil, v)) +} + +// OverloadUntilLTE applies the LTE predicate on the "overload_until" field. +func OverloadUntilLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldOverloadUntil, v)) +} + +// OverloadUntilIsNil applies the IsNil predicate on the "overload_until" field. +func OverloadUntilIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldOverloadUntil)) +} + +// OverloadUntilNotNil applies the NotNil predicate on the "overload_until" field. +func OverloadUntilNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldOverloadUntil)) +} + +// SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field. +func SessionWindowStartEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowStartNEQ applies the NEQ predicate on the "session_window_start" field. +func SessionWindowStartNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowStart, v)) +} + +// SessionWindowStartIn applies the In predicate on the "session_window_start" field. +func SessionWindowStartIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowStart, vs...)) +} + +// SessionWindowStartNotIn applies the NotIn predicate on the "session_window_start" field. +func SessionWindowStartNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowStart, vs...)) +} + +// SessionWindowStartGT applies the GT predicate on the "session_window_start" field. +func SessionWindowStartGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowStart, v)) +} + +// SessionWindowStartGTE applies the GTE predicate on the "session_window_start" field. +func SessionWindowStartGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowStart, v)) +} + +// SessionWindowStartLT applies the LT predicate on the "session_window_start" field. +func SessionWindowStartLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowStart, v)) +} + +// SessionWindowStartLTE applies the LTE predicate on the "session_window_start" field. +func SessionWindowStartLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowStart, v)) +} + +// SessionWindowStartIsNil applies the IsNil predicate on the "session_window_start" field. +func SessionWindowStartIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowStart)) +} + +// SessionWindowStartNotNil applies the NotNil predicate on the "session_window_start" field. +func SessionWindowStartNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowStart)) +} + +// SessionWindowEndEQ applies the EQ predicate on the "session_window_end" field. 
+func SessionWindowEndEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndNEQ applies the NEQ predicate on the "session_window_end" field. +func SessionWindowEndNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndIn applies the In predicate on the "session_window_end" field. +func SessionWindowEndIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowEnd, vs...)) +} + +// SessionWindowEndNotIn applies the NotIn predicate on the "session_window_end" field. +func SessionWindowEndNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowEnd, vs...)) +} + +// SessionWindowEndGT applies the GT predicate on the "session_window_end" field. +func SessionWindowEndGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndGTE applies the GTE predicate on the "session_window_end" field. +func SessionWindowEndGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndLT applies the LT predicate on the "session_window_end" field. +func SessionWindowEndLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndLTE applies the LTE predicate on the "session_window_end" field. +func SessionWindowEndLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowEnd, v)) +} + +// SessionWindowEndIsNil applies the IsNil predicate on the "session_window_end" field. +func SessionWindowEndIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowEnd)) +} + +// SessionWindowEndNotNil applies the NotNil predicate on the "session_window_end" field. +func SessionWindowEndNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowEnd)) +} + +// SessionWindowStatusEQ applies the EQ predicate on the "session_window_status" field. +func SessionWindowStatusEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusNEQ applies the NEQ predicate on the "session_window_status" field. +func SessionWindowStatusNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusIn applies the In predicate on the "session_window_status" field. +func SessionWindowStatusIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldSessionWindowStatus, vs...)) +} + +// SessionWindowStatusNotIn applies the NotIn predicate on the "session_window_status" field. +func SessionWindowStatusNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldSessionWindowStatus, vs...)) +} + +// SessionWindowStatusGT applies the GT predicate on the "session_window_status" field. +func SessionWindowStatusGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusGTE applies the GTE predicate on the "session_window_status" field. +func SessionWindowStatusGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusLT applies the LT predicate on the "session_window_status" field. 
+func SessionWindowStatusLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusLTE applies the LTE predicate on the "session_window_status" field. +func SessionWindowStatusLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusContains applies the Contains predicate on the "session_window_status" field. +func SessionWindowStatusContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusHasPrefix applies the HasPrefix predicate on the "session_window_status" field. +func SessionWindowStatusHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusHasSuffix applies the HasSuffix predicate on the "session_window_status" field. +func SessionWindowStatusHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusIsNil applies the IsNil predicate on the "session_window_status" field. +func SessionWindowStatusIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldSessionWindowStatus)) +} + +// SessionWindowStatusNotNil applies the NotNil predicate on the "session_window_status" field. +func SessionWindowStatusNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldSessionWindowStatus)) +} + +// SessionWindowStatusEqualFold applies the EqualFold predicate on the "session_window_status" field. +func SessionWindowStatusEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldSessionWindowStatus, v)) +} + +// SessionWindowStatusContainsFold applies the ContainsFold predicate on the "session_window_status" field. +func SessionWindowStatusContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldSessionWindowStatus, v)) +} + +// HasGroups applies the HasEdge predicate on the "groups" edge. +func HasGroups() predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupsWith applies the HasEdge predicate on the "groups" edge with a given conditions (other predicates). +func HasGroupsWith(preds ...predicate.Group) predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := newGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasProxy applies the HasEdge predicate on the "proxy" edge. +func HasProxy() predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, ProxyTable, ProxyColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasProxyWith applies the HasEdge predicate on the "proxy" edge with a given conditions (other predicates). +func HasProxyWith(preds ...predicate.Proxy) predicate.Account { + return predicate.Account(func(s *sql.Selector) { + step := newProxyStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge. 
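+// Edge predicates can also constrain the neighbor rows via HasUsageLogsWith
+// below; an illustrative query (usagelog.CreatedAtGT is assumed to be the
+// matching generated predicate in the usagelog package; since is a time.Time):
+//
+//	recentlyUsed, err := client.Account.Query().
+//		Where(account.HasUsageLogsWith(usagelog.CreatedAtGT(since))).
+//		All(ctx)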
+func HasUsageLogs() predicate.Account {
+	return predicate.Account(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with the given conditions (other predicates).
+func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.Account {
+	return predicate.Account(func(s *sql.Selector) {
+		step := newUsageLogsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasAccountGroups applies the HasEdge predicate on the "account_groups" edge.
+func HasAccountGroups() predicate.Account {
+	return predicate.Account(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasAccountGroupsWith applies the HasEdge predicate on the "account_groups" edge with the given conditions (other predicates).
+func HasAccountGroupsWith(preds ...predicate.AccountGroup) predicate.Account {
+	return predicate.Account(func(s *sql.Selector) {
+		step := newAccountGroupsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Account) predicate.Account {
+	return predicate.Account(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Account) predicate.Account {
+	return predicate.Account(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Account) predicate.Account {
+	return predicate.Account(sql.NotPredicates(p))
+}
diff --git a/backend/ent/account_create.go b/backend/ent/account_create.go
new file mode 100644
index 00000000..42a561cf
--- /dev/null
+++ b/backend/ent/account_create.go
@@ -0,0 +1,2296 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/proxy"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+)
+
+// AccountCreate is the builder for creating an Account entity.
+type AccountCreate struct {
+	config
+	mutation *AccountMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *AccountCreate) SetCreatedAt(v time.Time) *AccountCreate {
+	_c.mutation.SetCreatedAt(v)
+	return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *AccountCreate) SetNillableCreatedAt(v *time.Time) *AccountCreate {
+	if v != nil {
+		_c.SetCreatedAt(*v)
+	}
+	return _c
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_c *AccountCreate) SetUpdatedAt(v time.Time) *AccountCreate {
+	_c.mutation.SetUpdatedAt(v)
+	return _c
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (_c *AccountCreate) SetNillableUpdatedAt(v *time.Time) *AccountCreate {
+	if v != nil {
+		_c.SetUpdatedAt(*v)
+	}
+	return _c
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (_c *AccountCreate) SetDeletedAt(v time.Time) *AccountCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableDeletedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *AccountCreate) SetName(v string) *AccountCreate { + _c.mutation.SetName(v) + return _c +} + +// SetNotes sets the "notes" field. +func (_c *AccountCreate) SetNotes(v string) *AccountCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *AccountCreate) SetNillableNotes(v *string) *AccountCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetPlatform sets the "platform" field. +func (_c *AccountCreate) SetPlatform(v string) *AccountCreate { + _c.mutation.SetPlatform(v) + return _c +} + +// SetType sets the "type" field. +func (_c *AccountCreate) SetType(v string) *AccountCreate { + _c.mutation.SetType(v) + return _c +} + +// SetCredentials sets the "credentials" field. +func (_c *AccountCreate) SetCredentials(v map[string]interface{}) *AccountCreate { + _c.mutation.SetCredentials(v) + return _c +} + +// SetExtra sets the "extra" field. +func (_c *AccountCreate) SetExtra(v map[string]interface{}) *AccountCreate { + _c.mutation.SetExtra(v) + return _c +} + +// SetProxyID sets the "proxy_id" field. +func (_c *AccountCreate) SetProxyID(v int64) *AccountCreate { + _c.mutation.SetProxyID(v) + return _c +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. +func (_c *AccountCreate) SetNillableProxyID(v *int64) *AccountCreate { + if v != nil { + _c.SetProxyID(*v) + } + return _c +} + +// SetConcurrency sets the "concurrency" field. +func (_c *AccountCreate) SetConcurrency(v int) *AccountCreate { + _c.mutation.SetConcurrency(v) + return _c +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_c *AccountCreate) SetNillableConcurrency(v *int) *AccountCreate { + if v != nil { + _c.SetConcurrency(*v) + } + return _c +} + +// SetPriority sets the "priority" field. +func (_c *AccountCreate) SetPriority(v int) *AccountCreate { + _c.mutation.SetPriority(v) + return _c +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_c *AccountCreate) SetNillablePriority(v *int) *AccountCreate { + if v != nil { + _c.SetPriority(*v) + } + return _c +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_c *AccountCreate) SetRateMultiplier(v float64) *AccountCreate { + _c.mutation.SetRateMultiplier(v) + return _c +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_c *AccountCreate) SetNillableRateMultiplier(v *float64) *AccountCreate { + if v != nil { + _c.SetRateMultiplier(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *AccountCreate) SetStatus(v string) *AccountCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *AccountCreate) SetNillableStatus(v *string) *AccountCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetErrorMessage sets the "error_message" field. 
+func (_c *AccountCreate) SetErrorMessage(v string) *AccountCreate { + _c.mutation.SetErrorMessage(v) + return _c +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_c *AccountCreate) SetNillableErrorMessage(v *string) *AccountCreate { + if v != nil { + _c.SetErrorMessage(*v) + } + return _c +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_c *AccountCreate) SetLastUsedAt(v time.Time) *AccountCreate { + _c.mutation.SetLastUsedAt(v) + return _c +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableLastUsedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetLastUsedAt(*v) + } + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *AccountCreate) SetExpiresAt(v time.Time) *AccountCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableExpiresAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetExpiresAt(*v) + } + return _c +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (_c *AccountCreate) SetAutoPauseOnExpired(v bool) *AccountCreate { + _c.mutation.SetAutoPauseOnExpired(v) + return _c +} + +// SetNillableAutoPauseOnExpired sets the "auto_pause_on_expired" field if the given value is not nil. +func (_c *AccountCreate) SetNillableAutoPauseOnExpired(v *bool) *AccountCreate { + if v != nil { + _c.SetAutoPauseOnExpired(*v) + } + return _c +} + +// SetSchedulable sets the "schedulable" field. +func (_c *AccountCreate) SetSchedulable(v bool) *AccountCreate { + _c.mutation.SetSchedulable(v) + return _c +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSchedulable(v *bool) *AccountCreate { + if v != nil { + _c.SetSchedulable(*v) + } + return _c +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_c *AccountCreate) SetRateLimitedAt(v time.Time) *AccountCreate { + _c.mutation.SetRateLimitedAt(v) + return _c +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableRateLimitedAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetRateLimitedAt(*v) + } + return _c +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_c *AccountCreate) SetRateLimitResetAt(v time.Time) *AccountCreate { + _c.mutation.SetRateLimitResetAt(v) + return _c +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. +func (_c *AccountCreate) SetNillableRateLimitResetAt(v *time.Time) *AccountCreate { + if v != nil { + _c.SetRateLimitResetAt(*v) + } + return _c +} + +// SetOverloadUntil sets the "overload_until" field. +func (_c *AccountCreate) SetOverloadUntil(v time.Time) *AccountCreate { + _c.mutation.SetOverloadUntil(v) + return _c +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. +func (_c *AccountCreate) SetNillableOverloadUntil(v *time.Time) *AccountCreate { + if v != nil { + _c.SetOverloadUntil(*v) + } + return _c +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate { + _c.mutation.SetSessionWindowStart(v) + return _c +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. 
+func (_c *AccountCreate) SetNillableSessionWindowStart(v *time.Time) *AccountCreate { + if v != nil { + _c.SetSessionWindowStart(*v) + } + return _c +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_c *AccountCreate) SetSessionWindowEnd(v time.Time) *AccountCreate { + _c.mutation.SetSessionWindowEnd(v) + return _c +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSessionWindowEnd(v *time.Time) *AccountCreate { + if v != nil { + _c.SetSessionWindowEnd(*v) + } + return _c +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_c *AccountCreate) SetSessionWindowStatus(v string) *AccountCreate { + _c.mutation.SetSessionWindowStatus(v) + return _c +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. +func (_c *AccountCreate) SetNillableSessionWindowStatus(v *string) *AccountCreate { + if v != nil { + _c.SetSessionWindowStatus(*v) + } + return _c +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. +func (_c *AccountCreate) AddGroupIDs(ids ...int64) *AccountCreate { + _c.mutation.AddGroupIDs(ids...) + return _c +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_c *AccountCreate) AddGroups(v ...*Group) *AccountCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddGroupIDs(ids...) +} + +// SetProxy sets the "proxy" edge to the Proxy entity. +func (_c *AccountCreate) SetProxy(v *Proxy) *AccountCreate { + return _c.SetProxyID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_c *AccountCreate) AddUsageLogIDs(ids ...int64) *AccountCreate { + _c.mutation.AddUsageLogIDs(ids...) + return _c +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_c *AccountCreate) AddUsageLogs(v ...*UsageLog) *AccountCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageLogIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_c *AccountCreate) Mutation() *AccountMutation { + return _c.mutation +} + +// Save creates the Account in the database. +func (_c *AccountCreate) Save(ctx context.Context) (*Account, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AccountCreate) SaveX(ctx context.Context) *Account { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AccountCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
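+// For example (hedged; client, ctx, and the field values are illustrative
+// assumptions), a minimal create that leaves status, priority, concurrency,
+// and the other optional fields to these defaults:
+//
+//	acc, err := client.Account.Create().
+//		SetName("primary").
+//		SetPlatform("claude").
+//		SetType("oauth").
+//		Save(ctx)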
+func (_c *AccountCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if account.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized account.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := account.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if account.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Credentials(); !ok { + if account.DefaultCredentials == nil { + return fmt.Errorf("ent: uninitialized account.DefaultCredentials (forgotten import ent/runtime?)") + } + v := account.DefaultCredentials() + _c.mutation.SetCredentials(v) + } + if _, ok := _c.mutation.Extra(); !ok { + if account.DefaultExtra == nil { + return fmt.Errorf("ent: uninitialized account.DefaultExtra (forgotten import ent/runtime?)") + } + v := account.DefaultExtra() + _c.mutation.SetExtra(v) + } + if _, ok := _c.mutation.Concurrency(); !ok { + v := account.DefaultConcurrency + _c.mutation.SetConcurrency(v) + } + if _, ok := _c.mutation.Priority(); !ok { + v := account.DefaultPriority + _c.mutation.SetPriority(v) + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + v := account.DefaultRateMultiplier + _c.mutation.SetRateMultiplier(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := account.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.AutoPauseOnExpired(); !ok { + v := account.DefaultAutoPauseOnExpired + _c.mutation.SetAutoPauseOnExpired(v) + } + if _, ok := _c.mutation.Schedulable(); !ok { + v := account.DefaultSchedulable + _c.mutation.SetSchedulable(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
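+// Validation failures surface from Save/Exec as *ent.ValidationError. A hedged
+// handling sketch (client, ctx, and the empty name are illustrative):
+//
+//	if _, err := client.Account.Create().SetName("").Save(ctx); err != nil {
+//		var verr *ent.ValidationError
+//		if errors.As(err, &verr) {
+//			log.Printf("invalid field %q: %v", verr.Name, verr)
+//		}
+//	}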
+func (_c *AccountCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Account.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Account.updated_at"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Account.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := account.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)} + } + } + if _, ok := _c.mutation.Platform(); !ok { + return &ValidationError{Name: "platform", err: errors.New(`ent: missing required field "Account.platform"`)} + } + if v, ok := _c.mutation.Platform(); ok { + if err := account.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)} + } + } + if _, ok := _c.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "Account.type"`)} + } + if v, ok := _c.mutation.GetType(); ok { + if err := account.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)} + } + } + if _, ok := _c.mutation.Credentials(); !ok { + return &ValidationError{Name: "credentials", err: errors.New(`ent: missing required field "Account.credentials"`)} + } + if _, ok := _c.mutation.Extra(); !ok { + return &ValidationError{Name: "extra", err: errors.New(`ent: missing required field "Account.extra"`)} + } + if _, ok := _c.mutation.Concurrency(); !ok { + return &ValidationError{Name: "concurrency", err: errors.New(`ent: missing required field "Account.concurrency"`)} + } + if _, ok := _c.mutation.Priority(); !ok { + return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "Account.priority"`)} + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + return &ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "Account.rate_multiplier"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Account.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := account.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)} + } + } + if _, ok := _c.mutation.AutoPauseOnExpired(); !ok { + return &ValidationError{Name: "auto_pause_on_expired", err: errors.New(`ent: missing required field "Account.auto_pause_on_expired"`)} + } + if _, ok := _c.mutation.Schedulable(); !ok { + return &ValidationError{Name: "schedulable", err: errors.New(`ent: missing required field "Account.schedulable"`)} + } + if v, ok := _c.mutation.SessionWindowStatus(); ok { + if err := account.SessionWindowStatusValidator(v); err != nil { + return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)} + } + } + return nil +} + +func (_c *AccountCreate) sqlSave(ctx context.Context) (*Account, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, 
_c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) { + var ( + _node = &Account{config: _c.config} + _spec = sqlgraph.NewCreateSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(account.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(account.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(account.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(account.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(account.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if value, ok := _c.mutation.Platform(); ok { + _spec.SetField(account.FieldPlatform, field.TypeString, value) + _node.Platform = value + } + if value, ok := _c.mutation.GetType(); ok { + _spec.SetField(account.FieldType, field.TypeString, value) + _node.Type = value + } + if value, ok := _c.mutation.Credentials(); ok { + _spec.SetField(account.FieldCredentials, field.TypeJSON, value) + _node.Credentials = value + } + if value, ok := _c.mutation.Extra(); ok { + _spec.SetField(account.FieldExtra, field.TypeJSON, value) + _node.Extra = value + } + if value, ok := _c.mutation.Concurrency(); ok { + _spec.SetField(account.FieldConcurrency, field.TypeInt, value) + _node.Concurrency = value + } + if value, ok := _c.mutation.Priority(); ok { + _spec.SetField(account.FieldPriority, field.TypeInt, value) + _node.Priority = value + } + if value, ok := _c.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + _node.RateMultiplier = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(account.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.ErrorMessage(); ok { + _spec.SetField(account.FieldErrorMessage, field.TypeString, value) + _node.ErrorMessage = &value + } + if value, ok := _c.mutation.LastUsedAt(); ok { + _spec.SetField(account.FieldLastUsedAt, field.TypeTime, value) + _node.LastUsedAt = &value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(account.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = &value + } + if value, ok := _c.mutation.AutoPauseOnExpired(); ok { + _spec.SetField(account.FieldAutoPauseOnExpired, field.TypeBool, value) + _node.AutoPauseOnExpired = value + } + if value, ok := _c.mutation.Schedulable(); ok { + _spec.SetField(account.FieldSchedulable, field.TypeBool, value) + _node.Schedulable = value + } + if value, ok := _c.mutation.RateLimitedAt(); ok { + _spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value) + _node.RateLimitedAt = &value + } + if value, ok := _c.mutation.RateLimitResetAt(); ok { + _spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value) + _node.RateLimitResetAt = &value + } + if value, ok := _c.mutation.OverloadUntil(); ok { + _spec.SetField(account.FieldOverloadUntil, 
field.TypeTime, value)
+		_node.OverloadUntil = &value
+	}
+	if value, ok := _c.mutation.SessionWindowStart(); ok {
+		_spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value)
+		_node.SessionWindowStart = &value
+	}
+	if value, ok := _c.mutation.SessionWindowEnd(); ok {
+		_spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value)
+		_node.SessionWindowEnd = &value
+	}
+	if value, ok := _c.mutation.SessionWindowStatus(); ok {
+		_spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value)
+		_node.SessionWindowStatus = &value
+	}
+	if nodes := _c.mutation.GroupsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2M,
+			Inverse: false,
+			Table:   account.GroupsTable,
+			Columns: account.GroupsPrimaryKey,
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		createE := &AccountGroupCreate{config: _c.config, mutation: newAccountGroupMutation(_c.config, OpCreate)}
+		createE.defaults()
+		_, specE := createE.createSpec()
+		edge.Target.Fields = specE.Fields
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.ProxyIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: false,
+			Table:   account.ProxyTable,
+			Columns: []string{account.ProxyColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.ProxyID = &nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.UsageLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   account.UsageLogsTable,
+			Columns: []string{account.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.Account.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AccountUpsert) {
+//			u.SetCreatedAt(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *AccountCreate) OnConflict(opts ...sql.ConflictOption) *AccountUpsertOne {
+	_c.conflict = opts
+	return &AccountUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.Account.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *AccountCreate) OnConflictColumns(columns ...string) *AccountUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &AccountUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// AccountUpsertOne is the builder for "upsert"-ing
+	//  one Account node.
+	AccountUpsertOne struct {
+		create *AccountCreate
+	}
+
+	// AccountUpsert is the "OnConflict" setter.
+	AccountUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdatedAt sets the "updated_at" field.
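+// A hedged sketch of using these setters inside an upsert resolution (client,
+// ctx, the conflict column, and the field values are illustrative assumptions):
+//
+//	err := client.Account.Create().
+//		SetName("primary").
+//		SetPlatform("claude").
+//		SetType("oauth").
+//		OnConflictColumns(account.FieldName).
+//		Update(func(u *ent.AccountUpsert) {
+//			u.SetUpdatedAt(time.Now())
+//		}).
+//		Exec(ctx)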
+func (u *AccountUpsert) SetUpdatedAt(v time.Time) *AccountUpsert { + u.Set(account.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AccountUpsert) UpdateUpdatedAt() *AccountUpsert { + u.SetExcluded(account.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *AccountUpsert) SetDeletedAt(v time.Time) *AccountUpsert { + u.Set(account.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *AccountUpsert) UpdateDeletedAt() *AccountUpsert { + u.SetExcluded(account.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *AccountUpsert) ClearDeletedAt() *AccountUpsert { + u.SetNull(account.FieldDeletedAt) + return u +} + +// SetName sets the "name" field. +func (u *AccountUpsert) SetName(v string) *AccountUpsert { + u.Set(account.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccountUpsert) UpdateName() *AccountUpsert { + u.SetExcluded(account.FieldName) + return u +} + +// SetNotes sets the "notes" field. +func (u *AccountUpsert) SetNotes(v string) *AccountUpsert { + u.Set(account.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *AccountUpsert) UpdateNotes() *AccountUpsert { + u.SetExcluded(account.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *AccountUpsert) ClearNotes() *AccountUpsert { + u.SetNull(account.FieldNotes) + return u +} + +// SetPlatform sets the "platform" field. +func (u *AccountUpsert) SetPlatform(v string) *AccountUpsert { + u.Set(account.FieldPlatform, v) + return u +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *AccountUpsert) UpdatePlatform() *AccountUpsert { + u.SetExcluded(account.FieldPlatform) + return u +} + +// SetType sets the "type" field. +func (u *AccountUpsert) SetType(v string) *AccountUpsert { + u.Set(account.FieldType, v) + return u +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *AccountUpsert) UpdateType() *AccountUpsert { + u.SetExcluded(account.FieldType) + return u +} + +// SetCredentials sets the "credentials" field. +func (u *AccountUpsert) SetCredentials(v map[string]interface{}) *AccountUpsert { + u.Set(account.FieldCredentials, v) + return u +} + +// UpdateCredentials sets the "credentials" field to the value that was provided on create. +func (u *AccountUpsert) UpdateCredentials() *AccountUpsert { + u.SetExcluded(account.FieldCredentials) + return u +} + +// SetExtra sets the "extra" field. +func (u *AccountUpsert) SetExtra(v map[string]interface{}) *AccountUpsert { + u.Set(account.FieldExtra, v) + return u +} + +// UpdateExtra sets the "extra" field to the value that was provided on create. +func (u *AccountUpsert) UpdateExtra() *AccountUpsert { + u.SetExcluded(account.FieldExtra) + return u +} + +// SetProxyID sets the "proxy_id" field. +func (u *AccountUpsert) SetProxyID(v int64) *AccountUpsert { + u.Set(account.FieldProxyID, v) + return u +} + +// UpdateProxyID sets the "proxy_id" field to the value that was provided on create. +func (u *AccountUpsert) UpdateProxyID() *AccountUpsert { + u.SetExcluded(account.FieldProxyID) + return u +} + +// ClearProxyID clears the value of the "proxy_id" field. 
+func (u *AccountUpsert) ClearProxyID() *AccountUpsert { + u.SetNull(account.FieldProxyID) + return u +} + +// SetConcurrency sets the "concurrency" field. +func (u *AccountUpsert) SetConcurrency(v int) *AccountUpsert { + u.Set(account.FieldConcurrency, v) + return u +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *AccountUpsert) UpdateConcurrency() *AccountUpsert { + u.SetExcluded(account.FieldConcurrency) + return u +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *AccountUpsert) AddConcurrency(v int) *AccountUpsert { + u.Add(account.FieldConcurrency, v) + return u +} + +// SetPriority sets the "priority" field. +func (u *AccountUpsert) SetPriority(v int) *AccountUpsert { + u.Set(account.FieldPriority, v) + return u +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountUpsert) UpdatePriority() *AccountUpsert { + u.SetExcluded(account.FieldPriority) + return u +} + +// AddPriority adds v to the "priority" field. +func (u *AccountUpsert) AddPriority(v int) *AccountUpsert { + u.Add(account.FieldPriority, v) + return u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsert) SetRateMultiplier(v float64) *AccountUpsert { + u.Set(account.FieldRateMultiplier, v) + return u +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsert) UpdateRateMultiplier() *AccountUpsert { + u.SetExcluded(account.FieldRateMultiplier) + return u +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *AccountUpsert) AddRateMultiplier(v float64) *AccountUpsert { + u.Add(account.FieldRateMultiplier, v) + return u +} + +// SetStatus sets the "status" field. +func (u *AccountUpsert) SetStatus(v string) *AccountUpsert { + u.Set(account.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AccountUpsert) UpdateStatus() *AccountUpsert { + u.SetExcluded(account.FieldStatus) + return u +} + +// SetErrorMessage sets the "error_message" field. +func (u *AccountUpsert) SetErrorMessage(v string) *AccountUpsert { + u.Set(account.FieldErrorMessage, v) + return u +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *AccountUpsert) UpdateErrorMessage() *AccountUpsert { + u.SetExcluded(account.FieldErrorMessage) + return u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *AccountUpsert) ClearErrorMessage() *AccountUpsert { + u.SetNull(account.FieldErrorMessage) + return u +} + +// SetLastUsedAt sets the "last_used_at" field. +func (u *AccountUpsert) SetLastUsedAt(v time.Time) *AccountUpsert { + u.Set(account.FieldLastUsedAt, v) + return u +} + +// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create. +func (u *AccountUpsert) UpdateLastUsedAt() *AccountUpsert { + u.SetExcluded(account.FieldLastUsedAt) + return u +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (u *AccountUpsert) ClearLastUsedAt() *AccountUpsert { + u.SetNull(account.FieldLastUsedAt) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *AccountUpsert) SetExpiresAt(v time.Time) *AccountUpsert { + u.Set(account.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. 
+func (u *AccountUpsert) UpdateExpiresAt() *AccountUpsert { + u.SetExcluded(account.FieldExpiresAt) + return u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *AccountUpsert) ClearExpiresAt() *AccountUpsert { + u.SetNull(account.FieldExpiresAt) + return u +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (u *AccountUpsert) SetAutoPauseOnExpired(v bool) *AccountUpsert { + u.Set(account.FieldAutoPauseOnExpired, v) + return u +} + +// UpdateAutoPauseOnExpired sets the "auto_pause_on_expired" field to the value that was provided on create. +func (u *AccountUpsert) UpdateAutoPauseOnExpired() *AccountUpsert { + u.SetExcluded(account.FieldAutoPauseOnExpired) + return u +} + +// SetSchedulable sets the "schedulable" field. +func (u *AccountUpsert) SetSchedulable(v bool) *AccountUpsert { + u.Set(account.FieldSchedulable, v) + return u +} + +// UpdateSchedulable sets the "schedulable" field to the value that was provided on create. +func (u *AccountUpsert) UpdateSchedulable() *AccountUpsert { + u.SetExcluded(account.FieldSchedulable) + return u +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (u *AccountUpsert) SetRateLimitedAt(v time.Time) *AccountUpsert { + u.Set(account.FieldRateLimitedAt, v) + return u +} + +// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create. +func (u *AccountUpsert) UpdateRateLimitedAt() *AccountUpsert { + u.SetExcluded(account.FieldRateLimitedAt) + return u +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (u *AccountUpsert) ClearRateLimitedAt() *AccountUpsert { + u.SetNull(account.FieldRateLimitedAt) + return u +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (u *AccountUpsert) SetRateLimitResetAt(v time.Time) *AccountUpsert { + u.Set(account.FieldRateLimitResetAt, v) + return u +} + +// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create. +func (u *AccountUpsert) UpdateRateLimitResetAt() *AccountUpsert { + u.SetExcluded(account.FieldRateLimitResetAt) + return u +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (u *AccountUpsert) ClearRateLimitResetAt() *AccountUpsert { + u.SetNull(account.FieldRateLimitResetAt) + return u +} + +// SetOverloadUntil sets the "overload_until" field. +func (u *AccountUpsert) SetOverloadUntil(v time.Time) *AccountUpsert { + u.Set(account.FieldOverloadUntil, v) + return u +} + +// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create. +func (u *AccountUpsert) UpdateOverloadUntil() *AccountUpsert { + u.SetExcluded(account.FieldOverloadUntil) + return u +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (u *AccountUpsert) ClearOverloadUntil() *AccountUpsert { + u.SetNull(account.FieldOverloadUntil) + return u +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert { + u.Set(account.FieldSessionWindowStart, v) + return u +} + +// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create. +func (u *AccountUpsert) UpdateSessionWindowStart() *AccountUpsert { + u.SetExcluded(account.FieldSessionWindowStart) + return u +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. 
+func (u *AccountUpsert) ClearSessionWindowStart() *AccountUpsert { + u.SetNull(account.FieldSessionWindowStart) + return u +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (u *AccountUpsert) SetSessionWindowEnd(v time.Time) *AccountUpsert { + u.Set(account.FieldSessionWindowEnd, v) + return u +} + +// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create. +func (u *AccountUpsert) UpdateSessionWindowEnd() *AccountUpsert { + u.SetExcluded(account.FieldSessionWindowEnd) + return u +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (u *AccountUpsert) ClearSessionWindowEnd() *AccountUpsert { + u.SetNull(account.FieldSessionWindowEnd) + return u +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (u *AccountUpsert) SetSessionWindowStatus(v string) *AccountUpsert { + u.Set(account.FieldSessionWindowStatus, v) + return u +} + +// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create. +func (u *AccountUpsert) UpdateSessionWindowStatus() *AccountUpsert { + u.SetExcluded(account.FieldSessionWindowStatus) + return u +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (u *AccountUpsert) ClearSessionWindowStatus() *AccountUpsert { + u.SetNull(account.FieldSessionWindowStatus) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountUpsertOne) UpdateNewValues() *AccountUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(account.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountUpsertOne) Ignore() *AccountUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountUpsertOne) DoNothing() *AccountUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountCreate.OnConflict +// documentation for more info. +func (u *AccountUpsertOne) Update(set func(*AccountUpsert)) *AccountUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AccountUpsertOne) SetUpdatedAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateUpdatedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. 
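+// A hedged sketch of the one-row upsert flow these helpers feed into (client,
+// ctx, and the values are illustrative assumptions; with DoNothing, a
+// conflicting row may surface as sql.ErrNoRows on some dialects):
+//
+//	err := client.Account.Create().
+//		SetName("primary").
+//		SetPlatform("claude").
+//		SetType("oauth").
+//		OnConflictColumns(account.FieldName).
+//		DoNothing().
+//		Exec(ctx)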
+func (u *AccountUpsertOne) SetDeletedAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateDeletedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *AccountUpsertOne) ClearDeletedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *AccountUpsertOne) SetName(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateName() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateName() + }) +} + +// SetNotes sets the "notes" field. +func (u *AccountUpsertOne) SetNotes(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateNotes() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *AccountUpsertOne) ClearNotes() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearNotes() + }) +} + +// SetPlatform sets the "platform" field. +func (u *AccountUpsertOne) SetPlatform(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdatePlatform() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdatePlatform() + }) +} + +// SetType sets the "type" field. +func (u *AccountUpsertOne) SetType(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateType() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateType() + }) +} + +// SetCredentials sets the "credentials" field. +func (u *AccountUpsertOne) SetCredentials(v map[string]interface{}) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetCredentials(v) + }) +} + +// UpdateCredentials sets the "credentials" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateCredentials() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateCredentials() + }) +} + +// SetExtra sets the "extra" field. +func (u *AccountUpsertOne) SetExtra(v map[string]interface{}) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetExtra(v) + }) +} + +// UpdateExtra sets the "extra" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateExtra() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateExtra() + }) +} + +// SetProxyID sets the "proxy_id" field. +func (u *AccountUpsertOne) SetProxyID(v int64) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetProxyID(v) + }) +} + +// UpdateProxyID sets the "proxy_id" field to the value that was provided on create. 
+func (u *AccountUpsertOne) UpdateProxyID() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateProxyID() + }) +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (u *AccountUpsertOne) ClearProxyID() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearProxyID() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *AccountUpsertOne) SetConcurrency(v int) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *AccountUpsertOne) AddConcurrency(v int) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateConcurrency() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateConcurrency() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountUpsertOne) SetPriority(v int) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountUpsertOne) AddPriority(v int) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdatePriority() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdatePriority() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsertOne) SetRateMultiplier(v float64) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *AccountUpsertOne) AddRateMultiplier(v float64) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateRateMultiplier() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetStatus sets the "status" field. +func (u *AccountUpsertOne) SetStatus(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateStatus() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateStatus() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *AccountUpsertOne) SetErrorMessage(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateErrorMessage() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *AccountUpsertOne) ClearErrorMessage() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearErrorMessage() + }) +} + +// SetLastUsedAt sets the "last_used_at" field. 
+func (u *AccountUpsertOne) SetLastUsedAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetLastUsedAt(v) + }) +} + +// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateLastUsedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateLastUsedAt() + }) +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (u *AccountUpsertOne) ClearLastUsedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearLastUsedAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *AccountUpsertOne) SetExpiresAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateExpiresAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateExpiresAt() + }) +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *AccountUpsertOne) ClearExpiresAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearExpiresAt() + }) +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (u *AccountUpsertOne) SetAutoPauseOnExpired(v bool) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetAutoPauseOnExpired(v) + }) +} + +// UpdateAutoPauseOnExpired sets the "auto_pause_on_expired" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateAutoPauseOnExpired() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateAutoPauseOnExpired() + }) +} + +// SetSchedulable sets the "schedulable" field. +func (u *AccountUpsertOne) SetSchedulable(v bool) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetSchedulable(v) + }) +} + +// UpdateSchedulable sets the "schedulable" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateSchedulable() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateSchedulable() + }) +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (u *AccountUpsertOne) SetRateLimitedAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitedAt(v) + }) +} + +// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateRateLimitedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitedAt() + }) +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (u *AccountUpsertOne) ClearRateLimitedAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearRateLimitedAt() + }) +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (u *AccountUpsertOne) SetRateLimitResetAt(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitResetAt(v) + }) +} + +// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateRateLimitResetAt() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitResetAt() + }) +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. 
+func (u *AccountUpsertOne) ClearRateLimitResetAt() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.ClearRateLimitResetAt()
+	})
+}
+
+// SetOverloadUntil sets the "overload_until" field.
+func (u *AccountUpsertOne) SetOverloadUntil(v time.Time) *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.SetOverloadUntil(v)
+	})
+}
+
+// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create.
+func (u *AccountUpsertOne) UpdateOverloadUntil() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.UpdateOverloadUntil()
+	})
+}
+
+// ClearOverloadUntil clears the value of the "overload_until" field.
+func (u *AccountUpsertOne) ClearOverloadUntil() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.ClearOverloadUntil()
+	})
+}
+
+// SetSessionWindowStart sets the "session_window_start" field.
+func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.SetSessionWindowStart(v)
+	})
+}
+
+// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create.
+func (u *AccountUpsertOne) UpdateSessionWindowStart() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.UpdateSessionWindowStart()
+	})
+}
+
+// ClearSessionWindowStart clears the value of the "session_window_start" field.
+func (u *AccountUpsertOne) ClearSessionWindowStart() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.ClearSessionWindowStart()
+	})
+}
+
+// SetSessionWindowEnd sets the "session_window_end" field.
+func (u *AccountUpsertOne) SetSessionWindowEnd(v time.Time) *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.SetSessionWindowEnd(v)
+	})
+}
+
+// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create.
+func (u *AccountUpsertOne) UpdateSessionWindowEnd() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.UpdateSessionWindowEnd()
+	})
+}
+
+// ClearSessionWindowEnd clears the value of the "session_window_end" field.
+func (u *AccountUpsertOne) ClearSessionWindowEnd() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.ClearSessionWindowEnd()
+	})
+}
+
+// SetSessionWindowStatus sets the "session_window_status" field.
+func (u *AccountUpsertOne) SetSessionWindowStatus(v string) *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.SetSessionWindowStatus(v)
+	})
+}
+
+// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create.
+func (u *AccountUpsertOne) UpdateSessionWindowStatus() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.UpdateSessionWindowStatus()
+	})
+}
+
+// ClearSessionWindowStatus clears the value of the "session_window_status" field.
+func (u *AccountUpsertOne) ClearSessionWindowStatus() *AccountUpsertOne {
+	return u.Update(func(s *AccountUpsert) {
+		s.ClearSessionWindowStatus()
+	})
+}
+
+// Exec executes the query.
+func (u *AccountUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for AccountCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AccountUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
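+// A hedged sketch (client, ctx, the conflict column, and the field values are
+// illustrative assumptions):
+//
+//	id, err := client.Account.Create().
+//		SetName("primary").
+//		SetPlatform("claude").
+//		SetType("oauth").
+//		OnConflictColumns(account.FieldName).
+//		UpdateNewValues().
+//		ID(ctx)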
+func (u *AccountUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *AccountUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// AccountCreateBulk is the builder for creating many Account entities in bulk. +type AccountCreateBulk struct { + config + err error + builders []*AccountCreate + conflict []sql.ConflictOption +} + +// Save creates the Account entities in the database. +func (_c *AccountCreateBulk) Save(ctx context.Context) ([]*Account, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Account, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AccountMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *AccountCreateBulk) SaveX(ctx context.Context) []*Account { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AccountCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Account.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AccountUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *AccountCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountUpsertBulk { + _c.conflict = opts + return &AccountUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AccountCreateBulk) OnConflictColumns(columns ...string) *AccountUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AccountUpsertBulk{ + create: _c, + } +} + +// AccountUpsertBulk is the builder for "upsert"-ing +// a bulk of Account nodes. +type AccountUpsertBulk struct { + create *AccountCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountUpsertBulk) UpdateNewValues() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(account.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Account.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountUpsertBulk) Ignore() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountUpsertBulk) DoNothing() *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountCreateBulk.OnConflict +// documentation for more info. +func (u *AccountUpsertBulk) Update(set func(*AccountUpsert)) *AccountUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AccountUpsertBulk) SetUpdatedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateUpdatedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *AccountUpsertBulk) SetDeletedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateDeletedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *AccountUpsertBulk) ClearDeletedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *AccountUpsertBulk) SetName(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. 
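+//
+// A sketch of selective conflict resolution; treating "platform" as the unique
+// conflict target is an assumption for illustration only:
+//
+//	err := client.Account.CreateBulk(builders...).
+//		OnConflictColumns(account.FieldPlatform).
+//		Update(func(u *ent.AccountUpsert) {
+//			// Keep the name that was proposed on create.
+//			u.UpdateName()
+//		}).
+//		Exec(ctx)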
+func (u *AccountUpsertBulk) UpdateName() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateName() + }) +} + +// SetNotes sets the "notes" field. +func (u *AccountUpsertBulk) SetNotes(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateNotes() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *AccountUpsertBulk) ClearNotes() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearNotes() + }) +} + +// SetPlatform sets the "platform" field. +func (u *AccountUpsertBulk) SetPlatform(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdatePlatform() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdatePlatform() + }) +} + +// SetType sets the "type" field. +func (u *AccountUpsertBulk) SetType(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateType() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateType() + }) +} + +// SetCredentials sets the "credentials" field. +func (u *AccountUpsertBulk) SetCredentials(v map[string]interface{}) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetCredentials(v) + }) +} + +// UpdateCredentials sets the "credentials" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateCredentials() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateCredentials() + }) +} + +// SetExtra sets the "extra" field. +func (u *AccountUpsertBulk) SetExtra(v map[string]interface{}) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetExtra(v) + }) +} + +// UpdateExtra sets the "extra" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateExtra() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateExtra() + }) +} + +// SetProxyID sets the "proxy_id" field. +func (u *AccountUpsertBulk) SetProxyID(v int64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetProxyID(v) + }) +} + +// UpdateProxyID sets the "proxy_id" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateProxyID() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateProxyID() + }) +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (u *AccountUpsertBulk) ClearProxyID() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearProxyID() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *AccountUpsertBulk) SetConcurrency(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *AccountUpsertBulk) AddConcurrency(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateConcurrency() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateConcurrency() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountUpsertBulk) SetPriority(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountUpsertBulk) AddPriority(v int) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdatePriority() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdatePriority() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsertBulk) SetRateMultiplier(v float64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *AccountUpsertBulk) AddRateMultiplier(v float64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateMultiplier() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetStatus sets the "status" field. +func (u *AccountUpsertBulk) SetStatus(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateStatus() + }) +} + +// SetErrorMessage sets the "error_message" field. +func (u *AccountUpsertBulk) SetErrorMessage(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetErrorMessage(v) + }) +} + +// UpdateErrorMessage sets the "error_message" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateErrorMessage() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateErrorMessage() + }) +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (u *AccountUpsertBulk) ClearErrorMessage() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearErrorMessage() + }) +} + +// SetLastUsedAt sets the "last_used_at" field. +func (u *AccountUpsertBulk) SetLastUsedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetLastUsedAt(v) + }) +} + +// UpdateLastUsedAt sets the "last_used_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateLastUsedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateLastUsedAt() + }) +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (u *AccountUpsertBulk) ClearLastUsedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearLastUsedAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *AccountUpsertBulk) SetExpiresAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateExpiresAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateExpiresAt() + }) +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *AccountUpsertBulk) ClearExpiresAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearExpiresAt() + }) +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (u *AccountUpsertBulk) SetAutoPauseOnExpired(v bool) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetAutoPauseOnExpired(v) + }) +} + +// UpdateAutoPauseOnExpired sets the "auto_pause_on_expired" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateAutoPauseOnExpired() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateAutoPauseOnExpired() + }) +} + +// SetSchedulable sets the "schedulable" field. +func (u *AccountUpsertBulk) SetSchedulable(v bool) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSchedulable(v) + }) +} + +// UpdateSchedulable sets the "schedulable" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSchedulable() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSchedulable() + }) +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (u *AccountUpsertBulk) SetRateLimitedAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitedAt(v) + }) +} + +// UpdateRateLimitedAt sets the "rate_limited_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateLimitedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitedAt() + }) +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (u *AccountUpsertBulk) ClearRateLimitedAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearRateLimitedAt() + }) +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (u *AccountUpsertBulk) SetRateLimitResetAt(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateLimitResetAt(v) + }) +} + +// UpdateRateLimitResetAt sets the "rate_limit_reset_at" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateLimitResetAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateLimitResetAt() + }) +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (u *AccountUpsertBulk) ClearRateLimitResetAt() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearRateLimitResetAt() + }) +} + +// SetOverloadUntil sets the "overload_until" field. +func (u *AccountUpsertBulk) SetOverloadUntil(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetOverloadUntil(v) + }) +} + +// UpdateOverloadUntil sets the "overload_until" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateOverloadUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateOverloadUntil() + }) +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (u *AccountUpsertBulk) ClearOverloadUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearOverloadUntil() + }) +} + +// SetSessionWindowStart sets the "session_window_start" field. 
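+//
+// A sketch of overriding a single column after resolving with the new values
+// (the unique "name" index and the time value are illustrative assumptions):
+//
+//	err := client.Account.CreateBulk(builders...).
+//		OnConflictColumns(account.FieldName).
+//		UpdateNewValues().
+//		SetSessionWindowStart(time.Now()).
+//		Exec(ctx)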
+func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowStart(v) + }) +} + +// UpdateSessionWindowStart sets the "session_window_start" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSessionWindowStart() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowStart() + }) +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (u *AccountUpsertBulk) ClearSessionWindowStart() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowStart() + }) +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (u *AccountUpsertBulk) SetSessionWindowEnd(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowEnd(v) + }) +} + +// UpdateSessionWindowEnd sets the "session_window_end" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSessionWindowEnd() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowEnd() + }) +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (u *AccountUpsertBulk) ClearSessionWindowEnd() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowEnd() + }) +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (u *AccountUpsertBulk) SetSessionWindowStatus(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetSessionWindowStatus(v) + }) +} + +// UpdateSessionWindowStatus sets the "session_window_status" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateSessionWindowStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateSessionWindowStatus() + }) +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (u *AccountUpsertBulk) ClearSessionWindowStatus() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearSessionWindowStatus() + }) +} + +// Exec executes the query. +func (u *AccountUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AccountCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AccountCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AccountUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/account_delete.go b/backend/ent/account_delete.go new file mode 100644 index 00000000..44cf2f55 --- /dev/null +++ b/backend/ent/account_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountDelete is the builder for deleting a Account entity. +type AccountDelete struct { + config + hooks []Hook + mutation *AccountMutation +} + +// Where appends a list predicates to the AccountDelete builder. 
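+//
+// For example, a sketch that deletes every matching account and returns the
+// affected count ("inactive" is an illustrative status value, not one defined
+// by the schema):
+//
+//	n, err := client.Account.Delete().
+//		Where(account.StatusEQ("inactive")).
+//		Exec(ctx)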
+func (_d *AccountDelete) Where(ps ...predicate.Account) *AccountDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *AccountDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *AccountDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// AccountDeleteOne is the builder for deleting a single Account entity. +type AccountDeleteOne struct { + _d *AccountDelete +} + +// Where appends a list predicates to the AccountDelete builder. +func (_d *AccountDeleteOne) Where(ps ...predicate.Account) *AccountDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *AccountDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{account.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *AccountDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/account_query.go b/backend/ent/account_query.go new file mode 100644 index 00000000..1761fa63 --- /dev/null +++ b/backend/ent/account_query.go @@ -0,0 +1,900 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/usagelog" +) + +// AccountQuery is the builder for querying Account entities. +type AccountQuery struct { + config + ctx *QueryContext + order []account.OrderOption + inters []Interceptor + predicates []predicate.Account + withGroups *GroupQuery + withProxy *ProxyQuery + withUsageLogs *UsageLogQuery + withAccountGroups *AccountGroupQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AccountQuery builder. +func (_q *AccountQuery) Where(ps ...predicate.Account) *AccountQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AccountQuery) Limit(limit int) *AccountQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. 
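+//
+// A pagination sketch ("page" and "pageSize" are hypothetical variables):
+//
+//	accounts, err := client.Account.Query().
+//		Order(account.ByID()).
+//		Offset(page * pageSize).
+//		Limit(pageSize).
+//		All(ctx)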
+func (_q *AccountQuery) Offset(offset int) *AccountQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AccountQuery) Unique(unique bool) *AccountQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AccountQuery) Order(o ...account.OrderOption) *AccountQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryGroups chains the current query on the "groups" edge. +func (_q *AccountQuery) QueryGroups() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryProxy chains the current query on the "proxy" edge. +func (_q *AccountQuery) QueryProxy() *ProxyQuery { + query := (&ProxyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(proxy.Table, proxy.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, account.ProxyTable, account.ProxyColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsageLogs chains the current query on the "usage_logs" edge. +func (_q *AccountQuery) QueryUsageLogs() *UsageLogQuery { + query := (&UsageLogClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, account.UsageLogsTable, account.UsageLogsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccountGroups chains the current query on the "account_groups" edge. 
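+//
+// A traversal sketch: load the join rows for one account ("id" is a
+// hypothetical account ID):
+//
+//	rows, err := client.Account.Query().
+//		Where(account.IDEQ(id)).
+//		QueryAccountGroups().
+//		All(ctx)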
+func (_q *AccountQuery) QueryAccountGroups() *AccountGroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, selector), + sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn), + sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Account entity from the query. +// Returns a *NotFoundError when no Account was found. +func (_q *AccountQuery) First(ctx context.Context) (*Account, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{account.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AccountQuery) FirstX(ctx context.Context) *Account { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Account ID from the query. +// Returns a *NotFoundError when no Account ID was found. +func (_q *AccountQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{account.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *AccountQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Account entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Account entity is found. +// Returns a *NotFoundError when no Account entities are found. +func (_q *AccountQuery) Only(ctx context.Context) (*Account, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{account.Label} + default: + return nil, &NotSingularError{account.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AccountQuery) OnlyX(ctx context.Context) *Account { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Account ID in the query. +// Returns a *NotSingularError when more than one Account ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *AccountQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{account.Label} + default: + err = &NotSingularError{account.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
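+//
+// OnlyIDX suits tests and one-off scripts where a panic is acceptable; prefer
+// OnlyID in request paths. Sketch (a unique "name" value is assumed):
+//
+//	id := client.Account.Query().
+//		Where(account.Name("primary")).
+//		OnlyIDX(ctx)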
+func (_q *AccountQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Accounts. +func (_q *AccountQuery) All(ctx context.Context) ([]*Account, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Account, *AccountQuery]() + return withInterceptors[[]*Account](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AccountQuery) AllX(ctx context.Context) []*Account { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Account IDs. +func (_q *AccountQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(account.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *AccountQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *AccountQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AccountQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AccountQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AccountQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AccountQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AccountQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *AccountQuery) Clone() *AccountQuery { + if _q == nil { + return nil + } + return &AccountQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]account.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Account{}, _q.predicates...), + withGroups: _q.withGroups.Clone(), + withProxy: _q.withProxy.Clone(), + withUsageLogs: _q.withUsageLogs.Clone(), + withAccountGroups: _q.withAccountGroups.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithGroups tells the query-builder to eager-load the nodes that are connected to +// the "groups" edge. The optional arguments are used to configure the query builder of the edge. 
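+//
+// An eager-loading sketch; the nested option narrows the edge query, and the
+// loaded groups are then available on each node's Edges.Groups:
+//
+//	accounts, err := client.Account.Query().
+//		WithGroups(func(q *ent.GroupQuery) {
+//			q.Limit(10)
+//		}).
+//		All(ctx)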
+func (_q *AccountQuery) WithGroups(opts ...func(*GroupQuery)) *AccountQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroups = query + return _q +} + +// WithProxy tells the query-builder to eager-load the nodes that are connected to +// the "proxy" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountQuery) WithProxy(opts ...func(*ProxyQuery)) *AccountQuery { + query := (&ProxyClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withProxy = query + return _q +} + +// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to +// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *AccountQuery { + query := (&UsageLogClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageLogs = query + return _q +} + +// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to +// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *AccountQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *AccountQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccountGroups = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Account.Query(). +// GroupBy(account.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *AccountQuery) GroupBy(field string, fields ...string) *AccountGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &AccountGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = account.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Account.Query(). +// Select(account.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *AccountQuery) Select(fields ...string) *AccountSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &AccountSelect{AccountQuery: _q} + sbuild.label = account.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AccountSelect configured with the given aggregations. +func (_q *AccountQuery) Aggregate(fns ...AggregateFunc) *AccountSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *AccountQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !account.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Account, error) { + var ( + nodes = []*Account{} + _spec = _q.querySpec() + loadedTypes = [4]bool{ + _q.withGroups != nil, + _q.withProxy != nil, + _q.withUsageLogs != nil, + _q.withAccountGroups != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Account).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Account{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withGroups; query != nil { + if err := _q.loadGroups(ctx, query, nodes, + func(n *Account) { n.Edges.Groups = []*Group{} }, + func(n *Account, e *Group) { n.Edges.Groups = append(n.Edges.Groups, e) }); err != nil { + return nil, err + } + } + if query := _q.withProxy; query != nil { + if err := _q.loadProxy(ctx, query, nodes, nil, + func(n *Account, e *Proxy) { n.Edges.Proxy = e }); err != nil { + return nil, err + } + } + if query := _q.withUsageLogs; query != nil { + if err := _q.loadUsageLogs(ctx, query, nodes, + func(n *Account) { n.Edges.UsageLogs = []*UsageLog{} }, + func(n *Account, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil { + return nil, err + } + } + if query := _q.withAccountGroups; query != nil { + if err := _q.loadAccountGroups(ctx, query, nodes, + func(n *Account) { n.Edges.AccountGroups = []*AccountGroup{} }, + func(n *Account, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AccountQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *Group)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*Account) + nids := make(map[int64]map[*Account]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(account.GroupsTable) + s.Join(joinT).On(s.C(group.FieldID), joinT.C(account.GroupsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(account.GroupsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(account.GroupsPrimaryKey[0])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Account]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "groups" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *AccountQuery) loadProxy(ctx context.Context, query *ProxyQuery, nodes []*Account, init func(*Account), assign func(*Account, *Proxy)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*Account) + for i := range nodes { + if nodes[i].ProxyID == nil { + continue + } + fk := *nodes[i].ProxyID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(proxy.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "proxy_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *AccountQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*Account, init func(*Account), assign func(*Account, *UsageLog)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Account) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usagelog.FieldAccountID) + } + query.Where(predicate.UsageLog(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(account.UsageLogsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AccountID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *AccountGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Account) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(accountgroup.FieldAccountID) + } + query.Where(predicate.AccountGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(account.AccountGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + 
return err + } + for _, n := range neighbors { + fk := n.AccountID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} + +func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *AccountQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, account.FieldID) + for i := range fields { + if fields[i] != account.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withProxy != nil { + _spec.Node.AddColumnOnce(account.FieldProxyID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(account.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = account.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *AccountQuery) ForUpdate(opts ...sql.LockOption) *AccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
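+//
+// A sketch of acquiring the lock inside a transaction, where row locks are
+// meaningful ("id" is hypothetical; error handling is elided for brevity):
+//
+//	tx, _ := client.Tx(ctx)
+//	acc, err := tx.Account.Query().
+//		Where(account.IDEQ(id)).
+//		ForShare().
+//		Only(ctx)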
+func (_q *AccountQuery) ForShare(opts ...sql.LockOption) *AccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// AccountGroupBy is the group-by builder for Account entities. +type AccountGroupBy struct { + selector + build *AccountQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *AccountGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *AccountGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountQuery, *AccountGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *AccountGroupBy) sqlScan(ctx context.Context, root *AccountQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AccountSelect is the builder for selecting fields of Account entities. +type AccountSelect struct { + *AccountQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *AccountSelect) Aggregate(fns ...AggregateFunc) *AccountSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *AccountSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AccountQuery, *AccountSelect](ctx, _s.AccountQuery, _s, _s.inters, v) +} + +func (_s *AccountSelect) sqlScan(ctx context.Context, root *AccountQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/account_update.go b/backend/ent/account_update.go new file mode 100644 index 00000000..63fab096 --- /dev/null +++ b/backend/ent/account_update.go @@ -0,0 +1,1735 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/usagelog" +) + +// AccountUpdate is the builder for updating Account entities. +type AccountUpdate struct { + config + hooks []Hook + mutation *AccountMutation +} + +// Where appends a list predicates to the AccountUpdate builder. +func (_u *AccountUpdate) Where(ps ...predicate.Account) *AccountUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *AccountUpdate) SetUpdatedAt(v time.Time) *AccountUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *AccountUpdate) SetDeletedAt(v time.Time) *AccountUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableDeletedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *AccountUpdate) ClearDeletedAt() *AccountUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *AccountUpdate) SetName(v string) *AccountUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableName(v *string) *AccountUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *AccountUpdate) SetNotes(v string) *AccountUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableNotes(v *string) *AccountUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *AccountUpdate) ClearNotes() *AccountUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *AccountUpdate) SetPlatform(v string) *AccountUpdate { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *AccountUpdate) SetNillablePlatform(v *string) *AccountUpdate { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *AccountUpdate) SetType(v string) *AccountUpdate { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableType(v *string) *AccountUpdate { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetCredentials sets the "credentials" field. +func (_u *AccountUpdate) SetCredentials(v map[string]interface{}) *AccountUpdate { + _u.mutation.SetCredentials(v) + return _u +} + +// SetExtra sets the "extra" field. +func (_u *AccountUpdate) SetExtra(v map[string]interface{}) *AccountUpdate { + _u.mutation.SetExtra(v) + return _u +} + +// SetProxyID sets the "proxy_id" field. +func (_u *AccountUpdate) SetProxyID(v int64) *AccountUpdate { + _u.mutation.SetProxyID(v) + return _u +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. 
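+//
+// The Nillable setters pair well with optional request fields: when the
+// pointer is nil, the call is a no-op. Sketch ("req.ProxyID" is a hypothetical
+// *int64 taken from an API request):
+//
+//	_, err := client.Account.Update().
+//		Where(account.IDEQ(id)).
+//		SetNillableProxyID(req.ProxyID).
+//		Save(ctx)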
+func (_u *AccountUpdate) SetNillableProxyID(v *int64) *AccountUpdate { + if v != nil { + _u.SetProxyID(*v) + } + return _u +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (_u *AccountUpdate) ClearProxyID() *AccountUpdate { + _u.mutation.ClearProxyID() + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *AccountUpdate) SetConcurrency(v int) *AccountUpdate { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableConcurrency(v *int) *AccountUpdate { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *AccountUpdate) AddConcurrency(v int) *AccountUpdate { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountUpdate) SetPriority(v int) *AccountUpdate { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountUpdate) SetNillablePriority(v *int) *AccountUpdate { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountUpdate) AddPriority(v int) *AccountUpdate { + _u.mutation.AddPriority(v) + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *AccountUpdate) SetRateMultiplier(v float64) *AccountUpdate { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateMultiplier(v *float64) *AccountUpdate { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *AccountUpdate) AddRateMultiplier(v float64) *AccountUpdate { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *AccountUpdate) SetStatus(v string) *AccountUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableStatus(v *string) *AccountUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *AccountUpdate) SetErrorMessage(v string) *AccountUpdate { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableErrorMessage(v *string) *AccountUpdate { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *AccountUpdate) ClearErrorMessage() *AccountUpdate { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_u *AccountUpdate) SetLastUsedAt(v time.Time) *AccountUpdate { + _u.mutation.SetLastUsedAt(v) + return _u +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableLastUsedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetLastUsedAt(*v) + } + return _u +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. 
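+//
+// A sketch that clears the usage marker for every account on one platform and
+// returns the affected count (the "claude" platform value is illustrative):
+//
+//	n, err := client.Account.Update().
+//		Where(account.PlatformEQ("claude")).
+//		ClearLastUsedAt().
+//		Save(ctx)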
+func (_u *AccountUpdate) ClearLastUsedAt() *AccountUpdate { + _u.mutation.ClearLastUsedAt() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *AccountUpdate) SetExpiresAt(v time.Time) *AccountUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableExpiresAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *AccountUpdate) ClearExpiresAt() *AccountUpdate { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (_u *AccountUpdate) SetAutoPauseOnExpired(v bool) *AccountUpdate { + _u.mutation.SetAutoPauseOnExpired(v) + return _u +} + +// SetNillableAutoPauseOnExpired sets the "auto_pause_on_expired" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableAutoPauseOnExpired(v *bool) *AccountUpdate { + if v != nil { + _u.SetAutoPauseOnExpired(*v) + } + return _u +} + +// SetSchedulable sets the "schedulable" field. +func (_u *AccountUpdate) SetSchedulable(v bool) *AccountUpdate { + _u.mutation.SetSchedulable(v) + return _u +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSchedulable(v *bool) *AccountUpdate { + if v != nil { + _u.SetSchedulable(*v) + } + return _u +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_u *AccountUpdate) SetRateLimitedAt(v time.Time) *AccountUpdate { + _u.mutation.SetRateLimitedAt(v) + return _u +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateLimitedAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetRateLimitedAt(*v) + } + return _u +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (_u *AccountUpdate) ClearRateLimitedAt() *AccountUpdate { + _u.mutation.ClearRateLimitedAt() + return _u +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_u *AccountUpdate) SetRateLimitResetAt(v time.Time) *AccountUpdate { + _u.mutation.SetRateLimitResetAt(v) + return _u +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateLimitResetAt(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetRateLimitResetAt(*v) + } + return _u +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (_u *AccountUpdate) ClearRateLimitResetAt() *AccountUpdate { + _u.mutation.ClearRateLimitResetAt() + return _u +} + +// SetOverloadUntil sets the "overload_until" field. +func (_u *AccountUpdate) SetOverloadUntil(v time.Time) *AccountUpdate { + _u.mutation.SetOverloadUntil(v) + return _u +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableOverloadUntil(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetOverloadUntil(*v) + } + return _u +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (_u *AccountUpdate) ClearOverloadUntil() *AccountUpdate { + _u.mutation.ClearOverloadUntil() + return _u +} + +// SetSessionWindowStart sets the "session_window_start" field. 
+func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate { + _u.mutation.SetSessionWindowStart(v) + return _u +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowStart(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetSessionWindowStart(*v) + } + return _u +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (_u *AccountUpdate) ClearSessionWindowStart() *AccountUpdate { + _u.mutation.ClearSessionWindowStart() + return _u +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_u *AccountUpdate) SetSessionWindowEnd(v time.Time) *AccountUpdate { + _u.mutation.SetSessionWindowEnd(v) + return _u +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowEnd(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetSessionWindowEnd(*v) + } + return _u +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (_u *AccountUpdate) ClearSessionWindowEnd() *AccountUpdate { + _u.mutation.ClearSessionWindowEnd() + return _u +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_u *AccountUpdate) SetSessionWindowStatus(v string) *AccountUpdate { + _u.mutation.SetSessionWindowStatus(v) + return _u +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableSessionWindowStatus(v *string) *AccountUpdate { + if v != nil { + _u.SetSessionWindowStatus(*v) + } + return _u +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (_u *AccountUpdate) ClearSessionWindowStatus() *AccountUpdate { + _u.mutation.ClearSessionWindowStatus() + return _u +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. +func (_u *AccountUpdate) AddGroupIDs(ids ...int64) *AccountUpdate { + _u.mutation.AddGroupIDs(ids...) + return _u +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_u *AccountUpdate) AddGroups(v ...*Group) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddGroupIDs(ids...) +} + +// SetProxy sets the "proxy" edge to the Proxy entity. +func (_u *AccountUpdate) SetProxy(v *Proxy) *AccountUpdate { + return _u.SetProxyID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *AccountUpdate) AddUsageLogIDs(ids ...int64) *AccountUpdate { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *AccountUpdate) AddUsageLogs(v ...*UsageLog) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_u *AccountUpdate) Mutation() *AccountMutation { + return _u.mutation +} + +// ClearGroups clears all "groups" edges to the Group entity. +func (_u *AccountUpdate) ClearGroups() *AccountUpdate { + _u.mutation.ClearGroups() + return _u +} + +// RemoveGroupIDs removes the "groups" edge to Group entities by IDs. +func (_u *AccountUpdate) RemoveGroupIDs(ids ...int64) *AccountUpdate { + _u.mutation.RemoveGroupIDs(ids...) + return _u +} + +// RemoveGroups removes "groups" edges to Group entities. 
+func (_u *AccountUpdate) RemoveGroups(v ...*Group) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveGroupIDs(ids...) +} + +// ClearProxy clears the "proxy" edge to the Proxy entity. +func (_u *AccountUpdate) ClearProxy() *AccountUpdate { + _u.mutation.ClearProxy() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *AccountUpdate) ClearUsageLogs() *AccountUpdate { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *AccountUpdate) RemoveUsageLogIDs(ids ...int64) *AccountUpdate { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *AccountUpdate) RemoveUsageLogs(v ...*UsageLog) *AccountUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AccountUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AccountUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AccountUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if account.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *AccountUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := account.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := account.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := account.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := account.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)} + } + } + if v, ok := _u.mutation.SessionWindowStatus(); ok { + if err := account.SessionWindowStatusValidator(v); err != nil { + return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)} + } + } + return nil +} + +func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(account.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(account.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(account.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(account.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(account.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(account.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(account.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(account.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Credentials(); ok { + _spec.SetField(account.FieldCredentials, field.TypeJSON, value) + } + if value, ok := _u.mutation.Extra(); ok { + _spec.SetField(account.FieldExtra, field.TypeJSON, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + 
_spec.SetField(account.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(account.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(account.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.LastUsedAt(); ok { + _spec.SetField(account.FieldLastUsedAt, field.TypeTime, value) + } + if _u.mutation.LastUsedAtCleared() { + _spec.ClearField(account.FieldLastUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(account.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(account.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.AutoPauseOnExpired(); ok { + _spec.SetField(account.FieldAutoPauseOnExpired, field.TypeBool, value) + } + if value, ok := _u.mutation.Schedulable(); ok { + _spec.SetField(account.FieldSchedulable, field.TypeBool, value) + } + if value, ok := _u.mutation.RateLimitedAt(); ok { + _spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value) + } + if _u.mutation.RateLimitedAtCleared() { + _spec.ClearField(account.FieldRateLimitedAt, field.TypeTime) + } + if value, ok := _u.mutation.RateLimitResetAt(); ok { + _spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value) + } + if _u.mutation.RateLimitResetAtCleared() { + _spec.ClearField(account.FieldRateLimitResetAt, field.TypeTime) + } + if value, ok := _u.mutation.OverloadUntil(); ok { + _spec.SetField(account.FieldOverloadUntil, field.TypeTime, value) + } + if _u.mutation.OverloadUntilCleared() { + _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStart(); ok { + _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) + } + if _u.mutation.SessionWindowStartCleared() { + _spec.ClearField(account.FieldSessionWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowEnd(); ok { + _spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value) + } + if _u.mutation.SessionWindowEndCleared() { + _spec.ClearField(account.FieldSessionWindowEnd, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStatus(); ok { + _spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value) + } + if _u.mutation.SessionWindowStatusCleared() { + _spec.ClearField(account.FieldSessionWindowStatus, field.TypeString) + } + if _u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + 
createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.ProxyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: account.ProxyTable, + Columns: []string{account.ProxyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ProxyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: account.ProxyTable, + Columns: []string{account.ProxyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{account.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AccountUpdateOne is the builder for updating a single Account entity. +type AccountUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AccountMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *AccountUpdateOne) SetUpdatedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *AccountUpdateOne) SetDeletedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableDeletedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *AccountUpdateOne) ClearDeletedAt() *AccountUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *AccountUpdateOne) SetName(v string) *AccountUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableName(v *string) *AccountUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *AccountUpdateOne) SetNotes(v string) *AccountUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableNotes(v *string) *AccountUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *AccountUpdateOne) ClearNotes() *AccountUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *AccountUpdateOne) SetPlatform(v string) *AccountUpdateOne { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillablePlatform(v *string) *AccountUpdateOne { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *AccountUpdateOne) SetType(v string) *AccountUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableType(v *string) *AccountUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetCredentials sets the "credentials" field. +func (_u *AccountUpdateOne) SetCredentials(v map[string]interface{}) *AccountUpdateOne { + _u.mutation.SetCredentials(v) + return _u +} + +// SetExtra sets the "extra" field. +func (_u *AccountUpdateOne) SetExtra(v map[string]interface{}) *AccountUpdateOne { + _u.mutation.SetExtra(v) + return _u +} + +// SetProxyID sets the "proxy_id" field. +func (_u *AccountUpdateOne) SetProxyID(v int64) *AccountUpdateOne { + _u.mutation.SetProxyID(v) + return _u +} + +// SetNillableProxyID sets the "proxy_id" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableProxyID(v *int64) *AccountUpdateOne { + if v != nil { + _u.SetProxyID(*v) + } + return _u +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (_u *AccountUpdateOne) ClearProxyID() *AccountUpdateOne { + _u.mutation.ClearProxyID() + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *AccountUpdateOne) SetConcurrency(v int) *AccountUpdateOne { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillableConcurrency(v *int) *AccountUpdateOne { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *AccountUpdateOne) AddConcurrency(v int) *AccountUpdateOne { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountUpdateOne) SetPriority(v int) *AccountUpdateOne { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillablePriority(v *int) *AccountUpdateOne { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountUpdateOne) AddPriority(v int) *AccountUpdateOne { + _u.mutation.AddPriority(v) + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *AccountUpdateOne) SetRateMultiplier(v float64) *AccountUpdateOne { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateMultiplier(v *float64) *AccountUpdateOne { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *AccountUpdateOne) AddRateMultiplier(v float64) *AccountUpdateOne { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *AccountUpdateOne) SetStatus(v string) *AccountUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableStatus(v *string) *AccountUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *AccountUpdateOne) SetErrorMessage(v string) *AccountUpdateOne { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableErrorMessage(v *string) *AccountUpdateOne { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *AccountUpdateOne) ClearErrorMessage() *AccountUpdateOne { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetLastUsedAt sets the "last_used_at" field. +func (_u *AccountUpdateOne) SetLastUsedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetLastUsedAt(v) + return _u +} + +// SetNillableLastUsedAt sets the "last_used_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableLastUsedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetLastUsedAt(*v) + } + return _u +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (_u *AccountUpdateOne) ClearLastUsedAt() *AccountUpdateOne { + _u.mutation.ClearLastUsedAt() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *AccountUpdateOne) SetExpiresAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillableExpiresAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *AccountUpdateOne) ClearExpiresAt() *AccountUpdateOne { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (_u *AccountUpdateOne) SetAutoPauseOnExpired(v bool) *AccountUpdateOne { + _u.mutation.SetAutoPauseOnExpired(v) + return _u +} + +// SetNillableAutoPauseOnExpired sets the "auto_pause_on_expired" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableAutoPauseOnExpired(v *bool) *AccountUpdateOne { + if v != nil { + _u.SetAutoPauseOnExpired(*v) + } + return _u +} + +// SetSchedulable sets the "schedulable" field. +func (_u *AccountUpdateOne) SetSchedulable(v bool) *AccountUpdateOne { + _u.mutation.SetSchedulable(v) + return _u +} + +// SetNillableSchedulable sets the "schedulable" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSchedulable(v *bool) *AccountUpdateOne { + if v != nil { + _u.SetSchedulable(*v) + } + return _u +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (_u *AccountUpdateOne) SetRateLimitedAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetRateLimitedAt(v) + return _u +} + +// SetNillableRateLimitedAt sets the "rate_limited_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateLimitedAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetRateLimitedAt(*v) + } + return _u +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (_u *AccountUpdateOne) ClearRateLimitedAt() *AccountUpdateOne { + _u.mutation.ClearRateLimitedAt() + return _u +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (_u *AccountUpdateOne) SetRateLimitResetAt(v time.Time) *AccountUpdateOne { + _u.mutation.SetRateLimitResetAt(v) + return _u +} + +// SetNillableRateLimitResetAt sets the "rate_limit_reset_at" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateLimitResetAt(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetRateLimitResetAt(*v) + } + return _u +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (_u *AccountUpdateOne) ClearRateLimitResetAt() *AccountUpdateOne { + _u.mutation.ClearRateLimitResetAt() + return _u +} + +// SetOverloadUntil sets the "overload_until" field. +func (_u *AccountUpdateOne) SetOverloadUntil(v time.Time) *AccountUpdateOne { + _u.mutation.SetOverloadUntil(v) + return _u +} + +// SetNillableOverloadUntil sets the "overload_until" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableOverloadUntil(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetOverloadUntil(*v) + } + return _u +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (_u *AccountUpdateOne) ClearOverloadUntil() *AccountUpdateOne { + _u.mutation.ClearOverloadUntil() + return _u +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne { + _u.mutation.SetSessionWindowStart(v) + return _u +} + +// SetNillableSessionWindowStart sets the "session_window_start" field if the given value is not nil. 
+func (_u *AccountUpdateOne) SetNillableSessionWindowStart(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowStart(*v) + } + return _u +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (_u *AccountUpdateOne) ClearSessionWindowStart() *AccountUpdateOne { + _u.mutation.ClearSessionWindowStart() + return _u +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (_u *AccountUpdateOne) SetSessionWindowEnd(v time.Time) *AccountUpdateOne { + _u.mutation.SetSessionWindowEnd(v) + return _u +} + +// SetNillableSessionWindowEnd sets the "session_window_end" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSessionWindowEnd(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowEnd(*v) + } + return _u +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (_u *AccountUpdateOne) ClearSessionWindowEnd() *AccountUpdateOne { + _u.mutation.ClearSessionWindowEnd() + return _u +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (_u *AccountUpdateOne) SetSessionWindowStatus(v string) *AccountUpdateOne { + _u.mutation.SetSessionWindowStatus(v) + return _u +} + +// SetNillableSessionWindowStatus sets the "session_window_status" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableSessionWindowStatus(v *string) *AccountUpdateOne { + if v != nil { + _u.SetSessionWindowStatus(*v) + } + return _u +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (_u *AccountUpdateOne) ClearSessionWindowStatus() *AccountUpdateOne { + _u.mutation.ClearSessionWindowStatus() + return _u +} + +// AddGroupIDs adds the "groups" edge to the Group entity by IDs. +func (_u *AccountUpdateOne) AddGroupIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.AddGroupIDs(ids...) + return _u +} + +// AddGroups adds the "groups" edges to the Group entity. +func (_u *AccountUpdateOne) AddGroups(v ...*Group) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddGroupIDs(ids...) +} + +// SetProxy sets the "proxy" edge to the Proxy entity. +func (_u *AccountUpdateOne) SetProxy(v *Proxy) *AccountUpdateOne { + return _u.SetProxyID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *AccountUpdateOne) AddUsageLogIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *AccountUpdateOne) AddUsageLogs(v ...*UsageLog) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// Mutation returns the AccountMutation object of the builder. +func (_u *AccountUpdateOne) Mutation() *AccountMutation { + return _u.mutation +} + +// ClearGroups clears all "groups" edges to the Group entity. +func (_u *AccountUpdateOne) ClearGroups() *AccountUpdateOne { + _u.mutation.ClearGroups() + return _u +} + +// RemoveGroupIDs removes the "groups" edge to Group entities by IDs. +func (_u *AccountUpdateOne) RemoveGroupIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.RemoveGroupIDs(ids...) + return _u +} + +// RemoveGroups removes "groups" edges to Group entities. +func (_u *AccountUpdateOne) RemoveGroups(v ...*Group) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveGroupIDs(ids...) 
+} + +// ClearProxy clears the "proxy" edge to the Proxy entity. +func (_u *AccountUpdateOne) ClearProxy() *AccountUpdateOne { + _u.mutation.ClearProxy() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *AccountUpdateOne) ClearUsageLogs() *AccountUpdateOne { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *AccountUpdateOne) RemoveUsageLogIDs(ids ...int64) *AccountUpdateOne { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *AccountUpdateOne) RemoveUsageLogs(v ...*UsageLog) *AccountUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Where appends a list of predicates to the AccountUpdateOne builder. +func (_u *AccountUpdateOne) Where(ps ...predicate.Account) *AccountUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *AccountUpdateOne) Select(field string, fields ...string) *AccountUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Account entity. +func (_u *AccountUpdateOne) Save(ctx context.Context) (*Account, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountUpdateOne) SaveX(ctx context.Context) *Account { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *AccountUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *AccountUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if account.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized account.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := account.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder.
+func (_u *AccountUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := account.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Account.name": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := account.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Account.platform": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := account.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Account.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := account.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Account.status": %w`, err)} + } + } + if v, ok := _u.mutation.SessionWindowStatus(); ok { + if err := account.SessionWindowStatusValidator(v); err != nil { + return &ValidationError{Name: "session_window_status", err: fmt.Errorf(`ent: validator failed for field "Account.session_window_status": %w`, err)} + } + } + return nil +} + +func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Account.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, account.FieldID) + for _, f := range fields { + if !account.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != account.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(account.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(account.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(account.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(account.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(account.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(account.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(account.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(account.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Credentials(); ok { + _spec.SetField(account.FieldCredentials, field.TypeJSON, value) + } + if value, ok := _u.mutation.Extra(); ok { + _spec.SetField(account.FieldExtra, field.TypeJSON, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(account.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(account.FieldConcurrency, 
field.TypeInt, value) + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(account.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(account.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(account.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(account.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.LastUsedAt(); ok { + _spec.SetField(account.FieldLastUsedAt, field.TypeTime, value) + } + if _u.mutation.LastUsedAtCleared() { + _spec.ClearField(account.FieldLastUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(account.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(account.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.AutoPauseOnExpired(); ok { + _spec.SetField(account.FieldAutoPauseOnExpired, field.TypeBool, value) + } + if value, ok := _u.mutation.Schedulable(); ok { + _spec.SetField(account.FieldSchedulable, field.TypeBool, value) + } + if value, ok := _u.mutation.RateLimitedAt(); ok { + _spec.SetField(account.FieldRateLimitedAt, field.TypeTime, value) + } + if _u.mutation.RateLimitedAtCleared() { + _spec.ClearField(account.FieldRateLimitedAt, field.TypeTime) + } + if value, ok := _u.mutation.RateLimitResetAt(); ok { + _spec.SetField(account.FieldRateLimitResetAt, field.TypeTime, value) + } + if _u.mutation.RateLimitResetAtCleared() { + _spec.ClearField(account.FieldRateLimitResetAt, field.TypeTime) + } + if value, ok := _u.mutation.OverloadUntil(); ok { + _spec.SetField(account.FieldOverloadUntil, field.TypeTime, value) + } + if _u.mutation.OverloadUntilCleared() { + _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStart(); ok { + _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) + } + if _u.mutation.SessionWindowStartCleared() { + _spec.ClearField(account.FieldSessionWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowEnd(); ok { + _spec.SetField(account.FieldSessionWindowEnd, field.TypeTime, value) + } + if _u.mutation.SessionWindowEndCleared() { + _spec.ClearField(account.FieldSessionWindowEnd, field.TypeTime) + } + if value, ok := _u.mutation.SessionWindowStatus(); ok { + _spec.SetField(account.FieldSessionWindowStatus, field.TypeString, value) + } + if _u.mutation.SessionWindowStatusCleared() { + _spec.ClearField(account.FieldSessionWindowStatus, field.TypeString) + } + if _u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedGroupsIDs(); len(nodes) > 0 && !_u.mutation.GroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: account.GroupsTable, + Columns: account.GroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.ProxyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: account.ProxyTable, + Columns: []string{account.ProxyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ProxyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: account.ProxyTable, + Columns: []string{account.ProxyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: account.UsageLogsTable, + Columns: []string{account.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = 
&Account{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{account.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/accountgroup.go b/backend/ent/accountgroup.go new file mode 100644 index 00000000..71d8a1f9 --- /dev/null +++ b/backend/ent/accountgroup.go @@ -0,0 +1,176 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" +) + +// AccountGroup is the model entity for the AccountGroup schema. +type AccountGroup struct { + config `json:"-"` + // AccountID holds the value of the "account_id" field. + AccountID int64 `json:"account_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // Priority holds the value of the "priority" field. + Priority int `json:"priority,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AccountGroupQuery when eager-loading is set. + Edges AccountGroupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// AccountGroupEdges holds the relations/edges for other nodes in the graph. +type AccountGroupEdges struct { + // Account holds the value of the account edge. + Account *Account `json:"account,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// AccountOrErr returns the Account value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AccountGroupEdges) AccountOrErr() (*Account, error) { + if e.Account != nil { + return e.Account, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: account.Label} + } + return nil, &NotLoadedError{edge: "account"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AccountGroupEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*AccountGroup) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case accountgroup.FieldAccountID, accountgroup.FieldGroupID, accountgroup.FieldPriority: + values[i] = new(sql.NullInt64) + case accountgroup.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the AccountGroup fields. 
+func (_m *AccountGroup) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case accountgroup.FieldAccountID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field account_id", values[i]) + } else if value.Valid { + _m.AccountID = value.Int64 + } + case accountgroup.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case accountgroup.FieldPriority: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field priority", values[i]) + } else if value.Valid { + _m.Priority = int(value.Int64) + } + case accountgroup.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the AccountGroup. +// This includes values selected through modifiers, order, etc. +func (_m *AccountGroup) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAccount queries the "account" edge of the AccountGroup entity. +func (_m *AccountGroup) QueryAccount() *AccountQuery { + return NewAccountGroupClient(_m.config).QueryAccount(_m) +} + +// QueryGroup queries the "group" edge of the AccountGroup entity. +func (_m *AccountGroup) QueryGroup() *GroupQuery { + return NewAccountGroupClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this AccountGroup. +// Note that you need to call AccountGroup.Unwrap() before calling this method if this AccountGroup +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *AccountGroup) Update() *AccountGroupUpdateOne { + return NewAccountGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the AccountGroup entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *AccountGroup) Unwrap() *AccountGroup { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: AccountGroup is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *AccountGroup) String() string { + var builder strings.Builder + builder.WriteString("AccountGroup(") + builder.WriteString("account_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AccountID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("priority=") + builder.WriteString(fmt.Sprintf("%v", _m.Priority)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// AccountGroups is a parsable slice of AccountGroup. 
+type AccountGroups []*AccountGroup diff --git a/backend/ent/accountgroup/accountgroup.go b/backend/ent/accountgroup/accountgroup.go new file mode 100644 index 00000000..5db485b6 --- /dev/null +++ b/backend/ent/accountgroup/accountgroup.go @@ -0,0 +1,123 @@ +// Code generated by ent, DO NOT EDIT. + +package accountgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the accountgroup type in the database. + Label = "account_group" + // FieldAccountID holds the string denoting the account_id field in the database. + FieldAccountID = "account_id" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldPriority holds the string denoting the priority field in the database. + FieldPriority = "priority" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeAccount holds the string denoting the account edge name in mutations. + EdgeAccount = "account" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // AccountFieldID holds the string denoting the ID field of the Account. + AccountFieldID = "id" + // GroupFieldID holds the string denoting the ID field of the Group. + GroupFieldID = "id" + // Table holds the table name of the accountgroup in the database. + Table = "account_groups" + // AccountTable is the table that holds the account relation/edge. + AccountTable = "account_groups" + // AccountInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. + AccountInverseTable = "accounts" + // AccountColumn is the table column denoting the account relation/edge. + AccountColumn = "account_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "account_groups" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for accountgroup fields. +var Columns = []string{ + FieldAccountID, + FieldGroupID, + FieldPriority, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultPriority holds the default value on creation for the "priority" field. + DefaultPriority int + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the AccountGroup queries. +type OrderOption func(*sql.Selector) + +// ByAccountID orders the results by the account_id field. +func ByAccountID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccountID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByPriority orders the results by the priority field. 
+func ByPriority(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPriority, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByAccountField orders the results by account field. +func ByAccountField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newAccountStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, AccountColumn), + sqlgraph.To(AccountInverseTable, AccountFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.To(GroupInverseTable, GroupFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/accountgroup/where.go b/backend/ent/accountgroup/where.go new file mode 100644 index 00000000..8226856b --- /dev/null +++ b/backend/ent/accountgroup/where.go @@ -0,0 +1,212 @@ +// Code generated by ent, DO NOT EDIT. + +package accountgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountID applies equality check predicate on the "account_id" field. It's identical to AccountIDEQ. +func AccountID(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ. +func Priority(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// AccountIDEQ applies the EQ predicate on the "account_id" field. +func AccountIDEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v)) +} + +// AccountIDNEQ applies the NEQ predicate on the "account_id" field. +func AccountIDNEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldAccountID, v)) +} + +// AccountIDIn applies the In predicate on the "account_id" field. +func AccountIDIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldAccountID, vs...)) +} + +// AccountIDNotIn applies the NotIn predicate on the "account_id" field. +func AccountIDNotIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldAccountID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. 
+func GroupIDEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// PriorityEQ applies the EQ predicate on the "priority" field. +func PriorityEQ(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v)) +} + +// PriorityNEQ applies the NEQ predicate on the "priority" field. +func PriorityNEQ(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldPriority, v)) +} + +// PriorityIn applies the In predicate on the "priority" field. +func PriorityIn(vs ...int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldPriority, vs...)) +} + +// PriorityNotIn applies the NotIn predicate on the "priority" field. +func PriorityNotIn(vs ...int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldPriority, vs...)) +} + +// PriorityGT applies the GT predicate on the "priority" field. +func PriorityGT(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGT(FieldPriority, v)) +} + +// PriorityGTE applies the GTE predicate on the "priority" field. +func PriorityGTE(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGTE(FieldPriority, v)) +} + +// PriorityLT applies the LT predicate on the "priority" field. +func PriorityLT(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLT(FieldPriority, v)) +} + +// PriorityLTE applies the LTE predicate on the "priority" field. +func PriorityLTE(v int) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldLTE(FieldPriority, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.AccountGroup { + return predicate.AccountGroup(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
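+// Editorial note (illustrative sketch, not generated code): the field
+// predicates in this file compose with And/Or/Not below. Assuming "client" and
+// "ctx" are in scope and "cutoff" is a time.Time, old low-priority links can be
+// selected with:
+//
+//	ags, err := client.AccountGroup.Query().
+//		Where(accountgroup.And(
+//			accountgroup.PriorityLTE(0),
+//			accountgroup.CreatedAtLT(cutoff),
+//		)).
+//		All(ctx)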
+func CreatedAtLT(v time.Time) predicate.AccountGroup {
+	return predicate.AccountGroup(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.AccountGroup {
+	return predicate.AccountGroup(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// HasAccount applies the HasEdge predicate on the "account" edge.
+func HasAccount() predicate.AccountGroup {
+	return predicate.AccountGroup(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, AccountColumn),
+			sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasAccountWith applies the HasEdge predicate on the "account" edge with given conditions (other predicates).
+func HasAccountWith(preds ...predicate.Account) predicate.AccountGroup {
+	return predicate.AccountGroup(func(s *sql.Selector) {
+		step := newAccountStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.AccountGroup {
+	return predicate.AccountGroup(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, GroupColumn),
+			sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.AccountGroup {
+	return predicate.AccountGroup(func(s *sql.Selector) {
+		step := newGroupStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.AccountGroup) predicate.AccountGroup {
+	return predicate.AccountGroup(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.AccountGroup) predicate.AccountGroup {
+	return predicate.AccountGroup(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.AccountGroup) predicate.AccountGroup {
+	return predicate.AccountGroup(sql.NotPredicates(p))
+}
diff --git a/backend/ent/accountgroup_create.go b/backend/ent/accountgroup_create.go
new file mode 100644
index 00000000..6a1840a1
--- /dev/null
+++ b/backend/ent/accountgroup_create.go
@@ -0,0 +1,653 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/accountgroup"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+)
+
+// AccountGroupCreate is the builder for creating an AccountGroup entity.
+type AccountGroupCreate struct {
+	config
+	mutation *AccountGroupMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetAccountID sets the "account_id" field.
+func (_c *AccountGroupCreate) SetAccountID(v int64) *AccountGroupCreate {
+	_c.mutation.SetAccountID(v)
+	return _c
+}
+
+// SetGroupID sets the "group_id" field.
+func (_c *AccountGroupCreate) SetGroupID(v int64) *AccountGroupCreate {
+	_c.mutation.SetGroupID(v)
+	return _c
+}
+
+// SetPriority sets the "priority" field.
+func (_c *AccountGroupCreate) SetPriority(v int) *AccountGroupCreate { + _c.mutation.SetPriority(v) + return _c +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_c *AccountGroupCreate) SetNillablePriority(v *int) *AccountGroupCreate { + if v != nil { + _c.SetPriority(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *AccountGroupCreate) SetCreatedAt(v time.Time) *AccountGroupCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *AccountGroupCreate) SetNillableCreatedAt(v *time.Time) *AccountGroupCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetAccount sets the "account" edge to the Account entity. +func (_c *AccountGroupCreate) SetAccount(v *Account) *AccountGroupCreate { + return _c.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *AccountGroupCreate) SetGroup(v *Group) *AccountGroupCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the AccountGroupMutation object of the builder. +func (_c *AccountGroupCreate) Mutation() *AccountGroupMutation { + return _c.mutation +} + +// Save creates the AccountGroup in the database. +func (_c *AccountGroupCreate) Save(ctx context.Context) (*AccountGroup, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *AccountGroupCreate) SaveX(ctx context.Context) *AccountGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *AccountGroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *AccountGroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *AccountGroupCreate) defaults() { + if _, ok := _c.mutation.Priority(); !ok { + v := accountgroup.DefaultPriority + _c.mutation.SetPriority(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := accountgroup.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
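+// Editorial note (illustrative sketch, not generated code): a minimal create.
+// Assuming "client" and "ctx" are in scope and accountID/groupID are int64
+// values, both required edges must be set; priority and created_at fall back to
+// the defaults applied by defaults() above when omitted:
+//
+//	ag, err := client.AccountGroup.Create().
+//		SetAccountID(accountID).
+//		SetGroupID(groupID).
+//		SetPriority(10).
+//		Save(ctx)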
+func (_c *AccountGroupCreate) check() error {
+	if _, ok := _c.mutation.AccountID(); !ok {
+		return &ValidationError{Name: "account_id", err: errors.New(`ent: missing required field "AccountGroup.account_id"`)}
+	}
+	if _, ok := _c.mutation.GroupID(); !ok {
+		return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "AccountGroup.group_id"`)}
+	}
+	if _, ok := _c.mutation.Priority(); !ok {
+		return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "AccountGroup.priority"`)}
+	}
+	if _, ok := _c.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AccountGroup.created_at"`)}
+	}
+	if len(_c.mutation.AccountIDs()) == 0 {
+		return &ValidationError{Name: "account", err: errors.New(`ent: missing required edge "AccountGroup.account"`)}
+	}
+	if len(_c.mutation.GroupIDs()) == 0 {
+		return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "AccountGroup.group"`)}
+	}
+	return nil
+}
+
+func (_c *AccountGroupCreate) sqlSave(ctx context.Context) (*AccountGroup, error) {
+	if err := _c.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := _c.createSpec()
+	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	return _node, nil
+}
+
+func (_c *AccountGroupCreate) createSpec() (*AccountGroup, *sqlgraph.CreateSpec) {
+	var (
+		_node = &AccountGroup{config: _c.config}
+		_spec = sqlgraph.NewCreateSpec(accountgroup.Table, nil)
+	)
+	_spec.OnConflict = _c.conflict
+	if value, ok := _c.mutation.Priority(); ok {
+		_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
+		_node.Priority = value
+	}
+	if value, ok := _c.mutation.CreatedAt(); ok {
+		_spec.SetField(accountgroup.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if nodes := _c.mutation.AccountIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: false,
+			Table:   accountgroup.AccountTable,
+			Columns: []string{accountgroup.AccountColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.AccountID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: false,
+			Table:   accountgroup.GroupTable,
+			Columns: []string{accountgroup.GroupColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.GroupID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.AccountGroup.Create().
+//		SetAccountID(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AccountGroupUpsert) {
+//			SetAccountID(v+v).
+//		}).
+// Exec(ctx) +func (_c *AccountGroupCreate) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertOne { + _c.conflict = opts + return &AccountGroupUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *AccountGroupCreate) OnConflictColumns(columns ...string) *AccountGroupUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &AccountGroupUpsertOne{ + create: _c, + } +} + +type ( + // AccountGroupUpsertOne is the builder for "upsert"-ing + // one AccountGroup node. + AccountGroupUpsertOne struct { + create *AccountGroupCreate + } + + // AccountGroupUpsert is the "OnConflict" setter. + AccountGroupUpsert struct { + *sql.UpdateSet + } +) + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsert) SetAccountID(v int64) *AccountGroupUpsert { + u.Set(accountgroup.FieldAccountID, v) + return u +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdateAccountID() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldAccountID) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *AccountGroupUpsert) SetGroupID(v int64) *AccountGroupUpsert { + u.Set(accountgroup.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdateGroupID() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldGroupID) + return u +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsert) SetPriority(v int) *AccountGroupUpsert { + u.Set(accountgroup.FieldPriority, v) + return u +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsert) UpdatePriority() *AccountGroupUpsert { + u.SetExcluded(accountgroup.FieldPriority) + return u +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsert) AddPriority(v int) *AccountGroupUpsert { + u.Add(accountgroup.FieldPriority, v) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountGroupUpsertOne) UpdateNewValues() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(accountgroup.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountGroupUpsertOne) Ignore() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountGroupUpsertOne) DoNothing() *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. 
See the AccountGroupCreate.OnConflict +// documentation for more info. +func (u *AccountGroupUpsertOne) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsertOne) SetAccountID(v int64) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *AccountGroupUpsertOne) UpdateAccountID() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateAccountID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *AccountGroupUpsertOne) SetGroupID(v int64) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsertOne) UpdateGroupID() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateGroupID() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsertOne) SetPriority(v int) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsertOne) AddPriority(v int) *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsertOne) UpdatePriority() *AccountGroupUpsertOne { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdatePriority() + }) +} + +// Exec executes the query. +func (u *AccountGroupUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AccountGroupCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AccountGroupUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// AccountGroupCreateBulk is the builder for creating many AccountGroup entities in bulk. +type AccountGroupCreateBulk struct { + config + err error + builders []*AccountGroupCreate + conflict []sql.ConflictOption +} + +// Save creates the AccountGroup entities in the database. 
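+// Editorial note on the upsert builder above (illustrative sketch, not
+// generated code): assuming "client", "ctx", accountID and groupID are in
+// scope, and assuming the two key columns form the unique constraint (an
+// assumption, not something enforced by this file), re-linking a pair only
+// refreshes its priority:
+//
+//	err := client.AccountGroup.Create().
+//		SetAccountID(accountID).
+//		SetGroupID(groupID).
+//		SetPriority(10).
+//		OnConflictColumns(accountgroup.FieldAccountID, accountgroup.FieldGroupID).
+//		UpdateNewValues().
+//		Exec(ctx)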
+func (_c *AccountGroupCreateBulk) Save(ctx context.Context) ([]*AccountGroup, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*AccountGroup, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*AccountGroupMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *AccountGroupCreateBulk) SaveX(ctx context.Context) []*AccountGroup {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *AccountGroupCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *AccountGroupCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.AccountGroup.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.AccountGroupUpsert) {
+//			SetAccountID(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *AccountGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertBulk {
+	_c.conflict = opts
+	return &AccountGroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.AccountGroup.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *AccountGroupCreateBulk) OnConflictColumns(columns ...string) *AccountGroupUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &AccountGroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// AccountGroupUpsertBulk is the builder for "upsert"-ing
+// a bulk of AccountGroup nodes.
+type AccountGroupUpsertBulk struct {
+	create *AccountGroupCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.AccountGroup.Create().
+// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *AccountGroupUpsertBulk) UpdateNewValues() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(accountgroup.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.AccountGroup.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AccountGroupUpsertBulk) Ignore() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AccountGroupUpsertBulk) DoNothing() *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AccountGroupCreateBulk.OnConflict +// documentation for more info. +func (u *AccountGroupUpsertBulk) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AccountGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetAccountID sets the "account_id" field. +func (u *AccountGroupUpsertBulk) SetAccountID(v int64) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdateAccountID() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateAccountID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *AccountGroupUpsertBulk) SetGroupID(v int64) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdateGroupID() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdateGroupID() + }) +} + +// SetPriority sets the "priority" field. +func (u *AccountGroupUpsertBulk) SetPriority(v int) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.SetPriority(v) + }) +} + +// AddPriority adds v to the "priority" field. +func (u *AccountGroupUpsertBulk) AddPriority(v int) *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.AddPriority(v) + }) +} + +// UpdatePriority sets the "priority" field to the value that was provided on create. +func (u *AccountGroupUpsertBulk) UpdatePriority() *AccountGroupUpsertBulk { + return u.Update(func(s *AccountGroupUpsert) { + s.UpdatePriority() + }) +} + +// Exec executes the query. +func (u *AccountGroupUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. 
Set it on the AccountGroupCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for AccountGroupCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *AccountGroupUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/accountgroup_delete.go b/backend/ent/accountgroup_delete.go
new file mode 100644
index 00000000..41f65ad6
--- /dev/null
+++ b/backend/ent/accountgroup_delete.go
@@ -0,0 +1,87 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/Wei-Shaw/sub2api/ent/accountgroup"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+)
+
+// AccountGroupDelete is the builder for deleting an AccountGroup entity.
+type AccountGroupDelete struct {
+	config
+	hooks    []Hook
+	mutation *AccountGroupMutation
+}
+
+// Where appends a list of predicates to the AccountGroupDelete builder.
+func (_d *AccountGroupDelete) Where(ps ...predicate.AccountGroup) *AccountGroupDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *AccountGroupDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *AccountGroupDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *AccountGroupDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(accountgroup.Table, nil)
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// AccountGroupDeleteOne is the builder for deleting a single AccountGroup entity.
+type AccountGroupDeleteOne struct {
+	_d *AccountGroupDelete
+}
+
+// Where appends a list of predicates to the AccountGroupDelete builder.
+func (_d *AccountGroupDeleteOne) Where(ps ...predicate.AccountGroup) *AccountGroupDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *AccountGroupDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{accountgroup.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *AccountGroupDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/accountgroup_query.go b/backend/ent/accountgroup_query.go
new file mode 100644
index 00000000..d0a4f58d
--- /dev/null
+++ b/backend/ent/accountgroup_query.go
@@ -0,0 +1,640 @@
+// Code generated by ent, DO NOT EDIT.
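+// Editorial note (illustrative sketch, not generated code): with the
+// AccountGroupDelete builder from the previous file, and assuming "client",
+// "ctx" and accountID are in scope, all links of one account can be removed in
+// a single statement; Exec reports how many rows were deleted:
+//
+//	n, err := client.AccountGroup.Delete().
+//		Where(accountgroup.AccountID(accountID)).
+//		Exec(ctx)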
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// AccountGroupQuery is the builder for querying AccountGroup entities. +type AccountGroupQuery struct { + config + ctx *QueryContext + order []accountgroup.OrderOption + inters []Interceptor + predicates []predicate.AccountGroup + withAccount *AccountQuery + withGroup *GroupQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AccountGroupQuery builder. +func (_q *AccountGroupQuery) Where(ps ...predicate.AccountGroup) *AccountGroupQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *AccountGroupQuery) Limit(limit int) *AccountGroupQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *AccountGroupQuery) Offset(offset int) *AccountGroupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *AccountGroupQuery) Unique(unique bool) *AccountGroupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *AccountGroupQuery) Order(o ...accountgroup.OrderOption) *AccountGroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAccount chains the current query on the "account" edge. +func (_q *AccountGroupQuery) QueryAccount() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(accountgroup.Table, accountgroup.AccountColumn, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.AccountTable, accountgroup.AccountColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *AccountGroupQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(accountgroup.Table, accountgroup.GroupColumn, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.GroupTable, accountgroup.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first AccountGroup entity from the query. +// Returns a *NotFoundError when no AccountGroup was found. 
+func (_q *AccountGroupQuery) First(ctx context.Context) (*AccountGroup, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{accountgroup.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *AccountGroupQuery) FirstX(ctx context.Context) *AccountGroup { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// Only returns a single AccountGroup entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one AccountGroup entity is found. +// Returns a *NotFoundError when no AccountGroup entities are found. +func (_q *AccountGroupQuery) Only(ctx context.Context) (*AccountGroup, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{accountgroup.Label} + default: + return nil, &NotSingularError{accountgroup.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *AccountGroupQuery) OnlyX(ctx context.Context) *AccountGroup { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// All executes the query and returns a list of AccountGroups. +func (_q *AccountGroupQuery) All(ctx context.Context) ([]*AccountGroup, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*AccountGroup, *AccountGroupQuery]() + return withInterceptors[[]*AccountGroup](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *AccountGroupQuery) AllX(ctx context.Context) []*AccountGroup { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// Count returns the count of the given query. +func (_q *AccountGroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*AccountGroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *AccountGroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *AccountGroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.First(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *AccountGroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AccountGroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
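+// Editorial note (illustrative sketch, not generated code): the terminators
+// above compose with predicates and eager loading. Assuming "client", "ctx",
+// accountID and groupID are in scope:
+//
+//	// Check membership without loading rows.
+//	ok, err := client.AccountGroup.Query().
+//		Where(accountgroup.AccountID(accountID), accountgroup.GroupID(groupID)).
+//		Exist(ctx)
+//
+//	// Fetch one link with its Group edge populated (WithGroup is defined below).
+//	ag, err := client.AccountGroup.Query().
+//		Where(accountgroup.AccountID(accountID)).
+//		WithGroup().
+//		First(ctx)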
+func (_q *AccountGroupQuery) Clone() *AccountGroupQuery {
+	if _q == nil {
+		return nil
+	}
+	return &AccountGroupQuery{
+		config:      _q.config,
+		ctx:         _q.ctx.Clone(),
+		order:       append([]accountgroup.OrderOption{}, _q.order...),
+		inters:      append([]Interceptor{}, _q.inters...),
+		predicates:  append([]predicate.AccountGroup{}, _q.predicates...),
+		withAccount: _q.withAccount.Clone(),
+		withGroup:   _q.withGroup.Clone(),
+		// clone intermediate query.
+		sql:  _q.sql.Clone(),
+		path: _q.path,
+	}
+}
+
+// WithAccount tells the query-builder to eager-load the nodes that are connected to
+// the "account" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *AccountGroupQuery) WithAccount(opts ...func(*AccountQuery)) *AccountGroupQuery {
+	query := (&AccountClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withAccount = query
+	return _q
+}
+
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *AccountGroupQuery) WithGroup(opts ...func(*GroupQuery)) *AccountGroupQuery {
+	query := (&GroupClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withGroup = query
+	return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		AccountID int64 `json:"account_id,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.AccountGroup.Query().
+//		GroupBy(accountgroup.FieldAccountID).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *AccountGroupQuery) GroupBy(field string, fields ...string) *AccountGroupGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &AccountGroupGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = accountgroup.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows selecting one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		AccountID int64 `json:"account_id,omitempty"`
+//	}
+//
+//	client.AccountGroup.Query().
+//		Select(accountgroup.FieldAccountID).
+//		Scan(ctx, &v)
+func (_q *AccountGroupQuery) Select(fields ...string) *AccountGroupSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &AccountGroupSelect{AccountGroupQuery: _q}
+	sbuild.label = accountgroup.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns an AccountGroupSelect configured with the given aggregations.
+func (_q *AccountGroupQuery) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
+	return _q.Select().Aggregate(fns...)
+} + +func (_q *AccountGroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !accountgroup.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AccountGroup, error) { + var ( + nodes = []*AccountGroup{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withAccount != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*AccountGroup).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &AccountGroup{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAccount; query != nil { + if err := _q.loadAccount(ctx, query, nodes, nil, + func(n *AccountGroup, e *Account) { n.Edges.Account = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *AccountGroup, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *AccountGroupQuery) loadAccount(ctx context.Context, query *AccountQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Account)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*AccountGroup) + for i := range nodes { + fk := nodes[i].AccountID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(account.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "account_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*AccountGroup) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := 
_q.querySpec()
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	_spec.Unique = false
+	_spec.Node.Columns = nil
+	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
+}
+
+func (_q *AccountGroupQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(accountgroup.Table, accountgroup.Columns, nil)
+	_spec.From = _q.sql
+	if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		for i := range fields {
+			_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+		}
+		if _q.withAccount != nil {
+			_spec.Node.AddColumnOnce(accountgroup.FieldAccountID)
+		}
+		if _q.withGroup != nil {
+			_spec.Node.AddColumnOnce(accountgroup.FieldGroupID)
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(accountgroup.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = accountgroup.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (_q *AccountGroupQuery) ForUpdate(opts ...sql.LockOption) *AccountGroupQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return _q
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (_q *AccountGroupQuery) ForShare(opts ...sql.LockOption) *AccountGroupQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return _q
+}
+
+// AccountGroupGroupBy is the group-by builder for AccountGroup entities.
+type AccountGroupGroupBy struct {
+	selector
+	build *AccountGroupQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *AccountGroupGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupGroupBy {
+	_g.fns = append(_g.fns, fns...)
+	return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_g *AccountGroupGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
+	if err := _g.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*AccountGroupQuery, *AccountGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
+}
+
+func (_g *AccountGroupGroupBy) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(_g.fns))
+	for _, fn := range _g.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
+		for _, f := range *_g.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*_g.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// AccountGroupSelect is the builder for selecting fields of AccountGroup entities.
+type AccountGroupSelect struct {
+	*AccountGroupQuery
+	selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (_s *AccountGroupSelect) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
+	_s.fns = append(_s.fns, fns...)
+	return _s
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_s *AccountGroupSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
+	if err := _s.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*AccountGroupQuery, *AccountGroupSelect](ctx, _s.AccountGroupQuery, _s, _s.inters, v)
+}
+
+func (_s *AccountGroupSelect) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(_s.fns))
+	for _, fn := range _s.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*_s.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/backend/ent/accountgroup_update.go b/backend/ent/accountgroup_update.go
new file mode 100644
index 00000000..fd7b5430
--- /dev/null
+++ b/backend/ent/accountgroup_update.go
@@ -0,0 +1,477 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/accountgroup"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+)
+
+// AccountGroupUpdate is the builder for updating AccountGroup entities.
+type AccountGroupUpdate struct {
+	config
+	hooks    []Hook
+	mutation *AccountGroupMutation
+}
+
+// Where appends a list of predicates to the AccountGroupUpdate builder.
+func (_u *AccountGroupUpdate) Where(ps ...predicate.AccountGroup) *AccountGroupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetAccountID sets the "account_id" field. +func (_u *AccountGroupUpdate) SetAccountID(v int64) *AccountGroupUpdate { + _u.mutation.SetAccountID(v) + return _u +} + +// SetNillableAccountID sets the "account_id" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillableAccountID(v *int64) *AccountGroupUpdate { + if v != nil { + _u.SetAccountID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *AccountGroupUpdate) SetGroupID(v int64) *AccountGroupUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillableGroupID(v *int64) *AccountGroupUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetPriority sets the "priority" field. +func (_u *AccountGroupUpdate) SetPriority(v int) *AccountGroupUpdate { + _u.mutation.ResetPriority() + _u.mutation.SetPriority(v) + return _u +} + +// SetNillablePriority sets the "priority" field if the given value is not nil. +func (_u *AccountGroupUpdate) SetNillablePriority(v *int) *AccountGroupUpdate { + if v != nil { + _u.SetPriority(*v) + } + return _u +} + +// AddPriority adds value to the "priority" field. +func (_u *AccountGroupUpdate) AddPriority(v int) *AccountGroupUpdate { + _u.mutation.AddPriority(v) + return _u +} + +// SetAccount sets the "account" edge to the Account entity. +func (_u *AccountGroupUpdate) SetAccount(v *Account) *AccountGroupUpdate { + return _u.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *AccountGroupUpdate) SetGroup(v *Group) *AccountGroupUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the AccountGroupMutation object of the builder. +func (_u *AccountGroupUpdate) Mutation() *AccountGroupMutation { + return _u.mutation +} + +// ClearAccount clears the "account" edge to the Account entity. +func (_u *AccountGroupUpdate) ClearAccount() *AccountGroupUpdate { + _u.mutation.ClearAccount() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *AccountGroupUpdate) ClearGroup() *AccountGroupUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *AccountGroupUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *AccountGroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *AccountGroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *AccountGroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
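+// Editorial note (illustrative sketch, not generated code): a bulk update.
+// Assuming "client", "ctx" and groupID are in scope, every link of a group can
+// be re-prioritized at once; Save returns the number of affected rows:
+//
+//	n, err := client.AccountGroup.Update().
+//		Where(accountgroup.GroupID(groupID)).
+//		SetPriority(0).
+//		Save(ctx)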
+func (_u *AccountGroupUpdate) check() error { + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`) + } + return nil +} + +func (_u *AccountGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(accountgroup.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(accountgroup.FieldPriority, field.TypeInt, value) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{accountgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// AccountGroupUpdateOne is the builder for updating a single AccountGroup entity. +type AccountGroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AccountGroupMutation +} + +// SetAccountID sets the "account_id" field. 
+func (_u *AccountGroupUpdateOne) SetAccountID(v int64) *AccountGroupUpdateOne {
+	_u.mutation.SetAccountID(v)
+	return _u
+}
+
+// SetNillableAccountID sets the "account_id" field if the given value is not nil.
+func (_u *AccountGroupUpdateOne) SetNillableAccountID(v *int64) *AccountGroupUpdateOne {
+	if v != nil {
+		_u.SetAccountID(*v)
+	}
+	return _u
+}
+
+// SetGroupID sets the "group_id" field.
+func (_u *AccountGroupUpdateOne) SetGroupID(v int64) *AccountGroupUpdateOne {
+	_u.mutation.SetGroupID(v)
+	return _u
+}
+
+// SetNillableGroupID sets the "group_id" field if the given value is not nil.
+func (_u *AccountGroupUpdateOne) SetNillableGroupID(v *int64) *AccountGroupUpdateOne {
+	if v != nil {
+		_u.SetGroupID(*v)
+	}
+	return _u
+}
+
+// SetPriority sets the "priority" field.
+func (_u *AccountGroupUpdateOne) SetPriority(v int) *AccountGroupUpdateOne {
+	_u.mutation.ResetPriority()
+	_u.mutation.SetPriority(v)
+	return _u
+}
+
+// SetNillablePriority sets the "priority" field if the given value is not nil.
+func (_u *AccountGroupUpdateOne) SetNillablePriority(v *int) *AccountGroupUpdateOne {
+	if v != nil {
+		_u.SetPriority(*v)
+	}
+	return _u
+}
+
+// AddPriority adds value to the "priority" field.
+func (_u *AccountGroupUpdateOne) AddPriority(v int) *AccountGroupUpdateOne {
+	_u.mutation.AddPriority(v)
+	return _u
+}
+
+// SetAccount sets the "account" edge to the Account entity.
+func (_u *AccountGroupUpdateOne) SetAccount(v *Account) *AccountGroupUpdateOne {
+	return _u.SetAccountID(v.ID)
+}
+
+// SetGroup sets the "group" edge to the Group entity.
+func (_u *AccountGroupUpdateOne) SetGroup(v *Group) *AccountGroupUpdateOne {
+	return _u.SetGroupID(v.ID)
+}
+
+// Mutation returns the AccountGroupMutation object of the builder.
+func (_u *AccountGroupUpdateOne) Mutation() *AccountGroupMutation {
+	return _u.mutation
+}
+
+// ClearAccount clears the "account" edge to the Account entity.
+func (_u *AccountGroupUpdateOne) ClearAccount() *AccountGroupUpdateOne {
+	_u.mutation.ClearAccount()
+	return _u
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (_u *AccountGroupUpdateOne) ClearGroup() *AccountGroupUpdateOne {
+	_u.mutation.ClearGroup()
+	return _u
+}
+
+// Where appends a list of predicates to the AccountGroupUpdate builder.
+func (_u *AccountGroupUpdateOne) Where(ps ...predicate.AccountGroup) *AccountGroupUpdateOne {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (_u *AccountGroupUpdateOne) Select(field string, fields ...string) *AccountGroupUpdateOne {
+	_u.fields = append([]string{field}, fields...)
+	return _u
+}
+
+// Save executes the query and returns the updated AccountGroup entity.
+func (_u *AccountGroupUpdateOne) Save(ctx context.Context) (*AccountGroup, error) {
+	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *AccountGroupUpdateOne) SaveX(ctx context.Context) *AccountGroup {
+	node, err := _u.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (_u *AccountGroupUpdateOne) Exec(ctx context.Context) error {
+	_, err := _u.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_u *AccountGroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *AccountGroupUpdateOne) check() error { + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`) + } + return nil +} + +func (_u *AccountGroupUpdateOne) sqlSave(ctx context.Context) (_node *AccountGroup, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64)) + if id, ok := _u.mutation.AccountID(); !ok { + return nil, &ValidationError{Name: "account_id", err: errors.New(`ent: missing "AccountGroup.account_id" for update`)} + } else { + _spec.Node.CompositeID[0].Value = id + } + if id, ok := _u.mutation.GroupID(); !ok { + return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "AccountGroup.group_id" for update`)} + } else { + _spec.Node.CompositeID[1].Value = id + } + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, len(fields)) + for i, f := range fields { + if !accountgroup.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + _spec.Node.Columns[i] = f + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Priority(); ok { + _spec.SetField(accountgroup.FieldPriority, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPriority(); ok { + _spec.AddField(accountgroup.FieldPriority, field.TypeInt, value) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.AccountTable, + Columns: []string{accountgroup.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: accountgroup.GroupTable, + Columns: []string{accountgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &AccountGroup{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{accountgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/apikey.go b/backend/ent/apikey.go new file mode 100644 index 00000000..95586017 --- /dev/null +++ b/backend/ent/apikey.go @@ -0,0 +1,282 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// APIKey is the model entity for the APIKey schema. +type APIKey struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID *int64 `json:"group_id,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"] + IPWhitelist []string `json:"ip_whitelist,omitempty"` + // Blocked IPs/CIDRs + IPBlacklist []string `json:"ip_blacklist,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the APIKeyQuery when eager-loading is set. + Edges APIKeyEdges `json:"edges"` + selectValues sql.SelectValues +} + +// APIKeyEdges holds the relations/edges for other nodes in the graph. +type APIKeyEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // UsageLogs holds the value of the usage_logs edge. + UsageLogs []*UsageLog `json:"usage_logs,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e APIKeyEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
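+//
+// Illustrative sketch (editor's addition): the edge is populated only when
+// the query eager-loads it, e.g.:
+//
+//	ak := client.APIKey.Query().WithGroup().FirstX(ctx)
+//	grp, err := ak.Edges.GroupOrErr() // errs if WithGroup was omitted or the group is absent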
+func (e APIKeyEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// UsageLogsOrErr returns the UsageLogs value or an error if the edge +// was not loaded in eager-loading. +func (e APIKeyEdges) UsageLogsOrErr() ([]*UsageLog, error) { + if e.loadedTypes[2] { + return e.UsageLogs, nil + } + return nil, &NotLoadedError{edge: "usage_logs"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*APIKey) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist: + values[i] = new([]byte) + case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID: + values[i] = new(sql.NullInt64) + case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus: + values[i] = new(sql.NullString) + case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the APIKey fields. +func (_m *APIKey) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case apikey.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case apikey.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case apikey.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case apikey.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case apikey.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case apikey.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case apikey.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case apikey.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = new(int64) + *_m.GroupID = value.Int64 + } + case apikey.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case apikey.FieldIPWhitelist: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T 
for field ip_whitelist", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.IPWhitelist); err != nil { + return fmt.Errorf("unmarshal field ip_whitelist: %w", err) + } + } + case apikey.FieldIPBlacklist: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field ip_blacklist", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.IPBlacklist); err != nil { + return fmt.Errorf("unmarshal field ip_blacklist: %w", err) + } + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the APIKey. +// This includes values selected through modifiers, order, etc. +func (_m *APIKey) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the APIKey entity. +func (_m *APIKey) QueryUser() *UserQuery { + return NewAPIKeyClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the APIKey entity. +func (_m *APIKey) QueryGroup() *GroupQuery { + return NewAPIKeyClient(_m.config).QueryGroup(_m) +} + +// QueryUsageLogs queries the "usage_logs" edge of the APIKey entity. +func (_m *APIKey) QueryUsageLogs() *UsageLogQuery { + return NewAPIKeyClient(_m.config).QueryUsageLogs(_m) +} + +// Update returns a builder for updating this APIKey. +// Note that you need to call APIKey.Unwrap() before calling this method if this APIKey +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *APIKey) Update() *APIKeyUpdateOne { + return NewAPIKeyClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the APIKey entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *APIKey) Unwrap() *APIKey { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: APIKey is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *APIKey) String() string { + var builder strings.Builder + builder.WriteString("APIKey(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + if v := _m.GroupID; v != nil { + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("ip_whitelist=") + builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist)) + builder.WriteString(", ") + builder.WriteString("ip_blacklist=") + builder.WriteString(fmt.Sprintf("%v", _m.IPBlacklist)) + builder.WriteByte(')') + return builder.String() +} + +// APIKeys is a parsable slice of APIKey. 
+type APIKeys []*APIKey diff --git a/backend/ent/apikey/apikey.go b/backend/ent/apikey/apikey.go new file mode 100644 index 00000000..564cddb1 --- /dev/null +++ b/backend/ent/apikey/apikey.go @@ -0,0 +1,213 @@ +// Code generated by ent, DO NOT EDIT. + +package apikey + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the apikey type in the database. + Label = "api_key" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldIPWhitelist holds the string denoting the ip_whitelist field in the database. + FieldIPWhitelist = "ip_whitelist" + // FieldIPBlacklist holds the string denoting the ip_blacklist field in the database. + FieldIPBlacklist = "ip_blacklist" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations. + EdgeUsageLogs = "usage_logs" + // Table holds the table name of the apikey in the database. + Table = "api_keys" + // UserTable is the table that holds the user relation/edge. + UserTable = "api_keys" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "api_keys" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" + // UsageLogsTable is the table that holds the usage_logs relation/edge. + UsageLogsTable = "usage_logs" + // UsageLogsInverseTable is the table name for the UsageLog entity. + // It exists in this package in order to avoid circular dependency with the "usagelog" package. + UsageLogsInverseTable = "usage_logs" + // UsageLogsColumn is the table column denoting the usage_logs relation/edge. + UsageLogsColumn = "api_key_id" +) + +// Columns holds all SQL columns for apikey fields. 
+var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldUserID, + FieldKey, + FieldName, + FieldGroupID, + FieldStatus, + FieldIPWhitelist, + FieldIPBlacklist, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error +) + +// OrderOption defines the ordering options for the APIKey queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUserField orders the results by user field. 
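+//
+// For example (editor's illustrative sketch), ordering keys by a column of
+// their owning user:
+//
+//	keys, err := client.APIKey.Query().
+//		Order(apikey.ByUserField(user.FieldID)).
+//		All(ctx)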
+func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUsageLogsCount orders the results by usage_logs count. +func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...) + } +} + +// ByUsageLogs orders the results by usage_logs terms. +func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newUsageLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) +} diff --git a/backend/ent/apikey/where.go b/backend/ent/apikey/where.go new file mode 100644 index 00000000..5152867f --- /dev/null +++ b/backend/ent/apikey/where.go @@ -0,0 +1,575 @@ +// Code generated by ent, DO NOT EDIT. + +package apikey + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
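+//
+// For example (editor's sketch), selecting keys created at a known instant ts:
+//
+//	client.APIKey.Query().
+//		Where(apikey.CreatedAt(ts)).
+//		All(ctx)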
+func CreatedAt(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldDeletedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldUserID, v)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldKey, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldName, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldGroupID, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. 
+func UpdatedAtIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.APIKey { + return predicate.APIKey(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.APIKey { + return predicate.APIKey(sql.FieldNotNull(FieldDeletedAt)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. 
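+//
+// e.g. (editor's sketch): apikey.UserIDNotIn(1, 2) matches keys owned by any
+// user other than users 1 and 2.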
+func UserIDNotIn(vs ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldUserID, vs...)) +} + +// KeyEQ applies the EQ predicate on the "key" field. +func KeyEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldContainsFold(FieldKey, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. 
+func NameLTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldContainsFold(FieldName, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDIsNil applies the IsNil predicate on the "group_id" field. +func GroupIDIsNil() predicate.APIKey { + return predicate.APIKey(sql.FieldIsNull(FieldGroupID)) +} + +// GroupIDNotNil applies the NotNil predicate on the "group_id" field. +func GroupIDNotNil() predicate.APIKey { + return predicate.APIKey(sql.FieldNotNull(FieldGroupID)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.APIKey { + return predicate.APIKey(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.APIKey { + return predicate.APIKey(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. 
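+//
+// Editor's note: substring matching can surprise here; for instance, a filter
+// like apikey.StatusContains("act") would match both a hypothetical "active"
+// and "inactive" value, so prefer apikey.StatusEQ for exact status checks.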
+func StatusContains(v string) predicate.APIKey {
+	return predicate.APIKey(sql.FieldContains(FieldStatus, v))
+}
+
+// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
+func StatusHasPrefix(v string) predicate.APIKey {
+	return predicate.APIKey(sql.FieldHasPrefix(FieldStatus, v))
+}
+
+// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
+func StatusHasSuffix(v string) predicate.APIKey {
+	return predicate.APIKey(sql.FieldHasSuffix(FieldStatus, v))
+}
+
+// StatusEqualFold applies the EqualFold predicate on the "status" field.
+func StatusEqualFold(v string) predicate.APIKey {
+	return predicate.APIKey(sql.FieldEqualFold(FieldStatus, v))
+}
+
+// StatusContainsFold applies the ContainsFold predicate on the "status" field.
+func StatusContainsFold(v string) predicate.APIKey {
+	return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
+}
+
+// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field.
+func IPWhitelistIsNil() predicate.APIKey {
+	return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist))
+}
+
+// IPWhitelistNotNil applies the NotNil predicate on the "ip_whitelist" field.
+func IPWhitelistNotNil() predicate.APIKey {
+	return predicate.APIKey(sql.FieldNotNull(FieldIPWhitelist))
+}
+
+// IPBlacklistIsNil applies the IsNil predicate on the "ip_blacklist" field.
+func IPBlacklistIsNil() predicate.APIKey {
+	return predicate.APIKey(sql.FieldIsNull(FieldIPBlacklist))
+}
+
+// IPBlacklistNotNil applies the NotNil predicate on the "ip_blacklist" field.
+func IPBlacklistNotNil() predicate.APIKey {
+	return predicate.APIKey(sql.FieldNotNull(FieldIPBlacklist))
+}
+
+// HasUser applies the HasEdge predicate on the "user" edge.
+func HasUser() predicate.APIKey {
+	return predicate.APIKey(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with the given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.APIKey {
+	return predicate.APIKey(func(s *sql.Selector) {
+		step := newUserStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.APIKey {
+	return predicate.APIKey(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with the given conditions (other predicates).
+func HasGroupWith(preds ...predicate.Group) predicate.APIKey {
+	return predicate.APIKey(func(s *sql.Selector) {
+		step := newGroupStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge.
+func HasUsageLogs() predicate.APIKey {
+	return predicate.APIKey(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with the given conditions (other predicates).
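+//
+// For example (editor's sketch, assuming the generated usagelog package
+// mirrors the time predicates in this one), keys with traffic since a given
+// time:
+//
+//	client.APIKey.Query().
+//		Where(apikey.HasUsageLogsWith(usagelog.CreatedAtGT(since))).
+//		All(ctx)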
+func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.APIKey { + return predicate.APIKey(func(s *sql.Selector) { + step := newUsageLogsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.APIKey) predicate.APIKey { + return predicate.APIKey(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.APIKey) predicate.APIKey { + return predicate.APIKey(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.APIKey) predicate.APIKey { + return predicate.APIKey(sql.NotPredicates(p)) +} diff --git a/backend/ent/apikey_create.go b/backend/ent/apikey_create.go new file mode 100644 index 00000000..d5363be5 --- /dev/null +++ b/backend/ent/apikey_create.go @@ -0,0 +1,1127 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// APIKeyCreate is the builder for creating a APIKey entity. +type APIKeyCreate struct { + config + mutation *APIKeyMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *APIKeyCreate) SetCreatedAt(v time.Time) *APIKeyCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *APIKeyCreate) SetNillableCreatedAt(v *time.Time) *APIKeyCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *APIKeyCreate) SetUpdatedAt(v time.Time) *APIKeyCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *APIKeyCreate) SetNillableUpdatedAt(v *time.Time) *APIKeyCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *APIKeyCreate) SetDeletedAt(v time.Time) *APIKeyCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *APIKeyCreate) SetNillableDeletedAt(v *time.Time) *APIKeyCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *APIKeyCreate) SetUserID(v int64) *APIKeyCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetKey sets the "key" field. +func (_c *APIKeyCreate) SetKey(v string) *APIKeyCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetName sets the "name" field. +func (_c *APIKeyCreate) SetName(v string) *APIKeyCreate { + _c.mutation.SetName(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *APIKeyCreate) SetGroupID(v int64) *APIKeyCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_c *APIKeyCreate) SetNillableGroupID(v *int64) *APIKeyCreate { + if v != nil { + _c.SetGroupID(*v) + } + return _c +} + +// SetStatus sets the "status" field. 
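+//
+// Editor's illustrative sketch of this create builder (field values are
+// placeholders): the user edge is required, and status falls back to
+// apikey.DefaultStatus when SetStatus is not called:
+//
+//	ak, err := client.APIKey.Create().
+//		SetUserID(uid).
+//		SetKey(rawKey).
+//		SetName("default").
+//		Save(ctx)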
+func (_c *APIKeyCreate) SetStatus(v string) *APIKeyCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *APIKeyCreate) SetNillableStatus(v *string) *APIKeyCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (_c *APIKeyCreate) SetIPWhitelist(v []string) *APIKeyCreate { + _c.mutation.SetIPWhitelist(v) + return _c +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (_c *APIKeyCreate) SetIPBlacklist(v []string) *APIKeyCreate { + _c.mutation.SetIPBlacklist(v) + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *APIKeyCreate) SetGroup(v *Group) *APIKeyCreate { + return _c.SetGroupID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_c *APIKeyCreate) AddUsageLogIDs(ids ...int64) *APIKeyCreate { + _c.mutation.AddUsageLogIDs(ids...) + return _c +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_c *APIKeyCreate) AddUsageLogs(v ...*UsageLog) *APIKeyCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageLogIDs(ids...) +} + +// Mutation returns the APIKeyMutation object of the builder. +func (_c *APIKeyCreate) Mutation() *APIKeyMutation { + return _c.mutation +} + +// Save creates the APIKey in the database. +func (_c *APIKeyCreate) Save(ctx context.Context) (*APIKey, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *APIKeyCreate) SaveX(ctx context.Context) *APIKey { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *APIKeyCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *APIKeyCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *APIKeyCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if apikey.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := apikey.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if apikey.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := apikey.DefaultStatus + _c.mutation.SetStatus(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *APIKeyCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "APIKey.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "APIKey.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "APIKey.user_id"`)} + } + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "APIKey.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := apikey.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "APIKey.key": %w`, err)} + } + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "APIKey.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := apikey.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "APIKey.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := apikey.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "APIKey.status": %w`, err)} + } + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "APIKey.user"`)} + } + return nil +} + +func (_c *APIKeyCreate) sqlSave(ctx context.Context) (*APIKey, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) { + var ( + _node = &APIKey{config: _c.config} + _spec = sqlgraph.NewCreateSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(apikey.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Key(); ok { + _spec.SetField(apikey.FieldKey, field.TypeString, value) + _node.Key = value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(apikey.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(apikey.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.IPWhitelist(); ok { + _spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value) + _node.IPWhitelist = value + } + if value, ok := _c.mutation.IPBlacklist(); ok { + 
		_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
+		_node.IPBlacklist = value
+	}
+	if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.UserTable,
+			Columns: []string{apikey.UserColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.UserID = nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.GroupTable,
+			Columns: []string{apikey.GroupColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_node.GroupID = &nodes[0]
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	if nodes := _c.mutation.UsageLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   apikey.UsageLogsTable,
+			Columns: []string{apikey.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.APIKey.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.APIKeyUpsert) {
+//			u.SetCreatedAt(time.Now())
+//		}).
+//		Exec(ctx)
+func (_c *APIKeyCreate) OnConflict(opts ...sql.ConflictOption) *APIKeyUpsertOne {
+	_c.conflict = opts
+	return &APIKeyUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.APIKey.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *APIKeyCreate) OnConflictColumns(columns ...string) *APIKeyUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &APIKeyUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// APIKeyUpsertOne is the builder for "upsert"-ing
+	// one APIKey node.
+	APIKeyUpsertOne struct {
+		create *APIKeyCreate
+	}
+
+	// APIKeyUpsert is the "OnConflict" setter.
+	APIKeyUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *APIKeyUpsert) SetUpdatedAt(v time.Time) *APIKeyUpsert {
+	u.Set(apikey.FieldUpdatedAt, v)
+	return u
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *APIKeyUpsert) UpdateUpdatedAt() *APIKeyUpsert {
+	u.SetExcluded(apikey.FieldUpdatedAt)
+	return u
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (u *APIKeyUpsert) SetDeletedAt(v time.Time) *APIKeyUpsert {
+	u.Set(apikey.FieldDeletedAt, v)
+	return u
+}
+
+// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
+func (u *APIKeyUpsert) UpdateDeletedAt() *APIKeyUpsert { + u.SetExcluded(apikey.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *APIKeyUpsert) ClearDeletedAt() *APIKeyUpsert { + u.SetNull(apikey.FieldDeletedAt) + return u +} + +// SetUserID sets the "user_id" field. +func (u *APIKeyUpsert) SetUserID(v int64) *APIKeyUpsert { + u.Set(apikey.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateUserID() *APIKeyUpsert { + u.SetExcluded(apikey.FieldUserID) + return u +} + +// SetKey sets the "key" field. +func (u *APIKeyUpsert) SetKey(v string) *APIKeyUpsert { + u.Set(apikey.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateKey() *APIKeyUpsert { + u.SetExcluded(apikey.FieldKey) + return u +} + +// SetName sets the "name" field. +func (u *APIKeyUpsert) SetName(v string) *APIKeyUpsert { + u.Set(apikey.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateName() *APIKeyUpsert { + u.SetExcluded(apikey.FieldName) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *APIKeyUpsert) SetGroupID(v int64) *APIKeyUpsert { + u.Set(apikey.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateGroupID() *APIKeyUpsert { + u.SetExcluded(apikey.FieldGroupID) + return u +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *APIKeyUpsert) ClearGroupID() *APIKeyUpsert { + u.SetNull(apikey.FieldGroupID) + return u +} + +// SetStatus sets the "status" field. +func (u *APIKeyUpsert) SetStatus(v string) *APIKeyUpsert { + u.Set(apikey.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateStatus() *APIKeyUpsert { + u.SetExcluded(apikey.FieldStatus) + return u +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsert) SetIPWhitelist(v []string) *APIKeyUpsert { + u.Set(apikey.FieldIPWhitelist, v) + return u +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateIPWhitelist() *APIKeyUpsert { + u.SetExcluded(apikey.FieldIPWhitelist) + return u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (u *APIKeyUpsert) ClearIPWhitelist() *APIKeyUpsert { + u.SetNull(apikey.FieldIPWhitelist) + return u +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsert) SetIPBlacklist(v []string) *APIKeyUpsert { + u.Set(apikey.FieldIPBlacklist, v) + return u +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateIPBlacklist() *APIKeyUpsert { + u.SetExcluded(apikey.FieldIPBlacklist) + return u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsert) ClearIPBlacklist() *APIKeyUpsert { + u.SetNull(apikey.FieldIPBlacklist) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.APIKey.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *APIKeyUpsertOne) UpdateNewValues() *APIKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(apikey.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.APIKey.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *APIKeyUpsertOne) Ignore() *APIKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *APIKeyUpsertOne) DoNothing() *APIKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the APIKeyCreate.OnConflict +// documentation for more info. +func (u *APIKeyUpsertOne) Update(set func(*APIKeyUpsert)) *APIKeyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&APIKeyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *APIKeyUpsertOne) SetUpdatedAt(v time.Time) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateUpdatedAt() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *APIKeyUpsertOne) SetDeletedAt(v time.Time) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateDeletedAt() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *APIKeyUpsertOne) ClearDeletedAt() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *APIKeyUpsertOne) SetUserID(v int64) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateUserID() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateUserID() + }) +} + +// SetKey sets the "key" field. +func (u *APIKeyUpsertOne) SetKey(v string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateKey() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *APIKeyUpsertOne) SetName(v string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateName() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateName() + }) +} + +// SetGroupID sets the "group_id" field. 
+func (u *APIKeyUpsertOne) SetGroupID(v int64) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateGroupID() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *APIKeyUpsertOne) ClearGroupID() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearGroupID() + }) +} + +// SetStatus sets the "status" field. +func (u *APIKeyUpsertOne) SetStatus(v string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateStatus() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateStatus() + }) +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsertOne) SetIPWhitelist(v []string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPWhitelist(v) + }) +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateIPWhitelist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPWhitelist() + }) +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (u *APIKeyUpsertOne) ClearIPWhitelist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPWhitelist() + }) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsertOne) SetIPBlacklist(v []string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPBlacklist(v) + }) +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateIPBlacklist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPBlacklist() + }) +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsertOne) ClearIPBlacklist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPBlacklist() + }) +} + +// Exec executes the query. +func (u *APIKeyUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for APIKeyCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *APIKeyUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *APIKeyUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *APIKeyUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// APIKeyCreateBulk is the builder for creating many APIKey entities in bulk. +type APIKeyCreateBulk struct { + config + err error + builders []*APIKeyCreate + conflict []sql.ConflictOption +} + +// Save creates the APIKey entities in the database. 
+func (_c *APIKeyCreateBulk) Save(ctx context.Context) ([]*APIKey, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*APIKey, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*APIKeyMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *APIKeyCreateBulk) SaveX(ctx context.Context) []*APIKey { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *APIKeyCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *APIKeyCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.APIKey.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.APIKeyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *APIKeyCreateBulk) OnConflict(opts ...sql.ConflictOption) *APIKeyUpsertBulk { + _c.conflict = opts + return &APIKeyUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.APIKey.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *APIKeyCreateBulk) OnConflictColumns(columns ...string) *APIKeyUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &APIKeyUpsertBulk{ + create: _c, + } +} + +// APIKeyUpsertBulk is the builder for "upsert"-ing +// a bulk of APIKey nodes. +type APIKeyUpsertBulk struct { + create *APIKeyCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.APIKey.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *APIKeyUpsertBulk) UpdateNewValues() *APIKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(apikey.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.APIKey.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *APIKeyUpsertBulk) Ignore() *APIKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *APIKeyUpsertBulk) DoNothing() *APIKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the APIKeyCreateBulk.OnConflict +// documentation for more info. +func (u *APIKeyUpsertBulk) Update(set func(*APIKeyUpsert)) *APIKeyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&APIKeyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *APIKeyUpsertBulk) SetUpdatedAt(v time.Time) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateUpdatedAt() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *APIKeyUpsertBulk) SetDeletedAt(v time.Time) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateDeletedAt() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *APIKeyUpsertBulk) ClearDeletedAt() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *APIKeyUpsertBulk) SetUserID(v int64) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateUserID() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateUserID() + }) +} + +// SetKey sets the "key" field. +func (u *APIKeyUpsertBulk) SetKey(v string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateKey() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *APIKeyUpsertBulk) SetName(v string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. 
+func (u *APIKeyUpsertBulk) UpdateName() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateName() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *APIKeyUpsertBulk) SetGroupID(v int64) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateGroupID() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *APIKeyUpsertBulk) ClearGroupID() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearGroupID() + }) +} + +// SetStatus sets the "status" field. +func (u *APIKeyUpsertBulk) SetStatus(v string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateStatus() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateStatus() + }) +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsertBulk) SetIPWhitelist(v []string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPWhitelist(v) + }) +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateIPWhitelist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPWhitelist() + }) +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (u *APIKeyUpsertBulk) ClearIPWhitelist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPWhitelist() + }) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsertBulk) SetIPBlacklist(v []string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPBlacklist(v) + }) +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateIPBlacklist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPBlacklist() + }) +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsertBulk) ClearIPBlacklist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPBlacklist() + }) +} + +// Exec executes the query. +func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the APIKeyCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for APIKeyCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *APIKeyUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/apikey_delete.go b/backend/ent/apikey_delete.go new file mode 100644 index 00000000..761db81d --- /dev/null +++ b/backend/ent/apikey_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// APIKeyDelete is the builder for deleting a APIKey entity. +type APIKeyDelete struct { + config + hooks []Hook + mutation *APIKeyMutation +} + +// Where appends a list predicates to the APIKeyDelete builder. +func (_d *APIKeyDelete) Where(ps ...predicate.APIKey) *APIKeyDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *APIKeyDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *APIKeyDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *APIKeyDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// APIKeyDeleteOne is the builder for deleting a single APIKey entity. +type APIKeyDeleteOne struct { + _d *APIKeyDelete +} + +// Where appends a list predicates to the APIKeyDelete builder. +func (_d *APIKeyDeleteOne) Where(ps ...predicate.APIKey) *APIKeyDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *APIKeyDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{apikey.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *APIKeyDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/apikey_query.go b/backend/ent/apikey_query.go new file mode 100644 index 00000000..9eee4077 --- /dev/null +++ b/backend/ent/apikey_query.go @@ -0,0 +1,796 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// APIKeyQuery is the builder for querying APIKey entities. +type APIKeyQuery struct { + config + ctx *QueryContext + order []apikey.OrderOption + inters []Interceptor + predicates []predicate.APIKey + withUser *UserQuery + withGroup *GroupQuery + withUsageLogs *UsageLogQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the APIKeyQuery builder. +func (_q *APIKeyQuery) Where(ps ...predicate.APIKey) *APIKeyQuery { + _q.predicates = append(_q.predicates, ps...) 
+ return _q +} + +// Limit the number of records to be returned by this query. +func (_q *APIKeyQuery) Limit(limit int) *APIKeyQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *APIKeyQuery) Offset(offset int) *APIKeyQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *APIKeyQuery) Unique(unique bool) *APIKeyQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *APIKeyQuery) Order(o ...apikey.OrderOption) *APIKeyQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *APIKeyQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *APIKeyQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsageLogs chains the current query on the "usage_logs" edge. +func (_q *APIKeyQuery) QueryUsageLogs() *UsageLogQuery { + query := (&UsageLogClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, selector), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, apikey.UsageLogsTable, apikey.UsageLogsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first APIKey entity from the query. +// Returns a *NotFoundError when no APIKey was found. +func (_q *APIKeyQuery) First(ctx context.Context) (*APIKey, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{apikey.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *APIKeyQuery) FirstX(ctx context.Context) *APIKey { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first APIKey ID from the query. +// Returns a *NotFoundError when no APIKey ID was found. +func (_q *APIKeyQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{apikey.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *APIKeyQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single APIKey entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one APIKey entity is found. +// Returns a *NotFoundError when no APIKey entities are found. +func (_q *APIKeyQuery) Only(ctx context.Context) (*APIKey, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{apikey.Label} + default: + return nil, &NotSingularError{apikey.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *APIKeyQuery) OnlyX(ctx context.Context) *APIKey { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only APIKey ID in the query. +// Returns a *NotSingularError when more than one APIKey ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *APIKeyQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{apikey.Label} + default: + err = &NotSingularError{apikey.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *APIKeyQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of APIKeys. +func (_q *APIKeyQuery) All(ctx context.Context) ([]*APIKey, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*APIKey, *APIKeyQuery]() + return withInterceptors[[]*APIKey](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *APIKeyQuery) AllX(ctx context.Context) []*APIKey { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of APIKey IDs. +func (_q *APIKeyQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(apikey.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *APIKeyQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *APIKeyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*APIKeyQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *APIKeyQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *APIKeyQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *APIKeyQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the APIKeyQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *APIKeyQuery) Clone() *APIKeyQuery { + if _q == nil { + return nil + } + return &APIKeyQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]apikey.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.APIKey{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + withUsageLogs: _q.withUsageLogs.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *APIKeyQuery) WithUser(opts ...func(*UserQuery)) *APIKeyQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *APIKeyQuery) WithGroup(opts ...func(*GroupQuery)) *APIKeyQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to +// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *APIKeyQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *APIKeyQuery { + query := (&UsageLogClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageLogs = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.APIKey.Query(). +// GroupBy(apikey.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *APIKeyQuery) GroupBy(field string, fields ...string) *APIKeyGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) 
+	grbuild := &APIKeyGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = apikey.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.APIKey.Query().
+//		Select(apikey.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (_q *APIKeyQuery) Select(fields ...string) *APIKeySelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &APIKeySelect{APIKeyQuery: _q}
+	sbuild.label = apikey.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns an APIKeySelect configured with the given aggregations.
+func (_q *APIKeyQuery) Aggregate(fns ...AggregateFunc) *APIKeySelect {
+	return _q.Select().Aggregate(fns...)
+}
+
+func (_q *APIKeyQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range _q.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, _q); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range _q.ctx.Fields {
+		if !apikey.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if _q.path != nil {
+		prev, err := _q.path(ctx)
+		if err != nil {
+			return err
+		}
+		_q.sql = prev
+	}
+	return nil
+}
+
+func (_q *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKey, error) {
+	var (
+		nodes       = []*APIKey{}
+		_spec       = _q.querySpec()
+		loadedTypes = [3]bool{
+			_q.withUser != nil,
+			_q.withGroup != nil,
+			_q.withUsageLogs != nil,
+		}
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*APIKey).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &APIKey{config: _q.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := _q.withUser; query != nil {
+		if err := _q.loadUser(ctx, query, nodes, nil,
+			func(n *APIKey, e *User) { n.Edges.User = e }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withGroup; query != nil {
+		if err := _q.loadGroup(ctx, query, nodes, nil,
+			func(n *APIKey, e *Group) { n.Edges.Group = e }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withUsageLogs; query != nil {
+		if err := _q.loadUsageLogs(ctx, query, nodes,
+			func(n *APIKey) { n.Edges.UsageLogs = []*UsageLog{} },
+			func(n *APIKey, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (_q *APIKeyQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *User)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*APIKey)
+	for i := range nodes {
+		fk := nodes[i].UserID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(user.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+func (_q *APIKeyQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *Group)) error {
+	ids := make([]int64, 0, len(nodes))
+	nodeids := make(map[int64][]*APIKey)
+	for i := range nodes {
+		if nodes[i].GroupID == nil {
+			continue
+		}
+		fk := *nodes[i].GroupID
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(group.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+func (_q *APIKeyQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *UsageLog)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int64]*APIKey)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	if len(query.ctx.Fields) > 0 {
+		query.ctx.AppendFieldOnce(usagelog.FieldAPIKeyID)
+	}
+	query.Where(predicate.UsageLog(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(apikey.UsageLogsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.APIKeyID
+		node, ok := nodeids[fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "api_key_id" returned %v for node %v`, fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+
+func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := _q.querySpec()
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	_spec.Node.Columns = _q.ctx.Fields
+	if len(_q.ctx.Fields) > 0 {
+		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
+}
+
+func (_q *APIKeyQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
+	_spec.From = _q.sql
+	if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
+		for i := range fields {
+			if fields[i] != apikey.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+		if _q.withUser != nil {
+			_spec.Node.AddColumnOnce(apikey.FieldUserID)
+		}
+		if _q.withGroup != nil {
+			_spec.Node.AddColumnOnce(apikey.FieldGroupID)
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
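+// NOTE (editorial aside, not part of the generated file): a minimal sketch of
+// how the query builder above is typically driven from application code. The
+// names ctx, client, tx and the predicate helpers apikey.UserID/apikey.ID are
+// illustrative assumptions (such predicates live in the generated
+// ent/apikey/where.go elsewhere in this patch), not something this hunk defines:
+//
+//	// Eager-load the owning user and group for one user's keys.
+//	keys, err := client.APIKey.Query().
+//		Where(apikey.UserID(42)).
+//		WithUser().
+//		WithGroup().
+//		All(ctx)
+//
+//	// Inside a transaction, lock one key row before mutating it.
+//	k, err := tx.APIKey.Query().
+//		Where(apikey.ID(1)).
+//		ForUpdate().
+//		Only(ctx)
+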
+func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(apikey.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = apikey.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (_q *APIKeyQuery) ForUpdate(opts ...sql.LockOption) *APIKeyQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return _q
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (_q *APIKeyQuery) ForShare(opts ...sql.LockOption) *APIKeyQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return _q
+}
+
+// APIKeyGroupBy is the group-by builder for APIKey entities.
+type APIKeyGroupBy struct {
+	selector
+	build *APIKeyQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *APIKeyGroupBy) Aggregate(fns ...AggregateFunc) *APIKeyGroupBy {
+	_g.fns = append(_g.fns, fns...)
+	return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_g *APIKeyGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
+	if err := _g.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*APIKeyQuery, *APIKeyGroupBy](ctx, _g.build, _g, _g.build.inters, v)
+}
+
+func (_g *APIKeyGroupBy) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(_g.fns))
+	for _, fn := range _g.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
+		for _, f := range *_g.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*_g.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// APIKeySelect is the builder for selecting fields of APIKey entities.
+type APIKeySelect struct { + *APIKeyQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *APIKeySelect) Aggregate(fns ...AggregateFunc) *APIKeySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *APIKeySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*APIKeyQuery, *APIKeySelect](ctx, _s.APIKeyQuery, _s, _s.inters, v) +} + +func (_s *APIKeySelect) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/apikey_update.go b/backend/ent/apikey_update.go new file mode 100644 index 00000000..9ae332a8 --- /dev/null +++ b/backend/ent/apikey_update.go @@ -0,0 +1,940 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// APIKeyUpdate is the builder for updating APIKey entities. +type APIKeyUpdate struct { + config + hooks []Hook + mutation *APIKeyMutation +} + +// Where appends a list predicates to the APIKeyUpdate builder. +func (_u *APIKeyUpdate) Where(ps ...predicate.APIKey) *APIKeyUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *APIKeyUpdate) SetUpdatedAt(v time.Time) *APIKeyUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *APIKeyUpdate) SetDeletedAt(v time.Time) *APIKeyUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *APIKeyUpdate) SetNillableDeletedAt(v *time.Time) *APIKeyUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *APIKeyUpdate) ClearDeletedAt() *APIKeyUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *APIKeyUpdate) SetUserID(v int64) *APIKeyUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *APIKeyUpdate) SetNillableUserID(v *int64) *APIKeyUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetKey sets the "key" field. +func (_u *APIKeyUpdate) SetKey(v string) *APIKeyUpdate { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. 
+func (_u *APIKeyUpdate) SetNillableKey(v *string) *APIKeyUpdate { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *APIKeyUpdate) SetName(v string) *APIKeyUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *APIKeyUpdate) SetNillableName(v *string) *APIKeyUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *APIKeyUpdate) SetGroupID(v int64) *APIKeyUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *APIKeyUpdate) SetNillableGroupID(v *int64) *APIKeyUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *APIKeyUpdate) ClearGroupID() *APIKeyUpdate { + _u.mutation.ClearGroupID() + return _u +} + +// SetStatus sets the "status" field. +func (_u *APIKeyUpdate) SetStatus(v string) *APIKeyUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *APIKeyUpdate) SetNillableStatus(v *string) *APIKeyUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate { + _u.mutation.SetIPWhitelist(v) + return _u +} + +// AppendIPWhitelist appends value to the "ip_whitelist" field. +func (_u *APIKeyUpdate) AppendIPWhitelist(v []string) *APIKeyUpdate { + _u.mutation.AppendIPWhitelist(v) + return _u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (_u *APIKeyUpdate) ClearIPWhitelist() *APIKeyUpdate { + _u.mutation.ClearIPWhitelist() + return _u +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (_u *APIKeyUpdate) SetIPBlacklist(v []string) *APIKeyUpdate { + _u.mutation.SetIPBlacklist(v) + return _u +} + +// AppendIPBlacklist appends value to the "ip_blacklist" field. +func (_u *APIKeyUpdate) AppendIPBlacklist(v []string) *APIKeyUpdate { + _u.mutation.AppendIPBlacklist(v) + return _u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (_u *APIKeyUpdate) ClearIPBlacklist() *APIKeyUpdate { + _u.mutation.ClearIPBlacklist() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *APIKeyUpdate) SetGroup(v *Group) *APIKeyUpdate { + return _u.SetGroupID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *APIKeyUpdate) AddUsageLogIDs(ids ...int64) *APIKeyUpdate { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *APIKeyUpdate) AddUsageLogs(v ...*UsageLog) *APIKeyUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// Mutation returns the APIKeyMutation object of the builder. +func (_u *APIKeyUpdate) Mutation() *APIKeyMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *APIKeyUpdate) ClearUser() *APIKeyUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. 
+func (_u *APIKeyUpdate) ClearGroup() *APIKeyUpdate { + _u.mutation.ClearGroup() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *APIKeyUpdate) ClearUsageLogs() *APIKeyUpdate { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *APIKeyUpdate) RemoveUsageLogIDs(ids ...int64) *APIKeyUpdate { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *APIKeyUpdate) RemoveUsageLogs(v ...*UsageLog) *APIKeyUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *APIKeyUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *APIKeyUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *APIKeyUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *APIKeyUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *APIKeyUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if apikey.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *APIKeyUpdate) check() error {
+	if v, ok := _u.mutation.Key(); ok {
+		if err := apikey.KeyValidator(v); err != nil {
+			return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "APIKey.key": %w`, err)}
+		}
+	}
+	if v, ok := _u.mutation.Name(); ok {
+		if err := apikey.NameValidator(v); err != nil {
+			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)}
+		}
+	}
+	if v, ok := _u.mutation.Status(); ok {
+		if err := apikey.StatusValidator(v); err != nil {
+			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "APIKey.status": %w`, err)}
+		}
+	}
+	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
+		return errors.New(`ent: clearing a required unique edge "APIKey.user"`)
+	}
+	return nil
+}
+
+func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
+	if err := _u.check(); err != nil {
+		return _node, err
+	}
+	_spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
+	if ps := _u.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if value, ok := _u.mutation.UpdatedAt(); ok {
+		_spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value)
+	}
+	if value, ok := _u.mutation.DeletedAt(); ok {
+		_spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value)
+	}
+	if _u.mutation.DeletedAtCleared() {
+		_spec.ClearField(apikey.FieldDeletedAt, field.TypeTime)
+	}
+	if value, ok := _u.mutation.Key(); ok {
+		_spec.SetField(apikey.FieldKey, field.TypeString, value)
+	}
+	if value, ok := _u.mutation.Name(); ok {
+		_spec.SetField(apikey.FieldName, field.TypeString, value)
+	}
+	if value, ok := _u.mutation.Status(); ok {
+		_spec.SetField(apikey.FieldStatus, field.TypeString, value)
+	}
+	if value, ok := _u.mutation.IPWhitelist(); ok {
+		_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
+	}
+	if value, ok := _u.mutation.AppendedIPWhitelist(); ok {
+		_spec.AddModifier(func(u *sql.UpdateBuilder) {
+			sqljson.Append(u, apikey.FieldIPWhitelist, value)
+		})
+	}
+	if _u.mutation.IPWhitelistCleared() {
+		_spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON)
+	}
+	if value, ok := _u.mutation.IPBlacklist(); ok {
+		_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
+	}
+	if value, ok := _u.mutation.AppendedIPBlacklist(); ok {
+		_spec.AddModifier(func(u *sql.UpdateBuilder) {
+			sqljson.Append(u, apikey.FieldIPBlacklist, value)
+		})
+	}
+	if _u.mutation.IPBlacklistCleared() {
+		_spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON)
+	}
+	if _u.mutation.UserCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.UserTable,
+			Columns: []string{apikey.UserColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.UserTable,
+			Columns: []string{apikey.UserColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if _u.mutation.GroupCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.GroupTable,
+			Columns: []string{apikey.GroupColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.M2O,
+			Inverse: true,
+			Table:   apikey.GroupTable,
+			Columns: []string{apikey.GroupColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if _u.mutation.UsageLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   apikey.UsageLogsTable,
+			Columns: []string{apikey.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   apikey.UsageLogsTable,
+			Columns: []string{apikey.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   apikey.UsageLogsTable,
+			Columns: []string{apikey.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{apikey.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return 0, err
+	}
+	_u.mutation.done = true
+	return _node, nil
+}
+
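+// NOTE (editorial aside, not part of the generated file): sqlSave above only
+// ever issues UPDATEs; row removal in this schema is normally a soft delete
+// via the deleted_at field (see ent/schema/mixins/soft_delete.go in this
+// patch). A hedged sketch, assuming the standard generated client surface
+// (client.APIKey.UpdateOneID) and illustrative ctx/id values:
+//
+//	// Mark one key as deleted instead of removing the row.
+//	err := client.APIKey.UpdateOneID(id).
+//		SetDeletedAt(time.Now()).
+//		Exec(ctx)
+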
+// APIKeyUpdateOne is the builder for updating a single APIKey entity.
+type APIKeyUpdateOne struct {
+	config
+	fields   []string
+	hooks    []Hook
+	mutation *APIKeyMutation
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_u *APIKeyUpdateOne) SetUpdatedAt(v time.Time) *APIKeyUpdateOne {
+	_u.mutation.SetUpdatedAt(v)
+	return _u
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (_u *APIKeyUpdateOne) SetDeletedAt(v time.Time) *APIKeyUpdateOne {
+	_u.mutation.SetDeletedAt(v)
+	return _u
+}
+
+// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
+func (_u *APIKeyUpdateOne) SetNillableDeletedAt(v *time.Time) *APIKeyUpdateOne {
+	if v != nil {
+		_u.SetDeletedAt(*v)
+	}
+	return _u
+}
+
+// ClearDeletedAt clears the value of the "deleted_at" field.
+func (_u *APIKeyUpdateOne) ClearDeletedAt() *APIKeyUpdateOne {
+	_u.mutation.ClearDeletedAt()
+	return _u
+}
+
+// SetUserID sets the "user_id" field.
+func (_u *APIKeyUpdateOne) SetUserID(v int64) *APIKeyUpdateOne {
+	_u.mutation.SetUserID(v)
+	return _u
+}
+
+// SetNillableUserID sets the "user_id" field if the given value is not nil.
+func (_u *APIKeyUpdateOne) SetNillableUserID(v *int64) *APIKeyUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetKey sets the "key" field. +func (_u *APIKeyUpdateOne) SetKey(v string) *APIKeyUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *APIKeyUpdateOne) SetNillableKey(v *string) *APIKeyUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *APIKeyUpdateOne) SetName(v string) *APIKeyUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *APIKeyUpdateOne) SetNillableName(v *string) *APIKeyUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *APIKeyUpdateOne) SetGroupID(v int64) *APIKeyUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *APIKeyUpdateOne) SetNillableGroupID(v *int64) *APIKeyUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *APIKeyUpdateOne) ClearGroupID() *APIKeyUpdateOne { + _u.mutation.ClearGroupID() + return _u +} + +// SetStatus sets the "status" field. +func (_u *APIKeyUpdateOne) SetStatus(v string) *APIKeyUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *APIKeyUpdateOne) SetNillableStatus(v *string) *APIKeyUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne { + _u.mutation.SetIPWhitelist(v) + return _u +} + +// AppendIPWhitelist appends value to the "ip_whitelist" field. +func (_u *APIKeyUpdateOne) AppendIPWhitelist(v []string) *APIKeyUpdateOne { + _u.mutation.AppendIPWhitelist(v) + return _u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (_u *APIKeyUpdateOne) ClearIPWhitelist() *APIKeyUpdateOne { + _u.mutation.ClearIPWhitelist() + return _u +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) SetIPBlacklist(v []string) *APIKeyUpdateOne { + _u.mutation.SetIPBlacklist(v) + return _u +} + +// AppendIPBlacklist appends value to the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) AppendIPBlacklist(v []string) *APIKeyUpdateOne { + _u.mutation.AppendIPBlacklist(v) + return _u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) ClearIPBlacklist() *APIKeyUpdateOne { + _u.mutation.ClearIPBlacklist() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *APIKeyUpdateOne) SetGroup(v *Group) *APIKeyUpdateOne { + return _u.SetGroupID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *APIKeyUpdateOne) AddUsageLogIDs(ids ...int64) *APIKeyUpdateOne { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. 
+func (_u *APIKeyUpdateOne) AddUsageLogs(v ...*UsageLog) *APIKeyUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// Mutation returns the APIKeyMutation object of the builder. +func (_u *APIKeyUpdateOne) Mutation() *APIKeyMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *APIKeyUpdateOne) ClearUser() *APIKeyUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *APIKeyUpdateOne) ClearGroup() *APIKeyUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *APIKeyUpdateOne) ClearUsageLogs() *APIKeyUpdateOne { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *APIKeyUpdateOne) RemoveUsageLogIDs(ids ...int64) *APIKeyUpdateOne { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *APIKeyUpdateOne) RemoveUsageLogs(v ...*UsageLog) *APIKeyUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Where appends a list predicates to the APIKeyUpdate builder. +func (_u *APIKeyUpdateOne) Where(ps ...predicate.APIKey) *APIKeyUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *APIKeyUpdateOne) Select(field string, fields ...string) *APIKeyUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated APIKey entity. +func (_u *APIKeyUpdateOne) Save(ctx context.Context) (*APIKey, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *APIKeyUpdateOne) SaveX(ctx context.Context) *APIKey { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *APIKeyUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *APIKeyUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *APIKeyUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if apikey.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := apikey.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
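+
+// Illustrative usage (editor's sketch, not generated by ent): updating a single
+// APIKey by ID and selecting only a subset of columns on the returned entity.
+// The id value and the chosen field are assumptions for the example.
+func exampleAPIKeyUpdateOne(ctx context.Context, client *Client) (*APIKey, error) {
+	// Select limits the columns scanned back into the entity; the ID column
+	// is always included, as documented on Select above.
+	return client.APIKey.
+		UpdateOneID(1).
+		SetName("renamed").
+		Select(apikey.FieldName).
+		Save(ctx)
+}
+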
+func (_u *APIKeyUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := apikey.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "APIKey.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := apikey.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := apikey.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "APIKey.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "APIKey.user"`) + } + return nil +} + +func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "APIKey.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID) + for _, f := range fields { + if !apikey.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != apikey.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(apikey.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(apikey.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(apikey.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(apikey.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.IPWhitelist(); ok { + _spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPWhitelist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPWhitelist, value) + }) + } + if _u.mutation.IPWhitelistCleared() { + _spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON) + } + if value, ok := _u.mutation.IPBlacklist(); ok { + _spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPBlacklist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPBlacklist, value) + }) + } + if _u.mutation.IPBlacklistCleared() { + _spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, 
field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.UserTable, + Columns: []string{apikey.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: apikey.GroupTable, + Columns: []string{apikey.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: apikey.UsageLogsTable, + Columns: []string{apikey.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: apikey.UsageLogsTable, + Columns: []string{apikey.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: apikey.UsageLogsTable, + Columns: []string{apikey.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &APIKey{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{apikey.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/client.go b/backend/ent/client.go new file mode 100644 index 00000000..35cf644f --- /dev/null +++ b/backend/ent/client.go @@ -0,0 +1,3009 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/Wei-Shaw/sub2api/ent/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" + + stdsql "database/sql" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // APIKey is the client for interacting with the APIKey builders. + APIKey *APIKeyClient + // Account is the client for interacting with the Account builders. + Account *AccountClient + // AccountGroup is the client for interacting with the AccountGroup builders. + AccountGroup *AccountGroupClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // PromoCode is the client for interacting with the PromoCode builders. + PromoCode *PromoCodeClient + // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. + PromoCodeUsage *PromoCodeUsageClient + // Proxy is the client for interacting with the Proxy builders. + Proxy *ProxyClient + // RedeemCode is the client for interacting with the RedeemCode builders. + RedeemCode *RedeemCodeClient + // Setting is the client for interacting with the Setting builders. + Setting *SettingClient + // UsageLog is the client for interacting with the UsageLog builders. + UsageLog *UsageLogClient + // User is the client for interacting with the User builders. + User *UserClient + // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. + UserAllowedGroup *UserAllowedGroupClient + // UserAttributeDefinition is the client for interacting with the UserAttributeDefinition builders. + UserAttributeDefinition *UserAttributeDefinitionClient + // UserAttributeValue is the client for interacting with the UserAttributeValue builders. + UserAttributeValue *UserAttributeValueClient + // UserSubscription is the client for interacting with the UserSubscription builders. + UserSubscription *UserSubscriptionClient +} + +// NewClient creates a new client configured with the given options. 
+func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.APIKey = NewAPIKeyClient(c.config) + c.Account = NewAccountClient(c.config) + c.AccountGroup = NewAccountGroupClient(c.config) + c.Group = NewGroupClient(c.config) + c.PromoCode = NewPromoCodeClient(c.config) + c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) + c.Proxy = NewProxyClient(c.config) + c.RedeemCode = NewRedeemCodeClient(c.config) + c.Setting = NewSettingClient(c.config) + c.UsageLog = NewUsageLogClient(c.config) + c.User = NewUserClient(c.config) + c.UserAllowedGroup = NewUserAllowedGroupClient(c.config) + c.UserAttributeDefinition = NewUserAttributeDefinitionClient(c.config) + c.UserAttributeValue = NewUserAttributeValueClient(c.config) + c.UserSubscription = NewUserSubscriptionClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. 
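+
+// Illustrative usage (editor's sketch, not generated by ent): opening a client
+// with options. The SQLite DSN is an assumption, and the importing program must
+// register a matching driver (e.g. github.com/mattn/go-sqlite3) for it to work.
+func exampleOpen() (*Client, error) {
+	// Debug wraps the driver with verbose logging; Log redirects its output.
+	return Open(dialect.SQLite, "file:ent?mode=memory&cache=shared&_fk=1",
+		Debug(), Log(log.Println))
+}
+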
+func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + APIKey: NewAPIKeyClient(cfg), + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + Group: NewGroupClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + APIKey: NewAPIKeyClient(cfg), + Account: NewAccountClient(cfg), + AccountGroup: NewAccountGroupClient(cfg), + Group: NewGroupClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), + Proxy: NewProxyClient(cfg), + RedeemCode: NewRedeemCodeClient(cfg), + Setting: NewSettingClient(cfg), + UsageLog: NewUsageLogClient(cfg), + User: NewUserClient(cfg), + UserAllowedGroup: NewUserAllowedGroupClient(cfg), + UserAttributeDefinition: NewUserAttributeDefinitionClient(cfg), + UserAttributeValue: NewUserAttributeValueClient(cfg), + UserSubscription: NewUserSubscriptionClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// APIKey. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. 
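+
+// Illustrative usage (editor's sketch, not generated by ent): running a
+// mutation inside a transaction. Commit and Rollback come from the generated
+// Tx type in tx.go; the key ID and status value are assumptions.
+func exampleTx(ctx context.Context, client *Client) error {
+	tx, err := client.Tx(ctx)
+	if err != nil {
+		return err
+	}
+	// Builders hanging off tx share the same underlying transaction.
+	if _, err := tx.APIKey.UpdateOneID(1).SetStatus("disabled").Save(ctx); err != nil {
+		// Roll back on failure and surface the original error.
+		_ = tx.Rollback()
+		return err
+	}
+	return tx.Commit()
+}
+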
+func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *APIKeyMutation: + return c.APIKey.mutate(ctx, m) + case *AccountMutation: + return c.Account.mutate(ctx, m) + case *AccountGroupMutation: + return c.AccountGroup.mutate(ctx, m) + case *GroupMutation: + return c.Group.mutate(ctx, m) + case *PromoCodeMutation: + return c.PromoCode.mutate(ctx, m) + case *PromoCodeUsageMutation: + return c.PromoCodeUsage.mutate(ctx, m) + case *ProxyMutation: + return c.Proxy.mutate(ctx, m) + case *RedeemCodeMutation: + return c.RedeemCode.mutate(ctx, m) + case *SettingMutation: + return c.Setting.mutate(ctx, m) + case *UsageLogMutation: + return c.UsageLog.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + case *UserAllowedGroupMutation: + return c.UserAllowedGroup.mutate(ctx, m) + case *UserAttributeDefinitionMutation: + return c.UserAttributeDefinition.mutate(ctx, m) + case *UserAttributeValueMutation: + return c.UserAttributeValue.mutate(ctx, m) + case *UserSubscriptionMutation: + return c.UserSubscription.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// APIKeyClient is a client for the APIKey schema. +type APIKeyClient struct { + config +} + +// NewAPIKeyClient returns a client for the APIKey from the given config. +func NewAPIKeyClient(c config) *APIKeyClient { + return &APIKeyClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `apikey.Hooks(f(g(h())))`. +func (c *APIKeyClient) Use(hooks ...Hook) { + c.hooks.APIKey = append(c.hooks.APIKey, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `apikey.Intercept(f(g(h())))`. +func (c *APIKeyClient) Intercept(interceptors ...Interceptor) { + c.inters.APIKey = append(c.inters.APIKey, interceptors...) +} + +// Create returns a builder for creating a APIKey entity. +func (c *APIKeyClient) Create() *APIKeyCreate { + mutation := newAPIKeyMutation(c.config, OpCreate) + return &APIKeyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of APIKey entities. +func (c *APIKeyClient) CreateBulk(builders ...*APIKeyCreate) *APIKeyCreateBulk { + return &APIKeyCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
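+
+// Illustrative usage (editor's sketch, not generated by ent): attaching a
+// logging hook to every entity client via Use. Hook aliases ent.Hook, so a
+// plain func(ent.Mutator) ent.Mutator literal satisfies it.
+func exampleUseHook(client *Client) {
+	client.Use(func(next ent.Mutator) ent.Mutator {
+		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+			// Runs before every mutation on every schema type.
+			log.Println("mutation:", m.Type(), m.Op())
+			return next.Mutate(ctx, m)
+		})
+	})
+}
+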
+func (c *APIKeyClient) MapCreateBulk(slice any, setFunc func(*APIKeyCreate, int)) *APIKeyCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &APIKeyCreateBulk{err: fmt.Errorf("calling to APIKeyClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*APIKeyCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &APIKeyCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for APIKey. +func (c *APIKeyClient) Update() *APIKeyUpdate { + mutation := newAPIKeyMutation(c.config, OpUpdate) + return &APIKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *APIKeyClient) UpdateOne(_m *APIKey) *APIKeyUpdateOne { + mutation := newAPIKeyMutation(c.config, OpUpdateOne, withAPIKey(_m)) + return &APIKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *APIKeyClient) UpdateOneID(id int64) *APIKeyUpdateOne { + mutation := newAPIKeyMutation(c.config, OpUpdateOne, withAPIKeyID(id)) + return &APIKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for APIKey. +func (c *APIKeyClient) Delete() *APIKeyDelete { + mutation := newAPIKeyMutation(c.config, OpDelete) + return &APIKeyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *APIKeyClient) DeleteOne(_m *APIKey) *APIKeyDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *APIKeyClient) DeleteOneID(id int64) *APIKeyDeleteOne { + builder := c.Delete().Where(apikey.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &APIKeyDeleteOne{builder} +} + +// Query returns a query builder for APIKey. +func (c *APIKeyClient) Query() *APIKeyQuery { + return &APIKeyQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAPIKey}, + inters: c.Interceptors(), + } +} + +// Get returns a APIKey entity by its id. +func (c *APIKeyClient) Get(ctx context.Context, id int64) (*APIKey, error) { + return c.Query().Where(apikey.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *APIKeyClient) GetX(ctx context.Context, id int64) *APIKey { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a APIKey. +func (c *APIKeyClient) QueryUser(_m *APIKey) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a APIKey. 
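+
+// Illustrative usage (editor's sketch, not generated by ent): bulk-creating
+// APIKeys from a slice with MapCreateBulk. The setters and the user ID are
+// assumptions based on the APIKeyCreate builder elsewhere in this patch.
+func exampleMapCreateBulk(ctx context.Context, client *Client) ([]*APIKey, error) {
+	names := []string{"ci", "staging"}
+	return client.APIKey.MapCreateBulk(names, func(c *APIKeyCreate, i int) {
+		c.SetName(names[i]).SetKey("sk-" + names[i]).SetUserID(1)
+	}).Save(ctx)
+}
+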
+func (c *APIKeyClient) QueryGroup(_m *APIKey) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUsageLogs queries the usage_logs edge of a APIKey. +func (c *APIKeyClient) QueryUsageLogs(_m *APIKey) *UsageLogQuery { + query := (&UsageLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(apikey.Table, apikey.FieldID, id), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, apikey.UsageLogsTable, apikey.UsageLogsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *APIKeyClient) Hooks() []Hook { + hooks := c.hooks.APIKey + return append(hooks[:len(hooks):len(hooks)], apikey.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *APIKeyClient) Interceptors() []Interceptor { + inters := c.inters.APIKey + return append(inters[:len(inters):len(inters)], apikey.Interceptors[:]...) +} + +func (c *APIKeyClient) mutate(ctx context.Context, m *APIKeyMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&APIKeyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&APIKeyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&APIKeyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&APIKeyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown APIKey mutation op: %q", m.Op()) + } +} + +// AccountClient is a client for the Account schema. +type AccountClient struct { + config +} + +// NewAccountClient returns a client for the Account from the given config. +func NewAccountClient(c config) *AccountClient { + return &AccountClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `account.Hooks(f(g(h())))`. +func (c *AccountClient) Use(hooks ...Hook) { + c.hooks.Account = append(c.hooks.Account, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `account.Intercept(f(g(h())))`. +func (c *AccountClient) Intercept(interceptors ...Interceptor) { + c.inters.Account = append(c.inters.Account, interceptors...) +} + +// Create returns a builder for creating a Account entity. +func (c *AccountClient) Create() *AccountCreate { + mutation := newAccountMutation(c.config, OpCreate) + return &AccountCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Account entities. +func (c *AccountClient) CreateBulk(builders ...*AccountCreate) *AccountCreateBulk { + return &AccountCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
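+
+// Illustrative usage (editor's sketch, not generated by ent): walking the user
+// edge of a previously loaded APIKey. Only fails unless exactly one row matches.
+func exampleQueryUser(ctx context.Context, client *Client, ak *APIKey) (*User, error) {
+	return client.APIKey.QueryUser(ak).Only(ctx)
+}
+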
+func (c *AccountClient) MapCreateBulk(slice any, setFunc func(*AccountCreate, int)) *AccountCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AccountCreateBulk{err: fmt.Errorf("calling to AccountClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AccountCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AccountCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Account. +func (c *AccountClient) Update() *AccountUpdate { + mutation := newAccountMutation(c.config, OpUpdate) + return &AccountUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AccountClient) UpdateOne(_m *Account) *AccountUpdateOne { + mutation := newAccountMutation(c.config, OpUpdateOne, withAccount(_m)) + return &AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AccountClient) UpdateOneID(id int64) *AccountUpdateOne { + mutation := newAccountMutation(c.config, OpUpdateOne, withAccountID(id)) + return &AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Account. +func (c *AccountClient) Delete() *AccountDelete { + mutation := newAccountMutation(c.config, OpDelete) + return &AccountDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AccountClient) DeleteOne(_m *Account) *AccountDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AccountClient) DeleteOneID(id int64) *AccountDeleteOne { + builder := c.Delete().Where(account.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AccountDeleteOne{builder} +} + +// Query returns a query builder for Account. +func (c *AccountClient) Query() *AccountQuery { + return &AccountQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAccount}, + inters: c.Interceptors(), + } +} + +// Get returns a Account entity by its id. +func (c *AccountClient) Get(ctx context.Context, id int64) (*Account, error) { + return c.Query().Where(account.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AccountClient) GetX(ctx context.Context, id int64) *Account { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryGroups queries the groups edge of a Account. +func (c *AccountClient) QueryGroups(_m *Account) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryProxy queries the proxy edge of a Account. 
+func (c *AccountClient) QueryProxy(_m *Account) *ProxyQuery { + query := (&ProxyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(proxy.Table, proxy.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, account.ProxyTable, account.ProxyColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUsageLogs queries the usage_logs edge of a Account. +func (c *AccountClient) QueryUsageLogs(_m *Account) *UsageLogQuery { + query := (&UsageLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, account.UsageLogsTable, account.UsageLogsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccountGroups queries the account_groups edge of a Account. +func (c *AccountClient) QueryAccountGroups(_m *Account) *AccountGroupQuery { + query := (&AccountGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(account.Table, account.FieldID, id), + sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn), + sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AccountClient) Hooks() []Hook { + hooks := c.hooks.Account + return append(hooks[:len(hooks):len(hooks)], account.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *AccountClient) Interceptors() []Interceptor { + inters := c.inters.Account + return append(inters[:len(inters):len(inters)], account.Interceptors[:]...) +} + +func (c *AccountClient) mutate(ctx context.Context, m *AccountMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AccountCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AccountUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AccountUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AccountDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Account mutation op: %q", m.Op()) + } +} + +// AccountGroupClient is a client for the AccountGroup schema. +type AccountGroupClient struct { + config +} + +// NewAccountGroupClient returns a client for the AccountGroup from the given config. +func NewAccountGroupClient(c config) *AccountGroupClient { + return &AccountGroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `accountgroup.Hooks(f(g(h())))`. +func (c *AccountGroupClient) Use(hooks ...Hook) { + c.hooks.AccountGroup = append(c.hooks.AccountGroup, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `accountgroup.Intercept(f(g(h())))`. 
+func (c *AccountGroupClient) Intercept(interceptors ...Interceptor) { + c.inters.AccountGroup = append(c.inters.AccountGroup, interceptors...) +} + +// Create returns a builder for creating a AccountGroup entity. +func (c *AccountGroupClient) Create() *AccountGroupCreate { + mutation := newAccountGroupMutation(c.config, OpCreate) + return &AccountGroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of AccountGroup entities. +func (c *AccountGroupClient) CreateBulk(builders ...*AccountGroupCreate) *AccountGroupCreateBulk { + return &AccountGroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AccountGroupClient) MapCreateBulk(slice any, setFunc func(*AccountGroupCreate, int)) *AccountGroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AccountGroupCreateBulk{err: fmt.Errorf("calling to AccountGroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AccountGroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AccountGroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for AccountGroup. +func (c *AccountGroupClient) Update() *AccountGroupUpdate { + mutation := newAccountGroupMutation(c.config, OpUpdate) + return &AccountGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *AccountGroupClient) UpdateOne(_m *AccountGroup) *AccountGroupUpdateOne { + mutation := newAccountGroupMutation(c.config, OpUpdateOne) + mutation.account = &_m.AccountID + mutation.group = &_m.GroupID + return &AccountGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for AccountGroup. +func (c *AccountGroupClient) Delete() *AccountGroupDelete { + mutation := newAccountGroupMutation(c.config, OpDelete) + return &AccountGroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Query returns a query builder for AccountGroup. +func (c *AccountGroupClient) Query() *AccountGroupQuery { + return &AccountGroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAccountGroup}, + inters: c.Interceptors(), + } +} + +// QueryAccount queries the account edge of a AccountGroup. +func (c *AccountGroupClient) QueryAccount(_m *AccountGroup) *AccountQuery { + return c.Query(). + Where(accountgroup.AccountID(_m.AccountID), accountgroup.GroupID(_m.GroupID)). + QueryAccount() +} + +// QueryGroup queries the group edge of a AccountGroup. +func (c *AccountGroupClient) QueryGroup(_m *AccountGroup) *GroupQuery { + return c.Query(). + Where(accountgroup.AccountID(_m.AccountID), accountgroup.GroupID(_m.GroupID)). + QueryGroup() +} + +// Hooks returns the client hooks. +func (c *AccountGroupClient) Hooks() []Hook { + return c.hooks.AccountGroup +} + +// Interceptors returns the client interceptors. 
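+
+// Illustrative usage (editor's sketch, not generated by ent): AccountGroup is
+// an edge schema keyed by (account_id, group_id), which is why the client above
+// has no Get or DeleteOne by ID; rows are addressed with field predicates.
+func exampleAccountGroupLookup(ctx context.Context, client *Client, accountID, groupID int64) (*AccountGroup, error) {
+	return client.AccountGroup.Query().
+		Where(accountgroup.AccountID(accountID), accountgroup.GroupID(groupID)).
+		Only(ctx)
+}
+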
+func (c *AccountGroupClient) Interceptors() []Interceptor { + return c.inters.AccountGroup +} + +func (c *AccountGroupClient) mutate(ctx context.Context, m *AccountGroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AccountGroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AccountGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AccountGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AccountGroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown AccountGroup mutation op: %q", m.Op()) + } +} + +// GroupClient is a client for the Group schema. +type GroupClient struct { + config +} + +// NewGroupClient returns a client for the Group from the given config. +func NewGroupClient(c config) *GroupClient { + return &GroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `group.Hooks(f(g(h())))`. +func (c *GroupClient) Use(hooks ...Hook) { + c.hooks.Group = append(c.hooks.Group, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `group.Intercept(f(g(h())))`. +func (c *GroupClient) Intercept(interceptors ...Interceptor) { + c.inters.Group = append(c.inters.Group, interceptors...) +} + +// Create returns a builder for creating a Group entity. +func (c *GroupClient) Create() *GroupCreate { + mutation := newGroupMutation(c.config, OpCreate) + return &GroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Group entities. +func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk { + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*GroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &GroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Group. +func (c *GroupClient) Update() *GroupUpdate { + mutation := newGroupMutation(c.config, OpUpdate) + return &GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *GroupClient) UpdateOne(_m *Group) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroup(_m)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *GroupClient) UpdateOneID(id int64) *GroupUpdateOne { + mutation := newGroupMutation(c.config, OpUpdateOne, withGroupID(id)) + return &GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Group. 
+func (c *GroupClient) Delete() *GroupDelete { + mutation := newGroupMutation(c.config, OpDelete) + return &GroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *GroupClient) DeleteOne(_m *Group) *GroupDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *GroupClient) DeleteOneID(id int64) *GroupDeleteOne { + builder := c.Delete().Where(group.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &GroupDeleteOne{builder} +} + +// Query returns a query builder for Group. +func (c *GroupClient) Query() *GroupQuery { + return &GroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeGroup}, + inters: c.Interceptors(), + } +} + +// Get returns a Group entity by its id. +func (c *GroupClient) Get(ctx context.Context, id int64) (*Group, error) { + return c.Query().Where(group.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *GroupClient) GetX(ctx context.Context, id int64) *Group { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAPIKeys queries the api_keys edge of a Group. +func (c *GroupClient) QueryAPIKeys(_m *Group) *APIKeyQuery { + query := (&APIKeyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.APIKeysTable, group.APIKeysColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryRedeemCodes queries the redeem_codes edge of a Group. +func (c *GroupClient) QueryRedeemCodes(_m *Group) *RedeemCodeQuery { + query := (&RedeemCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.RedeemCodesTable, group.RedeemCodesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySubscriptions queries the subscriptions edge of a Group. +func (c *GroupClient) QuerySubscriptions(_m *Group) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.SubscriptionsTable, group.SubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUsageLogs queries the usage_logs edge of a Group. 
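+
+// Illustrative usage (editor's sketch, not generated by ent): loading a group
+// and traversing to its API keys with the edge helpers above.
+func exampleGroupAPIKeys(ctx context.Context, client *Client, id int64) ([]*APIKey, error) {
+	g, err := client.Group.Get(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+	return client.Group.QueryAPIKeys(g).All(ctx)
+}
+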
+func (c *GroupClient) QueryUsageLogs(_m *Group) *UsageLogQuery { + query := (&UsageLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.UsageLogsTable, group.UsageLogsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccounts queries the accounts edge of a Group. +func (c *GroupClient) QueryAccounts(_m *Group) *AccountQuery { + query := (&AccountClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AccountsTable, group.AccountsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAllowedUsers queries the allowed_users edge of a Group. +func (c *GroupClient) QueryAllowedUsers(_m *Group) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AllowedUsersTable, group.AllowedUsersPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccountGroups queries the account_groups edge of a Group. +func (c *GroupClient) QueryAccountGroups(_m *Group) *AccountGroupQuery { + query := (&AccountGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(accountgroup.Table, accountgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.AccountGroupsTable, group.AccountGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUserAllowedGroups queries the user_allowed_groups edge of a Group. +func (c *GroupClient) QueryUserAllowedGroups(_m *Group) *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, id), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.UserAllowedGroupsTable, group.UserAllowedGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *GroupClient) Hooks() []Hook { + hooks := c.hooks.Group + return append(hooks[:len(hooks):len(hooks)], group.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *GroupClient) Interceptors() []Interceptor { + inters := c.inters.Group + return append(inters[:len(inters):len(inters)], group.Interceptors[:]...) 
+} + +func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&GroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&GroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&GroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&GroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Group mutation op: %q", m.Op()) + } +} + +// PromoCodeClient is a client for the PromoCode schema. +type PromoCodeClient struct { + config +} + +// NewPromoCodeClient returns a client for the PromoCode from the given config. +func NewPromoCodeClient(c config) *PromoCodeClient { + return &PromoCodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `promocode.Hooks(f(g(h())))`. +func (c *PromoCodeClient) Use(hooks ...Hook) { + c.hooks.PromoCode = append(c.hooks.PromoCode, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `promocode.Intercept(f(g(h())))`. +func (c *PromoCodeClient) Intercept(interceptors ...Interceptor) { + c.inters.PromoCode = append(c.inters.PromoCode, interceptors...) +} + +// Create returns a builder for creating a PromoCode entity. +func (c *PromoCodeClient) Create() *PromoCodeCreate { + mutation := newPromoCodeMutation(c.config, OpCreate) + return &PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PromoCode entities. +func (c *PromoCodeClient) CreateBulk(builders ...*PromoCodeCreate) *PromoCodeCreateBulk { + return &PromoCodeCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PromoCodeClient) MapCreateBulk(slice any, setFunc func(*PromoCodeCreate, int)) *PromoCodeCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PromoCodeCreateBulk{err: fmt.Errorf("calling to PromoCodeClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PromoCodeCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PromoCodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PromoCode. +func (c *PromoCodeClient) Update() *PromoCodeUpdate { + mutation := newPromoCodeMutation(c.config, OpUpdate) + return &PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PromoCodeClient) UpdateOne(_m *PromoCode) *PromoCodeUpdateOne { + mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCode(_m)) + return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PromoCodeClient) UpdateOneID(id int64) *PromoCodeUpdateOne { + mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCodeID(id)) + return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PromoCode. 
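+
+// Illustrative usage (editor's sketch, not generated by ent): predicate-scoped
+// deletion. Exec reports how many rows were deleted; the id is an assumption.
+func examplePromoCodeDelete(ctx context.Context, client *Client) (int, error) {
+	return client.PromoCode.Delete().
+		Where(promocode.ID(1)).
+		Exec(ctx)
+}
+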
+func (c *PromoCodeClient) Delete() *PromoCodeDelete { + mutation := newPromoCodeMutation(c.config, OpDelete) + return &PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PromoCodeClient) DeleteOne(_m *PromoCode) *PromoCodeDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PromoCodeClient) DeleteOneID(id int64) *PromoCodeDeleteOne { + builder := c.Delete().Where(promocode.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PromoCodeDeleteOne{builder} +} + +// Query returns a query builder for PromoCode. +func (c *PromoCodeClient) Query() *PromoCodeQuery { + return &PromoCodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePromoCode}, + inters: c.Interceptors(), + } +} + +// Get returns a PromoCode entity by its id. +func (c *PromoCodeClient) Get(ctx context.Context, id int64) (*PromoCode, error) { + return c.Query().Where(promocode.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PromoCodeClient) GetX(ctx context.Context, id int64) *PromoCode { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUsageRecords queries the usage_records edge of a PromoCode. +func (c *PromoCodeClient) QueryUsageRecords(_m *PromoCode) *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocode.Table, promocode.FieldID, id), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PromoCodeClient) Hooks() []Hook { + return c.hooks.PromoCode +} + +// Interceptors returns the client interceptors. +func (c *PromoCodeClient) Interceptors() []Interceptor { + return c.inters.PromoCode +} + +func (c *PromoCodeClient) mutate(ctx context.Context, m *PromoCodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PromoCode mutation op: %q", m.Op()) + } +} + +// PromoCodeUsageClient is a client for the PromoCodeUsage schema. +type PromoCodeUsageClient struct { + config +} + +// NewPromoCodeUsageClient returns a client for the PromoCodeUsage from the given config. +func NewPromoCodeUsageClient(c config) *PromoCodeUsageClient { + return &PromoCodeUsageClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `promocodeusage.Hooks(f(g(h())))`. +func (c *PromoCodeUsageClient) Use(hooks ...Hook) { + c.hooks.PromoCodeUsage = append(c.hooks.PromoCodeUsage, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `promocodeusage.Intercept(f(g(h())))`. +func (c *PromoCodeUsageClient) Intercept(interceptors ...Interceptor) { + c.inters.PromoCodeUsage = append(c.inters.PromoCodeUsage, interceptors...) +} + +// Create returns a builder for creating a PromoCodeUsage entity. +func (c *PromoCodeUsageClient) Create() *PromoCodeUsageCreate { + mutation := newPromoCodeUsageMutation(c.config, OpCreate) + return &PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PromoCodeUsage entities. +func (c *PromoCodeUsageClient) CreateBulk(builders ...*PromoCodeUsageCreate) *PromoCodeUsageCreateBulk { + return &PromoCodeUsageCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PromoCodeUsageClient) MapCreateBulk(slice any, setFunc func(*PromoCodeUsageCreate, int)) *PromoCodeUsageCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PromoCodeUsageCreateBulk{err: fmt.Errorf("calling to PromoCodeUsageClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PromoCodeUsageCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PromoCodeUsageCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Update() *PromoCodeUsageUpdate { + mutation := newPromoCodeUsageMutation(c.config, OpUpdate) + return &PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PromoCodeUsageClient) UpdateOne(_m *PromoCodeUsage) *PromoCodeUsageUpdateOne { + mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsage(_m)) + return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PromoCodeUsageClient) UpdateOneID(id int64) *PromoCodeUsageUpdateOne { + mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsageID(id)) + return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Delete() *PromoCodeUsageDelete { + mutation := newPromoCodeUsageMutation(c.config, OpDelete) + return &PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PromoCodeUsageClient) DeleteOne(_m *PromoCodeUsage) *PromoCodeUsageDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PromoCodeUsageClient) DeleteOneID(id int64) *PromoCodeUsageDeleteOne { + builder := c.Delete().Where(promocodeusage.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PromoCodeUsageDeleteOne{builder} +} + +// Query returns a query builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Query() *PromoCodeUsageQuery { + return &PromoCodeUsageQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePromoCodeUsage}, + inters: c.Interceptors(), + } +} + +// Get returns a PromoCodeUsage entity by its id. 
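+// Missing rows surface as a typed error that the generated helper detects:
+//
+//	usage, err := client.PromoCodeUsage.Get(ctx, id)
+//	if ent.IsNotFound(err) {
+//		// handle the missing entity
+//	}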
+func (c *PromoCodeUsageClient) Get(ctx context.Context, id int64) (*PromoCodeUsage, error) { + return c.Query().Where(promocodeusage.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PromoCodeUsageClient) GetX(ctx context.Context, id int64) *PromoCodeUsage { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPromoCode queries the promo_code edge of a PromoCodeUsage. +func (c *PromoCodeUsageClient) QueryPromoCode(_m *PromoCodeUsage) *PromoCodeQuery { + query := (&PromoCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id), + sqlgraph.To(promocode.Table, promocode.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUser queries the user edge of a PromoCodeUsage. +func (c *PromoCodeUsageClient) QueryUser(_m *PromoCodeUsage) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PromoCodeUsageClient) Hooks() []Hook { + return c.hooks.PromoCodeUsage +} + +// Interceptors returns the client interceptors. +func (c *PromoCodeUsageClient) Interceptors() []Interceptor { + return c.inters.PromoCodeUsage +} + +func (c *PromoCodeUsageClient) mutate(ctx context.Context, m *PromoCodeUsageMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PromoCodeUsage mutation op: %q", m.Op()) + } +} + +// ProxyClient is a client for the Proxy schema. +type ProxyClient struct { + config +} + +// NewProxyClient returns a client for the Proxy from the given config. +func NewProxyClient(c config) *ProxyClient { + return &ProxyClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `proxy.Hooks(f(g(h())))`. +func (c *ProxyClient) Use(hooks ...Hook) { + c.hooks.Proxy = append(c.hooks.Proxy, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `proxy.Intercept(f(g(h())))`. +func (c *ProxyClient) Intercept(interceptors ...Interceptor) { + c.inters.Proxy = append(c.inters.Proxy, interceptors...) +} + +// Create returns a builder for creating a Proxy entity. 
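+// A creation sketch; the SetName setter is an assumption about the Proxy
+// schema, shown only to illustrate the builder chain:
+//
+//	p, err := client.Proxy.Create().
+//		SetName("us-west-1").
+//		Save(ctx)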
+func (c *ProxyClient) Create() *ProxyCreate { + mutation := newProxyMutation(c.config, OpCreate) + return &ProxyCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Proxy entities. +func (c *ProxyClient) CreateBulk(builders ...*ProxyCreate) *ProxyCreateBulk { + return &ProxyCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ProxyClient) MapCreateBulk(slice any, setFunc func(*ProxyCreate, int)) *ProxyCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ProxyCreateBulk{err: fmt.Errorf("calling to ProxyClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ProxyCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ProxyCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Proxy. +func (c *ProxyClient) Update() *ProxyUpdate { + mutation := newProxyMutation(c.config, OpUpdate) + return &ProxyUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ProxyClient) UpdateOne(_m *Proxy) *ProxyUpdateOne { + mutation := newProxyMutation(c.config, OpUpdateOne, withProxy(_m)) + return &ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ProxyClient) UpdateOneID(id int64) *ProxyUpdateOne { + mutation := newProxyMutation(c.config, OpUpdateOne, withProxyID(id)) + return &ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Proxy. +func (c *ProxyClient) Delete() *ProxyDelete { + mutation := newProxyMutation(c.config, OpDelete) + return &ProxyDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ProxyClient) DeleteOne(_m *Proxy) *ProxyDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ProxyClient) DeleteOneID(id int64) *ProxyDeleteOne { + builder := c.Delete().Where(proxy.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ProxyDeleteOne{builder} +} + +// Query returns a query builder for Proxy. +func (c *ProxyClient) Query() *ProxyQuery { + return &ProxyQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeProxy}, + inters: c.Interceptors(), + } +} + +// Get returns a Proxy entity by its id. +func (c *ProxyClient) Get(ctx context.Context, id int64) (*Proxy, error) { + return c.Query().Where(proxy.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ProxyClient) GetX(ctx context.Context, id int64) *Proxy { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAccounts queries the accounts edge of a Proxy. 
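+// The returned builder composes like any other query, e.g.:
+//
+//	accounts, err := client.Proxy.QueryAccounts(p).
+//		Limit(10).
+//		All(ctx)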
+func (c *ProxyClient) QueryAccounts(_m *Proxy) *AccountQuery { + query := (&AccountClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(proxy.Table, proxy.FieldID, id), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, proxy.AccountsTable, proxy.AccountsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ProxyClient) Hooks() []Hook { + hooks := c.hooks.Proxy + return append(hooks[:len(hooks):len(hooks)], proxy.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *ProxyClient) Interceptors() []Interceptor { + inters := c.inters.Proxy + return append(inters[:len(inters):len(inters)], proxy.Interceptors[:]...) +} + +func (c *ProxyClient) mutate(ctx context.Context, m *ProxyMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ProxyCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ProxyUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ProxyUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ProxyDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Proxy mutation op: %q", m.Op()) + } +} + +// RedeemCodeClient is a client for the RedeemCode schema. +type RedeemCodeClient struct { + config +} + +// NewRedeemCodeClient returns a client for the RedeemCode from the given config. +func NewRedeemCodeClient(c config) *RedeemCodeClient { + return &RedeemCodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `redeemcode.Hooks(f(g(h())))`. +func (c *RedeemCodeClient) Use(hooks ...Hook) { + c.hooks.RedeemCode = append(c.hooks.RedeemCode, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `redeemcode.Intercept(f(g(h())))`. +func (c *RedeemCodeClient) Intercept(interceptors ...Interceptor) { + c.inters.RedeemCode = append(c.inters.RedeemCode, interceptors...) +} + +// Create returns a builder for creating a RedeemCode entity. +func (c *RedeemCodeClient) Create() *RedeemCodeCreate { + mutation := newRedeemCodeMutation(c.config, OpCreate) + return &RedeemCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of RedeemCode entities. +func (c *RedeemCodeClient) CreateBulk(builders ...*RedeemCodeCreate) *RedeemCodeCreateBulk { + return &RedeemCodeCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
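+// A sketch mapping a string slice to builders (SetCode is an assumed
+// setter, used only for illustration):
+//
+//	codes := []string{"A1", "B2"}
+//	created, err := client.RedeemCode.MapCreateBulk(codes, func(rc *RedeemCodeCreate, i int) {
+//		rc.SetCode(codes[i])
+//	}).Save(ctx)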
+func (c *RedeemCodeClient) MapCreateBulk(slice any, setFunc func(*RedeemCodeCreate, int)) *RedeemCodeCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &RedeemCodeCreateBulk{err: fmt.Errorf("calling to RedeemCodeClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*RedeemCodeCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &RedeemCodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for RedeemCode. +func (c *RedeemCodeClient) Update() *RedeemCodeUpdate { + mutation := newRedeemCodeMutation(c.config, OpUpdate) + return &RedeemCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *RedeemCodeClient) UpdateOne(_m *RedeemCode) *RedeemCodeUpdateOne { + mutation := newRedeemCodeMutation(c.config, OpUpdateOne, withRedeemCode(_m)) + return &RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *RedeemCodeClient) UpdateOneID(id int64) *RedeemCodeUpdateOne { + mutation := newRedeemCodeMutation(c.config, OpUpdateOne, withRedeemCodeID(id)) + return &RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for RedeemCode. +func (c *RedeemCodeClient) Delete() *RedeemCodeDelete { + mutation := newRedeemCodeMutation(c.config, OpDelete) + return &RedeemCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *RedeemCodeClient) DeleteOne(_m *RedeemCode) *RedeemCodeDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *RedeemCodeClient) DeleteOneID(id int64) *RedeemCodeDeleteOne { + builder := c.Delete().Where(redeemcode.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &RedeemCodeDeleteOne{builder} +} + +// Query returns a query builder for RedeemCode. +func (c *RedeemCodeClient) Query() *RedeemCodeQuery { + return &RedeemCodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeRedeemCode}, + inters: c.Interceptors(), + } +} + +// Get returns a RedeemCode entity by its id. +func (c *RedeemCodeClient) Get(ctx context.Context, id int64) (*RedeemCode, error) { + return c.Query().Where(redeemcode.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *RedeemCodeClient) GetX(ctx context.Context, id int64) *RedeemCode { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a RedeemCode. +func (c *RedeemCodeClient) QueryUser(_m *RedeemCode) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.UserTable, redeemcode.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a RedeemCode. 
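+// Unique M2O edges resolve to at most one row, so Only is the natural
+// terminal call:
+//
+//	g, err := client.RedeemCode.QueryGroup(rc).Only(ctx)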
+func (c *RedeemCodeClient) QueryGroup(_m *RedeemCode) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.GroupTable, redeemcode.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *RedeemCodeClient) Hooks() []Hook { + return c.hooks.RedeemCode +} + +// Interceptors returns the client interceptors. +func (c *RedeemCodeClient) Interceptors() []Interceptor { + return c.inters.RedeemCode +} + +func (c *RedeemCodeClient) mutate(ctx context.Context, m *RedeemCodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&RedeemCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&RedeemCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&RedeemCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&RedeemCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown RedeemCode mutation op: %q", m.Op()) + } +} + +// SettingClient is a client for the Setting schema. +type SettingClient struct { + config +} + +// NewSettingClient returns a client for the Setting from the given config. +func NewSettingClient(c config) *SettingClient { + return &SettingClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `setting.Hooks(f(g(h())))`. +func (c *SettingClient) Use(hooks ...Hook) { + c.hooks.Setting = append(c.hooks.Setting, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `setting.Intercept(f(g(h())))`. +func (c *SettingClient) Intercept(interceptors ...Interceptor) { + c.inters.Setting = append(c.inters.Setting, interceptors...) +} + +// Create returns a builder for creating a Setting entity. +func (c *SettingClient) Create() *SettingCreate { + mutation := newSettingMutation(c.config, OpCreate) + return &SettingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Setting entities. +func (c *SettingClient) CreateBulk(builders ...*SettingCreate) *SettingCreateBulk { + return &SettingCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *SettingClient) MapCreateBulk(slice any, setFunc func(*SettingCreate, int)) *SettingCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &SettingCreateBulk{err: fmt.Errorf("calling to SettingClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*SettingCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &SettingCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Setting. 
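+// A predicate-scoped update sketch; setting.Key and SetValue are
+// assumptions about the Setting schema:
+//
+//	n, err := client.Setting.Update().
+//		Where(setting.Key("maintenance")).
+//		SetValue("on").
+//		Save(ctx)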
+func (c *SettingClient) Update() *SettingUpdate { + mutation := newSettingMutation(c.config, OpUpdate) + return &SettingUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *SettingClient) UpdateOne(_m *Setting) *SettingUpdateOne { + mutation := newSettingMutation(c.config, OpUpdateOne, withSetting(_m)) + return &SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *SettingClient) UpdateOneID(id int64) *SettingUpdateOne { + mutation := newSettingMutation(c.config, OpUpdateOne, withSettingID(id)) + return &SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Setting. +func (c *SettingClient) Delete() *SettingDelete { + mutation := newSettingMutation(c.config, OpDelete) + return &SettingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *SettingClient) DeleteOne(_m *Setting) *SettingDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *SettingClient) DeleteOneID(id int64) *SettingDeleteOne { + builder := c.Delete().Where(setting.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &SettingDeleteOne{builder} +} + +// Query returns a query builder for Setting. +func (c *SettingClient) Query() *SettingQuery { + return &SettingQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeSetting}, + inters: c.Interceptors(), + } +} + +// Get returns a Setting entity by its id. +func (c *SettingClient) Get(ctx context.Context, id int64) (*Setting, error) { + return c.Query().Where(setting.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *SettingClient) GetX(ctx context.Context, id int64) *Setting { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *SettingClient) Hooks() []Hook { + return c.hooks.Setting +} + +// Interceptors returns the client interceptors. +func (c *SettingClient) Interceptors() []Interceptor { + return c.inters.Setting +} + +func (c *SettingClient) mutate(ctx context.Context, m *SettingMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&SettingCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&SettingUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&SettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&SettingDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Setting mutation op: %q", m.Op()) + } +} + +// UsageLogClient is a client for the UsageLog schema. +type UsageLogClient struct { + config +} + +// NewUsageLogClient returns a client for the UsageLog from the given config. +func NewUsageLogClient(c config) *UsageLogClient { + return &UsageLogClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `usagelog.Hooks(f(g(h())))`. +func (c *UsageLogClient) Use(hooks ...Hook) { + c.hooks.UsageLog = append(c.hooks.UsageLog, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `usagelog.Intercept(f(g(h())))`. +func (c *UsageLogClient) Intercept(interceptors ...Interceptor) { + c.inters.UsageLog = append(c.inters.UsageLog, interceptors...) +} + +// Create returns a builder for creating a UsageLog entity. +func (c *UsageLogClient) Create() *UsageLogCreate { + mutation := newUsageLogMutation(c.config, OpCreate) + return &UsageLogCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UsageLog entities. +func (c *UsageLogClient) CreateBulk(builders ...*UsageLogCreate) *UsageLogCreateBulk { + return &UsageLogCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UsageLogClient) MapCreateBulk(slice any, setFunc func(*UsageLogCreate, int)) *UsageLogCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UsageLogCreateBulk{err: fmt.Errorf("calling to UsageLogClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UsageLogCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UsageLogCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UsageLog. +func (c *UsageLogClient) Update() *UsageLogUpdate { + mutation := newUsageLogMutation(c.config, OpUpdate) + return &UsageLogUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UsageLogClient) UpdateOne(_m *UsageLog) *UsageLogUpdateOne { + mutation := newUsageLogMutation(c.config, OpUpdateOne, withUsageLog(_m)) + return &UsageLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UsageLogClient) UpdateOneID(id int64) *UsageLogUpdateOne { + mutation := newUsageLogMutation(c.config, OpUpdateOne, withUsageLogID(id)) + return &UsageLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UsageLog. +func (c *UsageLogClient) Delete() *UsageLogDelete { + mutation := newUsageLogMutation(c.config, OpDelete) + return &UsageLogDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UsageLogClient) DeleteOne(_m *UsageLog) *UsageLogDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UsageLogClient) DeleteOneID(id int64) *UsageLogDeleteOne { + builder := c.Delete().Where(usagelog.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UsageLogDeleteOne{builder} +} + +// Query returns a query builder for UsageLog. +func (c *UsageLogClient) Query() *UsageLogQuery { + return &UsageLogQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUsageLog}, + inters: c.Interceptors(), + } +} + +// Get returns a UsageLog entity by its id. +func (c *UsageLogClient) Get(ctx context.Context, id int64) (*UsageLog, error) { + return c.Query().Where(usagelog.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *UsageLogClient) GetX(ctx context.Context, id int64) *UsageLog { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a UsageLog. +func (c *UsageLogClient) QueryUser(_m *UsageLog) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.UserTable, usagelog.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAPIKey queries the api_key edge of a UsageLog. +func (c *UsageLogClient) QueryAPIKey(_m *UsageLog) *APIKeyQuery { + query := (&APIKeyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, id), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.APIKeyTable, usagelog.APIKeyColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAccount queries the account edge of a UsageLog. +func (c *UsageLogClient) QueryAccount(_m *UsageLog) *AccountQuery { + query := (&AccountClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, id), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.AccountTable, usagelog.AccountColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a UsageLog. +func (c *UsageLogClient) QueryGroup(_m *UsageLog) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.GroupTable, usagelog.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySubscription queries the subscription edge of a UsageLog. +func (c *UsageLogClient) QuerySubscription(_m *UsageLog) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.SubscriptionTable, usagelog.SubscriptionColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UsageLogClient) Hooks() []Hook { + return c.hooks.UsageLog +} + +// Interceptors returns the client interceptors. 
+func (c *UsageLogClient) Interceptors() []Interceptor { + return c.inters.UsageLog +} + +func (c *UsageLogClient) mutate(ctx context.Context, m *UsageLogMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UsageLogCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UsageLogUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UsageLogUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UsageLogDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UsageLog mutation op: %q", m.Op()) + } +} + +// UserClient is a client for the User schema. +type UserClient struct { + config +} + +// NewUserClient returns a client for the User from the given config. +func NewUserClient(c config) *UserClient { + return &UserClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`. +func (c *UserClient) Use(hooks ...Hook) { + c.hooks.User = append(c.hooks.User, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. +func (c *UserClient) Create() *UserCreate { + mutation := newUserMutation(c.config, OpCreate) + return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of User entities. +func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { + return &UserCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for User. +func (c *UserClient) Update() *UserUpdate { + mutation := newUserMutation(c.config, OpUpdate) + return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserClient) UpdateOne(_m *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(_m)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserClient) UpdateOneID(id int64) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for User. 
+func (c *UserClient) Delete() *UserDelete { + mutation := newUserMutation(c.config, OpDelete) + return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserClient) DeleteOne(_m *User) *UserDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserClient) DeleteOneID(id int64) *UserDeleteOne { + builder := c.Delete().Where(user.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserDeleteOne{builder} +} + +// Query returns a query builder for User. +func (c *UserClient) Query() *UserQuery { + return &UserQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), + } +} + +// Get returns a User entity by its id. +func (c *UserClient) Get(ctx context.Context, id int64) (*User, error) { + return c.Query().Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserClient) GetX(ctx context.Context, id int64) *User { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAPIKeys queries the api_keys edge of a User. +func (c *UserClient) QueryAPIKeys(_m *User) *APIKeyQuery { + query := (&APIKeyClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.APIKeysTable, user.APIKeysColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryRedeemCodes queries the redeem_codes edge of a User. +func (c *UserClient) QueryRedeemCodes(_m *User) *RedeemCodeQuery { + query := (&RedeemCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.RedeemCodesTable, user.RedeemCodesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QuerySubscriptions queries the subscriptions edge of a User. +func (c *UserClient) QuerySubscriptions(_m *User) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.SubscriptionsTable, user.SubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAssignedSubscriptions queries the assigned_subscriptions edge of a User. 
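+// As with the other edge helpers, the result is a full query builder, so
+// aggregation composes directly:
+//
+//	n, err := client.User.QueryAssignedSubscriptions(u).Count(ctx)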
+func (c *UserClient) QueryAssignedSubscriptions(_m *User) *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AssignedSubscriptionsTable, user.AssignedSubscriptionsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAllowedGroups queries the allowed_groups edge of a User. +func (c *UserClient) QueryAllowedGroups(_m *User) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, user.AllowedGroupsTable, user.AllowedGroupsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUsageLogs queries the usage_logs edge of a User. +func (c *UserClient) QueryUsageLogs(_m *User) *UsageLogQuery { + query := (&UsageLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.UsageLogsTable, user.UsageLogsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAttributeValues queries the attribute_values edge of a User. +func (c *UserClient) QueryAttributeValues(_m *User) *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AttributeValuesTable, user.AttributeValuesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryPromoCodeUsages queries the promo_code_usages edge of a User. +func (c *UserClient) QueryPromoCodeUsages(_m *User) *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUserAllowedGroups queries the user_allowed_groups edge of a User. 
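+// Unlike QueryAllowedGroups, this traverses the join (edge-schema) rows
+// themselves; each row carries the composite key:
+//
+//	rows, err := client.User.QueryUserAllowedGroups(u).All(ctx)
+//	for _, r := range rows {
+//		_ = r.GroupID // paired with r.UserID
+//	}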
+func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.UserColumn), + sqlgraph.Edge(sqlgraph.O2M, true, user.UserAllowedGroupsTable, user.UserAllowedGroupsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserClient) Hooks() []Hook { + hooks := c.hooks.User + return append(hooks[:len(hooks):len(hooks)], user.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *UserClient) Interceptors() []Interceptor { + inters := c.inters.User + return append(inters[:len(inters):len(inters)], user.Interceptors[:]...) +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// UserAllowedGroupClient is a client for the UserAllowedGroup schema. +type UserAllowedGroupClient struct { + config +} + +// NewUserAllowedGroupClient returns a client for the UserAllowedGroup from the given config. +func NewUserAllowedGroupClient(c config) *UserAllowedGroupClient { + return &UserAllowedGroupClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `userallowedgroup.Hooks(f(g(h())))`. +func (c *UserAllowedGroupClient) Use(hooks ...Hook) { + c.hooks.UserAllowedGroup = append(c.hooks.UserAllowedGroup, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userallowedgroup.Intercept(f(g(h())))`. +func (c *UserAllowedGroupClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAllowedGroup = append(c.inters.UserAllowedGroup, interceptors...) +} + +// Create returns a builder for creating a UserAllowedGroup entity. +func (c *UserAllowedGroupClient) Create() *UserAllowedGroupCreate { + mutation := newUserAllowedGroupMutation(c.config, OpCreate) + return &UserAllowedGroupCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAllowedGroup entities. +func (c *UserAllowedGroupClient) CreateBulk(builders ...*UserAllowedGroupCreate) *UserAllowedGroupCreateBulk { + return &UserAllowedGroupCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *UserAllowedGroupClient) MapCreateBulk(slice any, setFunc func(*UserAllowedGroupCreate, int)) *UserAllowedGroupCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAllowedGroupCreateBulk{err: fmt.Errorf("calling to UserAllowedGroupClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAllowedGroupCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAllowedGroupCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAllowedGroup. +func (c *UserAllowedGroupClient) Update() *UserAllowedGroupUpdate { + mutation := newUserAllowedGroupMutation(c.config, OpUpdate) + return &UserAllowedGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserAllowedGroupClient) UpdateOne(_m *UserAllowedGroup) *UserAllowedGroupUpdateOne { + mutation := newUserAllowedGroupMutation(c.config, OpUpdateOne) + mutation.user = &_m.UserID + mutation.group = &_m.GroupID + return &UserAllowedGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAllowedGroup. +func (c *UserAllowedGroupClient) Delete() *UserAllowedGroupDelete { + mutation := newUserAllowedGroupMutation(c.config, OpDelete) + return &UserAllowedGroupDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Query returns a query builder for UserAllowedGroup. +func (c *UserAllowedGroupClient) Query() *UserAllowedGroupQuery { + return &UserAllowedGroupQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAllowedGroup}, + inters: c.Interceptors(), + } +} + +// QueryUser queries the user edge of a UserAllowedGroup. +func (c *UserAllowedGroupClient) QueryUser(_m *UserAllowedGroup) *UserQuery { + return c.Query(). + Where(userallowedgroup.UserID(_m.UserID), userallowedgroup.GroupID(_m.GroupID)). + QueryUser() +} + +// QueryGroup queries the group edge of a UserAllowedGroup. +func (c *UserAllowedGroupClient) QueryGroup(_m *UserAllowedGroup) *GroupQuery { + return c.Query(). + Where(userallowedgroup.UserID(_m.UserID), userallowedgroup.GroupID(_m.GroupID)). + QueryGroup() +} + +// Hooks returns the client hooks. +func (c *UserAllowedGroupClient) Hooks() []Hook { + return c.hooks.UserAllowedGroup +} + +// Interceptors returns the client interceptors. +func (c *UserAllowedGroupClient) Interceptors() []Interceptor { + return c.inters.UserAllowedGroup +} + +func (c *UserAllowedGroupClient) mutate(ctx context.Context, m *UserAllowedGroupMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAllowedGroupCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAllowedGroupUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAllowedGroupUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAllowedGroupDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAllowedGroup mutation op: %q", m.Op()) + } +} + +// UserAttributeDefinitionClient is a client for the UserAttributeDefinition schema. +type UserAttributeDefinitionClient struct { + config +} + +// NewUserAttributeDefinitionClient returns a client for the UserAttributeDefinition from the given config. 
+func NewUserAttributeDefinitionClient(c config) *UserAttributeDefinitionClient { + return &UserAttributeDefinitionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `userattributedefinition.Hooks(f(g(h())))`. +func (c *UserAttributeDefinitionClient) Use(hooks ...Hook) { + c.hooks.UserAttributeDefinition = append(c.hooks.UserAttributeDefinition, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userattributedefinition.Intercept(f(g(h())))`. +func (c *UserAttributeDefinitionClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAttributeDefinition = append(c.inters.UserAttributeDefinition, interceptors...) +} + +// Create returns a builder for creating a UserAttributeDefinition entity. +func (c *UserAttributeDefinitionClient) Create() *UserAttributeDefinitionCreate { + mutation := newUserAttributeDefinitionMutation(c.config, OpCreate) + return &UserAttributeDefinitionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAttributeDefinition entities. +func (c *UserAttributeDefinitionClient) CreateBulk(builders ...*UserAttributeDefinitionCreate) *UserAttributeDefinitionCreateBulk { + return &UserAttributeDefinitionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserAttributeDefinitionClient) MapCreateBulk(slice any, setFunc func(*UserAttributeDefinitionCreate, int)) *UserAttributeDefinitionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAttributeDefinitionCreateBulk{err: fmt.Errorf("calling to UserAttributeDefinitionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAttributeDefinitionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAttributeDefinitionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) Update() *UserAttributeDefinitionUpdate { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdate) + return &UserAttributeDefinitionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserAttributeDefinitionClient) UpdateOne(_m *UserAttributeDefinition) *UserAttributeDefinitionUpdateOne { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdateOne, withUserAttributeDefinition(_m)) + return &UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserAttributeDefinitionClient) UpdateOneID(id int64) *UserAttributeDefinitionUpdateOne { + mutation := newUserAttributeDefinitionMutation(c.config, OpUpdateOne, withUserAttributeDefinitionID(id)) + return &UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAttributeDefinition. 
+func (c *UserAttributeDefinitionClient) Delete() *UserAttributeDefinitionDelete { + mutation := newUserAttributeDefinitionMutation(c.config, OpDelete) + return &UserAttributeDefinitionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserAttributeDefinitionClient) DeleteOne(_m *UserAttributeDefinition) *UserAttributeDefinitionDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserAttributeDefinitionClient) DeleteOneID(id int64) *UserAttributeDefinitionDeleteOne { + builder := c.Delete().Where(userattributedefinition.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserAttributeDefinitionDeleteOne{builder} +} + +// Query returns a query builder for UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) Query() *UserAttributeDefinitionQuery { + return &UserAttributeDefinitionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAttributeDefinition}, + inters: c.Interceptors(), + } +} + +// Get returns a UserAttributeDefinition entity by its id. +func (c *UserAttributeDefinitionClient) Get(ctx context.Context, id int64) (*UserAttributeDefinition, error) { + return c.Query().Where(userattributedefinition.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserAttributeDefinitionClient) GetX(ctx context.Context, id int64) *UserAttributeDefinition { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryValues queries the values edge of a UserAttributeDefinition. +func (c *UserAttributeDefinitionClient) QueryValues(_m *UserAttributeDefinition) *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributedefinition.Table, userattributedefinition.FieldID, id), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, userattributedefinition.ValuesTable, userattributedefinition.ValuesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserAttributeDefinitionClient) Hooks() []Hook { + hooks := c.hooks.UserAttributeDefinition + return append(hooks[:len(hooks):len(hooks)], userattributedefinition.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *UserAttributeDefinitionClient) Interceptors() []Interceptor { + inters := c.inters.UserAttributeDefinition + return append(inters[:len(inters):len(inters)], userattributedefinition.Interceptors[:]...) 
+} + +func (c *UserAttributeDefinitionClient) mutate(ctx context.Context, m *UserAttributeDefinitionMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAttributeDefinitionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAttributeDefinitionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAttributeDefinitionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAttributeDefinitionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAttributeDefinition mutation op: %q", m.Op()) + } +} + +// UserAttributeValueClient is a client for the UserAttributeValue schema. +type UserAttributeValueClient struct { + config +} + +// NewUserAttributeValueClient returns a client for the UserAttributeValue from the given config. +func NewUserAttributeValueClient(c config) *UserAttributeValueClient { + return &UserAttributeValueClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `userattributevalue.Hooks(f(g(h())))`. +func (c *UserAttributeValueClient) Use(hooks ...Hook) { + c.hooks.UserAttributeValue = append(c.hooks.UserAttributeValue, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `userattributevalue.Intercept(f(g(h())))`. +func (c *UserAttributeValueClient) Intercept(interceptors ...Interceptor) { + c.inters.UserAttributeValue = append(c.inters.UserAttributeValue, interceptors...) +} + +// Create returns a builder for creating a UserAttributeValue entity. +func (c *UserAttributeValueClient) Create() *UserAttributeValueCreate { + mutation := newUserAttributeValueMutation(c.config, OpCreate) + return &UserAttributeValueCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserAttributeValue entities. +func (c *UserAttributeValueClient) CreateBulk(builders ...*UserAttributeValueCreate) *UserAttributeValueCreateBulk { + return &UserAttributeValueCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserAttributeValueClient) MapCreateBulk(slice any, setFunc func(*UserAttributeValueCreate, int)) *UserAttributeValueCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserAttributeValueCreateBulk{err: fmt.Errorf("calling to UserAttributeValueClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserAttributeValueCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserAttributeValueCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserAttributeValue. +func (c *UserAttributeValueClient) Update() *UserAttributeValueUpdate { + mutation := newUserAttributeValueMutation(c.config, OpUpdate) + return &UserAttributeValueUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
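+// A single-entity update sketch (SetValue is an assumed setter for the
+// UserAttributeValue schema):
+//
+//	v2, err := client.UserAttributeValue.UpdateOne(v).
+//		SetValue("updated").
+//		Save(ctx)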
+func (c *UserAttributeValueClient) UpdateOne(_m *UserAttributeValue) *UserAttributeValueUpdateOne { + mutation := newUserAttributeValueMutation(c.config, OpUpdateOne, withUserAttributeValue(_m)) + return &UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserAttributeValueClient) UpdateOneID(id int64) *UserAttributeValueUpdateOne { + mutation := newUserAttributeValueMutation(c.config, OpUpdateOne, withUserAttributeValueID(id)) + return &UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserAttributeValue. +func (c *UserAttributeValueClient) Delete() *UserAttributeValueDelete { + mutation := newUserAttributeValueMutation(c.config, OpDelete) + return &UserAttributeValueDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserAttributeValueClient) DeleteOne(_m *UserAttributeValue) *UserAttributeValueDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserAttributeValueClient) DeleteOneID(id int64) *UserAttributeValueDeleteOne { + builder := c.Delete().Where(userattributevalue.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserAttributeValueDeleteOne{builder} +} + +// Query returns a query builder for UserAttributeValue. +func (c *UserAttributeValueClient) Query() *UserAttributeValueQuery { + return &UserAttributeValueQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserAttributeValue}, + inters: c.Interceptors(), + } +} + +// Get returns a UserAttributeValue entity by its id. +func (c *UserAttributeValueClient) Get(ctx context.Context, id int64) (*UserAttributeValue, error) { + return c.Query().Where(userattributevalue.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserAttributeValueClient) GetX(ctx context.Context, id int64) *UserAttributeValue { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a UserAttributeValue. +func (c *UserAttributeValueClient) QueryUser(_m *UserAttributeValue) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.UserTable, userattributevalue.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryDefinition queries the definition edge of a UserAttributeValue. 
+func (c *UserAttributeValueClient) QueryDefinition(_m *UserAttributeValue) *UserAttributeDefinitionQuery { + query := (&UserAttributeDefinitionClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, id), + sqlgraph.To(userattributedefinition.Table, userattributedefinition.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.DefinitionTable, userattributevalue.DefinitionColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserAttributeValueClient) Hooks() []Hook { + return c.hooks.UserAttributeValue +} + +// Interceptors returns the client interceptors. +func (c *UserAttributeValueClient) Interceptors() []Interceptor { + return c.inters.UserAttributeValue +} + +func (c *UserAttributeValueClient) mutate(ctx context.Context, m *UserAttributeValueMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserAttributeValueCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserAttributeValueUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserAttributeValueUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserAttributeValueDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown UserAttributeValue mutation op: %q", m.Op()) + } +} + +// UserSubscriptionClient is a client for the UserSubscription schema. +type UserSubscriptionClient struct { + config +} + +// NewUserSubscriptionClient returns a client for the UserSubscription from the given config. +func NewUserSubscriptionClient(c config) *UserSubscriptionClient { + return &UserSubscriptionClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `usersubscription.Hooks(f(g(h())))`. +func (c *UserSubscriptionClient) Use(hooks ...Hook) { + c.hooks.UserSubscription = append(c.hooks.UserSubscription, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `usersubscription.Intercept(f(g(h())))`. +func (c *UserSubscriptionClient) Intercept(interceptors ...Interceptor) { + c.inters.UserSubscription = append(c.inters.UserSubscription, interceptors...) +} + +// Create returns a builder for creating a UserSubscription entity. +func (c *UserSubscriptionClient) Create() *UserSubscriptionCreate { + mutation := newUserSubscriptionMutation(c.config, OpCreate) + return &UserSubscriptionCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of UserSubscription entities. +func (c *UserSubscriptionClient) CreateBulk(builders ...*UserSubscriptionCreate) *UserSubscriptionCreateBulk { + return &UserSubscriptionCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
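MapCreateBulk, whose body follows, spares callers from looping over input slices themselves. A sketch, assuming a SetUserID setter generated from the user edge shown later in this file (the real setters live in backend/ent/schema/user_subscription.go):

import (
    "context"

    "github.com/Wei-Shaw/sub2api/ent"
)

func grantSubscriptions(ctx context.Context, client *ent.Client, userIDs []int64) error {
    // One Create builder per slice element; setFunc wires element i to builder i.
    return client.UserSubscription.MapCreateBulk(userIDs, func(c *ent.UserSubscriptionCreate, i int) {
        c.SetUserID(userIDs[i]) // assumed setter for the user edge
    }).Exec(ctx)
}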
+func (c *UserSubscriptionClient) MapCreateBulk(slice any, setFunc func(*UserSubscriptionCreate, int)) *UserSubscriptionCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserSubscriptionCreateBulk{err: fmt.Errorf("calling to UserSubscriptionClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserSubscriptionCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserSubscriptionCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for UserSubscription. +func (c *UserSubscriptionClient) Update() *UserSubscriptionUpdate { + mutation := newUserSubscriptionMutation(c.config, OpUpdate) + return &UserSubscriptionUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserSubscriptionClient) UpdateOne(_m *UserSubscription) *UserSubscriptionUpdateOne { + mutation := newUserSubscriptionMutation(c.config, OpUpdateOne, withUserSubscription(_m)) + return &UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserSubscriptionClient) UpdateOneID(id int64) *UserSubscriptionUpdateOne { + mutation := newUserSubscriptionMutation(c.config, OpUpdateOne, withUserSubscriptionID(id)) + return &UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for UserSubscription. +func (c *UserSubscriptionClient) Delete() *UserSubscriptionDelete { + mutation := newUserSubscriptionMutation(c.config, OpDelete) + return &UserSubscriptionDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserSubscriptionClient) DeleteOne(_m *UserSubscription) *UserSubscriptionDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserSubscriptionClient) DeleteOneID(id int64) *UserSubscriptionDeleteOne { + builder := c.Delete().Where(usersubscription.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserSubscriptionDeleteOne{builder} +} + +// Query returns a query builder for UserSubscription. +func (c *UserSubscriptionClient) Query() *UserSubscriptionQuery { + return &UserSubscriptionQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUserSubscription}, + inters: c.Interceptors(), + } +} + +// Get returns a UserSubscription entity by its id. +func (c *UserSubscriptionClient) Get(ctx context.Context, id int64) (*UserSubscription, error) { + return c.Query().Where(usersubscription.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserSubscriptionClient) GetX(ctx context.Context, id int64) *UserSubscription { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUser queries the user edge of a UserSubscription. 
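UpdateOneID is the cheaper sibling of UpdateOne when only the id is at hand, since it skips materializing the entity first. A sketch; SetExpiresAt is a hypothetical setter standing in for whatever fields the subscription schema actually defines:

import (
    "context"
    "time"

    "github.com/Wei-Shaw/sub2api/ent"
)

func extendSubscription(ctx context.Context, client *ent.Client, id int64, until time.Time) (*ent.UserSubscription, error) {
    return client.UserSubscription.UpdateOneID(id).
        SetExpiresAt(until). // hypothetical field setter, for illustration only
        Save(ctx)
}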
+func (c *UserSubscriptionClient) QueryUser(_m *UserSubscription) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.UserTable, usersubscription.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryGroup queries the group edge of a UserSubscription. +func (c *UserSubscriptionClient) QueryGroup(_m *UserSubscription) *GroupQuery { + query := (&GroupClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.GroupTable, usersubscription.GroupColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAssignedByUser queries the assigned_by_user edge of a UserSubscription. +func (c *UserSubscriptionClient) QueryAssignedByUser(_m *UserSubscription) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.AssignedByUserTable, usersubscription.AssignedByUserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUsageLogs queries the usage_logs edge of a UserSubscription. +func (c *UserSubscriptionClient) QueryUsageLogs(_m *UserSubscription) *UsageLogQuery { + query := (&UsageLogClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, id), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, usersubscription.UsageLogsTable, usersubscription.UsageLogsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserSubscriptionClient) Hooks() []Hook { + hooks := c.hooks.UserSubscription + return append(hooks[:len(hooks):len(hooks)], usersubscription.Hooks[:]...) +} + +// Interceptors returns the client interceptors. +func (c *UserSubscriptionClient) Interceptors() []Interceptor { + inters := c.inters.UserSubscription + return append(inters[:len(inters):len(inters)], usersubscription.Interceptors[:]...) 
+}
+
+func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscriptionMutation) (Value, error) {
+    switch m.Op() {
+    case OpCreate:
+        return (&UserSubscriptionCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+    case OpUpdate:
+        return (&UserSubscriptionUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+    case OpUpdateOne:
+        return (&UserSubscriptionUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
+    case OpDelete, OpDeleteOne:
+        return (&UserSubscriptionDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
+    default:
+        return nil, fmt.Errorf("ent: unknown UserSubscription mutation op: %q", m.Op())
+    }
+}
+
+// hooks and interceptors per client, for fast access.
+type (
+    hooks struct {
+        APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy,
+        RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
+        UserAttributeValue, UserSubscription []ent.Hook
+    }
+    inters struct {
+        APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy,
+        RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition,
+        UserAttributeValue, UserSubscription []ent.Interceptor
+    }
+)
+
+// ExecContext allows calling the underlying ExecContext method of the driver if it is supported by it.
+// See, database/sql#DB.ExecContext for more information.
+func (c *config) ExecContext(ctx context.Context, query string, args ...any) (stdsql.Result, error) {
+    ex, ok := c.driver.(interface {
+        ExecContext(context.Context, string, ...any) (stdsql.Result, error)
+    })
+    if !ok {
+        return nil, fmt.Errorf("Driver.ExecContext is not supported")
+    }
+    return ex.ExecContext(ctx, query, args...)
+}
+
+// QueryContext allows calling the underlying QueryContext method of the driver if it is supported by it.
+// See, database/sql#DB.QueryContext for more information.
+func (c *config) QueryContext(ctx context.Context, query string, args ...any) (*stdsql.Rows, error) {
+    q, ok := c.driver.(interface {
+        QueryContext(context.Context, string, ...any) (*stdsql.Rows, error)
+    })
+    if !ok {
+        return nil, fmt.Errorf("Driver.QueryContext is not supported")
+    }
+    return q.QueryContext(ctx, query, args...)
+}
diff --git a/backend/ent/driver_access.go b/backend/ent/driver_access.go
new file mode 100644
index 00000000..b0693572
--- /dev/null
+++ b/backend/ent/driver_access.go
@@ -0,0 +1,8 @@
+package ent
+
+import "entgo.io/ent/dialect"
+
+// Driver exposes the underlying driver for integration layers that need raw SQL.
+func (c *Client) Driver() dialect.Driver {
+    return c.driver
+}
diff --git a/backend/ent/ent.go b/backend/ent/ent.go
new file mode 100644
index 00000000..410375a7
--- /dev/null
+++ b/backend/ent/ent.go
@@ -0,0 +1,636 @@
+// Code generated by ent, DO NOT EDIT.
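ExecContext/QueryContext (from the sql/execquery feature) and the new Driver accessor above are the two raw-SQL escape hatches in this package. Before ent.go proper begins below, a sketch of the pass-through in use; the groups table and its deleted_at column are defined later in this patch:

import (
    "context"

    "github.com/Wei-Shaw/sub2api/ent"
)

func activeGroupCount(ctx context.Context, client *ent.Client) (int, error) {
    // QueryContext falls through to the underlying driver.
    rows, err := client.QueryContext(ctx, "SELECT COUNT(*) FROM groups WHERE deleted_at IS NULL")
    if err != nil {
        return 0, err
    }
    defer rows.Close()
    var n int
    if rows.Next() {
        if err := rows.Scan(&n); err != nil {
            return 0, err
        }
    }
    return n, rows.Err()
}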
+ +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(t, c string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + apikey.Table: apikey.ValidColumn, + account.Table: account.ValidColumn, + accountgroup.Table: accountgroup.ValidColumn, + group.Table: group.ValidColumn, + promocode.Table: promocode.ValidColumn, + promocodeusage.Table: promocodeusage.ValidColumn, + proxy.Table: proxy.ValidColumn, + redeemcode.Table: redeemcode.ValidColumn, + setting.Table: setting.ValidColumn, + usagelog.Table: usagelog.ValidColumn, + user.Table: user.ValidColumn, + userallowedgroup.Table: userallowedgroup.ValidColumn, + userattributedefinition.Table: userattributedefinition.ValidColumn, + userattributevalue.Table: userattributevalue.ValidColumn, + usersubscription.Table: usersubscription.ValidColumn, + }) + }) + return columnCheck(t, c) +} + +// Asc applies the given fields in ASC order. 
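FromContext/NewContext (and their Tx twins above) let request middleware stash one client and have repositories recover it without threading it through every signature. A minimal sketch of the round-trip (the Asc/Desc helpers follow below):

import (
    "context"
    "errors"

    "github.com/Wei-Shaw/sub2api/ent"
)

func withClient(ctx context.Context, client *ent.Client) context.Context {
    return ent.NewContext(ctx, client)
}

func groupCount(ctx context.Context) (int, error) {
    client := ent.FromContext(ctx) // nil if withClient never ran upstream
    if client == nil {
        return 0, errors.New("ent: no client in context")
    }
    return client.Group.Query().Count(ctx)
}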
+func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. 
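The aggregation helpers above plug into the generated GroupBy builders; Count's output column is named "count" by default, and scan targets match columns through their json tags. A sketch that tallies groups per status (group is the generated column package github.com/Wei-Shaw/sub2api/ent/group):

import (
    "context"

    "github.com/Wei-Shaw/sub2api/ent"
    "github.com/Wei-Shaw/sub2api/ent/group"
)

func groupsPerStatus(ctx context.Context, client *ent.Client) (map[string]int, error) {
    var rows []struct {
        Status string `json:"status"`
        Count  int    `json:"count"`
    }
    if err := client.Group.Query().
        GroupBy(group.FieldStatus).
        Aggregate(ent.Count()).
        Scan(ctx, &rows); err != nil {
        return nil, err
    }
    out := make(map[string]int, len(rows))
    for _, r := range rows {
        out[r.Status] = r.Count
    }
    return out, nil
}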
+func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. 
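IsNotFound and MaskNotFound exist because Get/Only return *NotFoundError rather than sql.ErrNoRows, so callers can branch without string matching. A sketch:

import (
    "context"

    "github.com/Wei-Shaw/sub2api/ent"
)

func groupByID(ctx context.Context, client *ent.Client, id int64) (*ent.Group, error) {
    g, err := client.Group.Get(ctx, id)
    if ent.IsNotFound(err) {
        return nil, nil // absence is expected here; ent.MaskNotFound(err) expresses the same intent
    }
    return g, err
}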
+func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
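These selector helpers only work for single-field selections, which keeps Select(...).Strings(ctx) honest. A sketch over Group; StatusEQ is the generated predicate from group/where.go, and the "active" literal is an assumed status value:

import (
    "context"

    "github.com/Wei-Shaw/sub2api/ent"
    "github.com/Wei-Shaw/sub2api/ent/group"
)

func activeGroupNames(ctx context.Context, client *ent.Client) ([]string, error) {
    return client.Group.Query().
        Where(group.StatusEQ("active")). // assumed status value
        Select(group.FieldName).
        Strings(ctx)
}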
+func (s *selector) BoolsX(ctx context.Context) []bool {
+    v, err := s.Bools(ctx)
+    if err != nil {
+        panic(err)
+    }
+    return v
+}
+
+// Bool returns a single bool from a selector. It is only allowed when selecting one field.
+func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
+    var v []bool
+    if v, err = s.Bools(ctx); err != nil {
+        return
+    }
+    switch len(v) {
+    case 1:
+        return v[0], nil
+    case 0:
+        err = &NotFoundError{s.label}
+    default:
+        err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
+    }
+    return
+}
+
+// BoolX is like Bool, but panics if an error occurs.
+func (s *selector) BoolX(ctx context.Context) bool {
+    v, err := s.Bool(ctx)
+    if err != nil {
+        panic(err)
+    }
+    return v
+}
+
+// withHooks invokes the builder operation with the given hooks, if any.
+func withHooks[V Value, M any, PM interface {
+    *M
+    Mutation
+}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
+    if len(hooks) == 0 {
+        return exec(ctx)
+    }
+    var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+        mutationT, ok := any(m).(PM)
+        if !ok {
+            return nil, fmt.Errorf("unexpected mutation type %T", m)
+        }
+        // Set the mutation to the builder.
+        *mutation = *mutationT
+        return exec(ctx)
+    })
+    for i := len(hooks) - 1; i >= 0; i-- {
+        if hooks[i] == nil {
+            return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
+        }
+        mut = hooks[i](mut)
+    }
+    v, err := mut.Mutate(ctx, mutation)
+    if err != nil {
+        return value, err
+    }
+    nv, ok := v.(V)
+    if !ok {
+        return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
+    }
+    return nv, nil
+}
+
+// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
+    if ent.QueryFromContext(ctx) == nil {
+        qc.Op = op
+        ctx = ent.NewQueryContext(ctx, qc)
+    }
+    return ctx
+}
+
+func querierAll[V Value, Q interface {
+    sqlAll(context.Context, ...queryHook) (V, error)
+}]() Querier {
+    return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+        query, ok := q.(Q)
+        if !ok {
+            return nil, fmt.Errorf("unexpected query type %T", q)
+        }
+        return query.sqlAll(ctx)
+    })
+}
+
+func querierCount[Q interface {
+    sqlCount(context.Context) (int, error)
+}]() Querier {
+    return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+        query, ok := q.(Q)
+        if !ok {
+            return nil, fmt.Errorf("unexpected query type %T", q)
+        }
+        return query.sqlCount(ctx)
+    })
+}
+
+func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
+    for i := len(inters) - 1; i >= 0; i-- {
+        qr = inters[i].Intercept(qr)
+    }
+    rv, err := qr.Query(ctx, q)
+    if err != nil {
+        return v, err
+    }
+    vt, ok := rv.(V)
+    if !ok {
+        return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
+    }
+    return vt, nil
+}
+
+func scanWithInterceptors[Q1 ent.Query, Q2 interface {
+    sqlScan(context.Context, Q1, any) error
+}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
+    rv := reflect.ValueOf(v)
+    var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
+        query, ok := q.(Q1)
+        if !ok {
+            return nil, fmt.Errorf("unexpected query type %T", q)
+        }
+        if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
+            return nil, err
+        }
+        if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
+            return rv.Elem().Interface(), nil
+        }
+        return v, nil
+    })
+    for i := len(inters) - 1; i >= 0; i-- {
+        qr = inters[i].Intercept(qr)
+    }
+    vv, err := qr.Query(ctx, rootQuery)
+    if err != nil {
+        return err
+    }
+    switch rv2 := reflect.ValueOf(vv); {
+    case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
+    case rv.Type() == rv2.Type():
+        rv.Elem().Set(rv2.Elem())
+    case rv.Elem().Type() == rv2.Type():
+        rv.Elem().Set(rv2)
+    }
+    return nil
+}
+
+// queryHook describes an internal hook for the different sqlAll methods.
+type queryHook func(context.Context, *sqlgraph.QuerySpec)
diff --git a/backend/ent/enttest/enttest.go b/backend/ent/enttest/enttest.go
new file mode 100644
index 00000000..fbeace40
--- /dev/null
+++ b/backend/ent/enttest/enttest.go
@@ -0,0 +1,84 @@
+// Code generated by ent, DO NOT EDIT.
+
+package enttest
+
+import (
+    "context"
+
+    "github.com/Wei-Shaw/sub2api/ent"
+    // required by schema hooks.
+    _ "github.com/Wei-Shaw/sub2api/ent/runtime"
+
+    "entgo.io/ent/dialect/sql/schema"
+    "github.com/Wei-Shaw/sub2api/ent/migrate"
+)
+
+type (
+    // TestingT is the interface that is shared between
+    // testing.T and testing.B and used by enttest.
+    TestingT interface {
+        FailNow()
+        Error(...any)
+    }
+
+    // Option configures client creation.
+    Option func(*options)
+
+    options struct {
+        opts        []ent.Option
+        migrateOpts []schema.MigrateOption
+    }
+)
+
+// WithOptions forwards options to client creation.
+func WithOptions(opts ...ent.Option) Option {
+    return func(o *options) {
+        o.opts = append(o.opts, opts...)
+    }
+}
+
+// WithMigrateOptions forwards options to auto migration.
+func WithMigrateOptions(opts ...schema.MigrateOption) Option {
+    return func(o *options) {
+        o.migrateOpts = append(o.migrateOpts, opts...)
+    }
+}
+
+func newOptions(opts []Option) *options {
+    o := &options{}
+    for _, opt := range opts {
+        opt(o)
+    }
+    return o
+}
+
+// Open calls ent.Open and auto-run migration.
+func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
+    o := newOptions(opts)
+    c, err := ent.Open(driverName, dataSourceName, o.opts...)
+    if err != nil {
+        t.Error(err)
+        t.FailNow()
+    }
+    migrateSchema(t, c, o)
+    return c
+}
+
+// NewClient calls ent.NewClient and auto-run migration.
+func NewClient(t TestingT, opts ...Option) *ent.Client {
+    o := newOptions(opts)
+    c := ent.NewClient(o.opts...)
+    migrateSchema(t, c, o)
+    return c
+}
+func migrateSchema(t TestingT, c *ent.Client, o *options) {
+    tables, err := schema.CopyTables(migrate.Tables)
+    if err != nil {
+        t.Error(err)
+        t.FailNow()
+    }
+    if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
+        t.Error(err)
+        t.FailNow()
+    }
+}
diff --git a/backend/ent/generate.go b/backend/ent/generate.go
new file mode 100644
index 00000000..59843cec
--- /dev/null
+++ b/backend/ent/generate.go
@@ -0,0 +1,6 @@
+// Package ent provides the generated ORM code for database entities.
+package ent
+
+// Enable sql/execquery to generate pass-through ExecContext/QueryContext methods, so raw SQL can run inside transactions.
+// Enable sql/lock to support FOR UPDATE row locks.
+//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery,sql/lock --idtype int64 ./schema
diff --git a/backend/ent/group.go b/backend/ent/group.go
new file mode 100644
index 00000000..4a31442a
--- /dev/null
+++ b/backend/ent/group.go
@@ -0,0 +1,473 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+    "fmt"
+    "strings"
+    "time"
+
+    "entgo.io/ent"
+    "entgo.io/ent/dialect/sql"
+    "github.com/Wei-Shaw/sub2api/ent/group"
+)
+
+// Group is the model entity for the Group schema.
+type Group struct {
+    config `json:"-"`
+    // ID of the ent.
+    ID int64 `json:"id,omitempty"`
+    // CreatedAt holds the value of the "created_at" field.
+    CreatedAt time.Time `json:"created_at,omitempty"`
+    // UpdatedAt holds the value of the "updated_at" field.
+    UpdatedAt time.Time `json:"updated_at,omitempty"`
+    // DeletedAt holds the value of the "deleted_at" field.
+    DeletedAt *time.Time `json:"deleted_at,omitempty"`
+    // Name holds the value of the "name" field.
+    Name string `json:"name,omitempty"`
+    // Description holds the value of the "description" field.
+    Description *string `json:"description,omitempty"`
+    // RateMultiplier holds the value of the "rate_multiplier" field.
+    RateMultiplier float64 `json:"rate_multiplier,omitempty"`
+    // IsExclusive holds the value of the "is_exclusive" field.
+    IsExclusive bool `json:"is_exclusive,omitempty"`
+    // Status holds the value of the "status" field.
+    Status string `json:"status,omitempty"`
+    // Platform holds the value of the "platform" field.
+    Platform string `json:"platform,omitempty"`
+    // SubscriptionType holds the value of the "subscription_type" field.
+    SubscriptionType string `json:"subscription_type,omitempty"`
+    // DailyLimitUsd holds the value of the "daily_limit_usd" field.
+    DailyLimitUsd *float64 `json:"daily_limit_usd,omitempty"`
+    // WeeklyLimitUsd holds the value of the "weekly_limit_usd" field.
+    WeeklyLimitUsd *float64 `json:"weekly_limit_usd,omitempty"`
+    // MonthlyLimitUsd holds the value of the "monthly_limit_usd" field.
+    MonthlyLimitUsd *float64 `json:"monthly_limit_usd,omitempty"`
+    // DefaultValidityDays holds the value of the "default_validity_days" field.
+    DefaultValidityDays int `json:"default_validity_days,omitempty"`
+    // ImagePrice1k holds the value of the "image_price_1k" field.
+    ImagePrice1k *float64 `json:"image_price_1k,omitempty"`
+    // ImagePrice2k holds the value of the "image_price_2k" field.
+    ImagePrice2k *float64 `json:"image_price_2k,omitempty"`
+    // ImagePrice4k holds the value of the "image_price_4k" field.
+    ImagePrice4k *float64 `json:"image_price_4k,omitempty"`
+    // Whether to allow only the Claude Code client
+    ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
+    // ID of the group that non-Claude Code requests fall back to
+    FallbackGroupID *int64 `json:"fallback_group_id,omitempty"`
+    // Edges holds the relations/edges for other nodes in the graph.
+    // The values are being populated by the GroupQuery when eager-loading is set.
+    Edges GroupEdges `json:"edges"`
+    selectValues sql.SelectValues
+}
+
+// GroupEdges holds the relations/edges for other nodes in the graph.
+type GroupEdges struct {
+    // APIKeys holds the value of the api_keys edge.
+    APIKeys []*APIKey `json:"api_keys,omitempty"`
+    // RedeemCodes holds the value of the redeem_codes edge.
+    RedeemCodes []*RedeemCode `json:"redeem_codes,omitempty"`
+    // Subscriptions holds the value of the subscriptions edge.
+ Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` + // UsageLogs holds the value of the usage_logs edge. + UsageLogs []*UsageLog `json:"usage_logs,omitempty"` + // Accounts holds the value of the accounts edge. + Accounts []*Account `json:"accounts,omitempty"` + // AllowedUsers holds the value of the allowed_users edge. + AllowedUsers []*User `json:"allowed_users,omitempty"` + // AccountGroups holds the value of the account_groups edge. + AccountGroups []*AccountGroup `json:"account_groups,omitempty"` + // UserAllowedGroups holds the value of the user_allowed_groups edge. + UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [8]bool +} + +// APIKeysOrErr returns the APIKeys value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) APIKeysOrErr() ([]*APIKey, error) { + if e.loadedTypes[0] { + return e.APIKeys, nil + } + return nil, &NotLoadedError{edge: "api_keys"} +} + +// RedeemCodesOrErr returns the RedeemCodes value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) RedeemCodesOrErr() ([]*RedeemCode, error) { + if e.loadedTypes[1] { + return e.RedeemCodes, nil + } + return nil, &NotLoadedError{edge: "redeem_codes"} +} + +// SubscriptionsOrErr returns the Subscriptions value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) SubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[2] { + return e.Subscriptions, nil + } + return nil, &NotLoadedError{edge: "subscriptions"} +} + +// UsageLogsOrErr returns the UsageLogs value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) UsageLogsOrErr() ([]*UsageLog, error) { + if e.loadedTypes[3] { + return e.UsageLogs, nil + } + return nil, &NotLoadedError{edge: "usage_logs"} +} + +// AccountsOrErr returns the Accounts value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AccountsOrErr() ([]*Account, error) { + if e.loadedTypes[4] { + return e.Accounts, nil + } + return nil, &NotLoadedError{edge: "accounts"} +} + +// AllowedUsersOrErr returns the AllowedUsers value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AllowedUsersOrErr() ([]*User, error) { + if e.loadedTypes[5] { + return e.AllowedUsers, nil + } + return nil, &NotLoadedError{edge: "allowed_users"} +} + +// AccountGroupsOrErr returns the AccountGroups value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) AccountGroupsOrErr() ([]*AccountGroup, error) { + if e.loadedTypes[6] { + return e.AccountGroups, nil + } + return nil, &NotLoadedError{edge: "account_groups"} +} + +// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e GroupEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { + if e.loadedTypes[7] { + return e.UserAllowedGroups, nil + } + return nil, &NotLoadedError{edge: "user_allowed_groups"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
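The OrErr accessors above pair with the generated With* eager-loading options: a forgotten WithSubscriptions surfaces as *NotLoadedError instead of a silently empty slice. A sketch (scanValues continues below):

import (
    "context"
    "fmt"

    "github.com/Wei-Shaw/sub2api/ent"
)

func printSubscriptionCounts(ctx context.Context, client *ent.Client) error {
    groups, err := client.Group.Query().
        WithSubscriptions(). // populate Edges.Subscriptions
        All(ctx)
    if err != nil {
        return err
    }
    for _, g := range groups {
        subs, err := g.Edges.SubscriptionsOrErr()
        if err != nil {
            return err // would trip if WithSubscriptions were dropped
        }
        fmt.Printf("group %d: %d subscriptions\n", g.ID, len(subs))
    }
    return nil
}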
+func (*Group) scanValues(columns []string) ([]any, error) {
+    values := make([]any, len(columns))
+    for i := range columns {
+        switch columns[i] {
+        case group.FieldIsExclusive, group.FieldClaudeCodeOnly:
+            values[i] = new(sql.NullBool)
+        case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k:
+            values[i] = new(sql.NullFloat64)
+        case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID:
+            values[i] = new(sql.NullInt64)
+        case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType:
+            values[i] = new(sql.NullString)
+        case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
+            values[i] = new(sql.NullTime)
+        default:
+            values[i] = new(sql.UnknownType)
+        }
+    }
+    return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the Group fields.
+func (_m *Group) assignValues(columns []string, values []any) error {
+    if m, n := len(values), len(columns); m < n {
+        return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+    }
+    for i := range columns {
+        switch columns[i] {
+        case group.FieldID:
+            value, ok := values[i].(*sql.NullInt64)
+            if !ok {
+                return fmt.Errorf("unexpected type %T for field id", value)
+            }
+            _m.ID = int64(value.Int64)
+        case group.FieldCreatedAt:
+            if value, ok := values[i].(*sql.NullTime); !ok {
+                return fmt.Errorf("unexpected type %T for field created_at", values[i])
+            } else if value.Valid {
+                _m.CreatedAt = value.Time
+            }
+        case group.FieldUpdatedAt:
+            if value, ok := values[i].(*sql.NullTime); !ok {
+                return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+            } else if value.Valid {
+                _m.UpdatedAt = value.Time
+            }
+        case group.FieldDeletedAt:
+            if value, ok := values[i].(*sql.NullTime); !ok {
+                return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
+            } else if value.Valid {
+                _m.DeletedAt = new(time.Time)
+                *_m.DeletedAt = value.Time
+            }
+        case group.FieldName:
+            if value, ok := values[i].(*sql.NullString); !ok {
+                return fmt.Errorf("unexpected type %T for field name", values[i])
+            } else if value.Valid {
+                _m.Name = value.String
+            }
+        case group.FieldDescription:
+            if value, ok := values[i].(*sql.NullString); !ok {
+                return fmt.Errorf("unexpected type %T for field description", values[i])
+            } else if value.Valid {
+                _m.Description = new(string)
+                *_m.Description = value.String
+            }
+        case group.FieldRateMultiplier:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
+            } else if value.Valid {
+                _m.RateMultiplier = value.Float64
+            }
+        case group.FieldIsExclusive:
+            if value, ok := values[i].(*sql.NullBool); !ok {
+                return fmt.Errorf("unexpected type %T for field is_exclusive", values[i])
+            } else if value.Valid {
+                _m.IsExclusive = value.Bool
+            }
+        case group.FieldStatus:
+            if value, ok := values[i].(*sql.NullString); !ok {
+                return fmt.Errorf("unexpected type %T for field status", values[i])
+            } else if value.Valid {
+                _m.Status = value.String
+            }
+        case group.FieldPlatform:
+            if value, ok := values[i].(*sql.NullString); !ok {
+                return fmt.Errorf("unexpected type %T for field platform", values[i])
+            } else if value.Valid {
+                _m.Platform = value.String
+            }
+        case group.FieldSubscriptionType:
+            if value, ok := values[i].(*sql.NullString); !ok {
+                return fmt.Errorf("unexpected type %T for field subscription_type", values[i])
+            } else if value.Valid {
+                _m.SubscriptionType = value.String
+            }
+        case group.FieldDailyLimitUsd:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field daily_limit_usd", values[i])
+            } else if value.Valid {
+                _m.DailyLimitUsd = new(float64)
+                *_m.DailyLimitUsd = value.Float64
+            }
+        case group.FieldWeeklyLimitUsd:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field weekly_limit_usd", values[i])
+            } else if value.Valid {
+                _m.WeeklyLimitUsd = new(float64)
+                *_m.WeeklyLimitUsd = value.Float64
+            }
+        case group.FieldMonthlyLimitUsd:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field monthly_limit_usd", values[i])
+            } else if value.Valid {
+                _m.MonthlyLimitUsd = new(float64)
+                *_m.MonthlyLimitUsd = value.Float64
+            }
+        case group.FieldDefaultValidityDays:
+            if value, ok := values[i].(*sql.NullInt64); !ok {
+                return fmt.Errorf("unexpected type %T for field default_validity_days", values[i])
+            } else if value.Valid {
+                _m.DefaultValidityDays = int(value.Int64)
+            }
+        case group.FieldImagePrice1k:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field image_price_1k", values[i])
+            } else if value.Valid {
+                _m.ImagePrice1k = new(float64)
+                *_m.ImagePrice1k = value.Float64
+            }
+        case group.FieldImagePrice2k:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field image_price_2k", values[i])
+            } else if value.Valid {
+                _m.ImagePrice2k = new(float64)
+                *_m.ImagePrice2k = value.Float64
+            }
+        case group.FieldImagePrice4k:
+            if value, ok := values[i].(*sql.NullFloat64); !ok {
+                return fmt.Errorf("unexpected type %T for field image_price_4k", values[i])
+            } else if value.Valid {
+                _m.ImagePrice4k = new(float64)
+                *_m.ImagePrice4k = value.Float64
+            }
+        case group.FieldClaudeCodeOnly:
+            if value, ok := values[i].(*sql.NullBool); !ok {
+                return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
+            } else if value.Valid {
+                _m.ClaudeCodeOnly = value.Bool
+            }
+        case group.FieldFallbackGroupID:
+            if value, ok := values[i].(*sql.NullInt64); !ok {
+                return fmt.Errorf("unexpected type %T for field fallback_group_id", values[i])
+            } else if value.Valid {
+                _m.FallbackGroupID = new(int64)
+                *_m.FallbackGroupID = value.Int64
+            }
+        default:
+            _m.selectValues.Set(columns[i], values[i])
+        }
+    }
+    return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the Group.
+// This includes values selected through modifiers, order, etc.
+func (_m *Group) Value(name string) (ent.Value, error) {
+    return _m.selectValues.Get(name)
+}
+
+// QueryAPIKeys queries the "api_keys" edge of the Group entity.
+func (_m *Group) QueryAPIKeys() *APIKeyQuery {
+    return NewGroupClient(_m.config).QueryAPIKeys(_m)
+}
+
+// QueryRedeemCodes queries the "redeem_codes" edge of the Group entity.
+func (_m *Group) QueryRedeemCodes() *RedeemCodeQuery {
+    return NewGroupClient(_m.config).QueryRedeemCodes(_m)
+}
+
+// QuerySubscriptions queries the "subscriptions" edge of the Group entity.
+func (_m *Group) QuerySubscriptions() *UserSubscriptionQuery {
+    return NewGroupClient(_m.config).QuerySubscriptions(_m)
+}
+
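The entity-level edge helpers continuing with QueryUsageLogs below delegate back to NewGroupClient(_m.config), so a fetched Group can keep walking the graph without the client in scope. A sketch (assuming the usual context import and the generated ent package):

func usageLogCount(ctx context.Context, g *ent.Group) (int, error) {
    return g.QueryUsageLogs().Count(ctx)
}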
+func (_m *Group) QueryUsageLogs() *UsageLogQuery { + return NewGroupClient(_m.config).QueryUsageLogs(_m) +} + +// QueryAccounts queries the "accounts" edge of the Group entity. +func (_m *Group) QueryAccounts() *AccountQuery { + return NewGroupClient(_m.config).QueryAccounts(_m) +} + +// QueryAllowedUsers queries the "allowed_users" edge of the Group entity. +func (_m *Group) QueryAllowedUsers() *UserQuery { + return NewGroupClient(_m.config).QueryAllowedUsers(_m) +} + +// QueryAccountGroups queries the "account_groups" edge of the Group entity. +func (_m *Group) QueryAccountGroups() *AccountGroupQuery { + return NewGroupClient(_m.config).QueryAccountGroups(_m) +} + +// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the Group entity. +func (_m *Group) QueryUserAllowedGroups() *UserAllowedGroupQuery { + return NewGroupClient(_m.config).QueryUserAllowedGroups(_m) +} + +// Update returns a builder for updating this Group. +// Note that you need to call Group.Unwrap() before calling this method if this Group +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Group) Update() *GroupUpdateOne { + return NewGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Group entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Group) Unwrap() *Group { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Group is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Group) String() string { + var builder strings.Builder + builder.WriteString("Group(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + if v := _m.Description; v != nil { + builder.WriteString("description=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier)) + builder.WriteString(", ") + builder.WriteString("is_exclusive=") + builder.WriteString(fmt.Sprintf("%v", _m.IsExclusive)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("platform=") + builder.WriteString(_m.Platform) + builder.WriteString(", ") + builder.WriteString("subscription_type=") + builder.WriteString(_m.SubscriptionType) + builder.WriteString(", ") + if v := _m.DailyLimitUsd; v != nil { + builder.WriteString("daily_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.WeeklyLimitUsd; v != nil { + builder.WriteString("weekly_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.MonthlyLimitUsd; v != nil { + builder.WriteString("monthly_limit_usd=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("default_validity_days=") + builder.WriteString(fmt.Sprintf("%v", 
_m.DefaultValidityDays)) + builder.WriteString(", ") + if v := _m.ImagePrice1k; v != nil { + builder.WriteString("image_price_1k=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.ImagePrice2k; v != nil { + builder.WriteString("image_price_2k=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.ImagePrice4k; v != nil { + builder.WriteString("image_price_4k=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("claude_code_only=") + builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly)) + builder.WriteString(", ") + if v := _m.FallbackGroupID; v != nil { + builder.WriteString("fallback_group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteByte(')') + return builder.String() +} + +// Groups is a parsable slice of Group. +type Groups []*Group diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go new file mode 100644 index 00000000..c4317f00 --- /dev/null +++ b/backend/ent/group/group.go @@ -0,0 +1,478 @@ +// Code generated by ent, DO NOT EDIT. + +package group + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the group type in the database. + Label = "group" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database. + FieldRateMultiplier = "rate_multiplier" + // FieldIsExclusive holds the string denoting the is_exclusive field in the database. + FieldIsExclusive = "is_exclusive" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldPlatform holds the string denoting the platform field in the database. + FieldPlatform = "platform" + // FieldSubscriptionType holds the string denoting the subscription_type field in the database. + FieldSubscriptionType = "subscription_type" + // FieldDailyLimitUsd holds the string denoting the daily_limit_usd field in the database. + FieldDailyLimitUsd = "daily_limit_usd" + // FieldWeeklyLimitUsd holds the string denoting the weekly_limit_usd field in the database. + FieldWeeklyLimitUsd = "weekly_limit_usd" + // FieldMonthlyLimitUsd holds the string denoting the monthly_limit_usd field in the database. + FieldMonthlyLimitUsd = "monthly_limit_usd" + // FieldDefaultValidityDays holds the string denoting the default_validity_days field in the database. + FieldDefaultValidityDays = "default_validity_days" + // FieldImagePrice1k holds the string denoting the image_price_1k field in the database. + FieldImagePrice1k = "image_price_1k" + // FieldImagePrice2k holds the string denoting the image_price_2k field in the database. + FieldImagePrice2k = "image_price_2k" + // FieldImagePrice4k holds the string denoting the image_price_4k field in the database. 
+ FieldImagePrice4k = "image_price_4k" + // FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database. + FieldClaudeCodeOnly = "claude_code_only" + // FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database. + FieldFallbackGroupID = "fallback_group_id" + // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. + EdgeAPIKeys = "api_keys" + // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. + EdgeRedeemCodes = "redeem_codes" + // EdgeSubscriptions holds the string denoting the subscriptions edge name in mutations. + EdgeSubscriptions = "subscriptions" + // EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations. + EdgeUsageLogs = "usage_logs" + // EdgeAccounts holds the string denoting the accounts edge name in mutations. + EdgeAccounts = "accounts" + // EdgeAllowedUsers holds the string denoting the allowed_users edge name in mutations. + EdgeAllowedUsers = "allowed_users" + // EdgeAccountGroups holds the string denoting the account_groups edge name in mutations. + EdgeAccountGroups = "account_groups" + // EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations. + EdgeUserAllowedGroups = "user_allowed_groups" + // Table holds the table name of the group in the database. + Table = "groups" + // APIKeysTable is the table that holds the api_keys relation/edge. + APIKeysTable = "api_keys" + // APIKeysInverseTable is the table name for the APIKey entity. + // It exists in this package in order to avoid circular dependency with the "apikey" package. + APIKeysInverseTable = "api_keys" + // APIKeysColumn is the table column denoting the api_keys relation/edge. + APIKeysColumn = "group_id" + // RedeemCodesTable is the table that holds the redeem_codes relation/edge. + RedeemCodesTable = "redeem_codes" + // RedeemCodesInverseTable is the table name for the RedeemCode entity. + // It exists in this package in order to avoid circular dependency with the "redeemcode" package. + RedeemCodesInverseTable = "redeem_codes" + // RedeemCodesColumn is the table column denoting the redeem_codes relation/edge. + RedeemCodesColumn = "group_id" + // SubscriptionsTable is the table that holds the subscriptions relation/edge. + SubscriptionsTable = "user_subscriptions" + // SubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + SubscriptionsInverseTable = "user_subscriptions" + // SubscriptionsColumn is the table column denoting the subscriptions relation/edge. + SubscriptionsColumn = "group_id" + // UsageLogsTable is the table that holds the usage_logs relation/edge. + UsageLogsTable = "usage_logs" + // UsageLogsInverseTable is the table name for the UsageLog entity. + // It exists in this package in order to avoid circular dependency with the "usagelog" package. + UsageLogsInverseTable = "usage_logs" + // UsageLogsColumn is the table column denoting the usage_logs relation/edge. + UsageLogsColumn = "group_id" + // AccountsTable is the table that holds the accounts relation/edge. The primary key declared below. + AccountsTable = "account_groups" + // AccountsInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. 
+ AccountsInverseTable = "accounts" + // AllowedUsersTable is the table that holds the allowed_users relation/edge. The primary key declared below. + AllowedUsersTable = "user_allowed_groups" + // AllowedUsersInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + AllowedUsersInverseTable = "users" + // AccountGroupsTable is the table that holds the account_groups relation/edge. + AccountGroupsTable = "account_groups" + // AccountGroupsInverseTable is the table name for the AccountGroup entity. + // It exists in this package in order to avoid circular dependency with the "accountgroup" package. + AccountGroupsInverseTable = "account_groups" + // AccountGroupsColumn is the table column denoting the account_groups relation/edge. + AccountGroupsColumn = "group_id" + // UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge. + UserAllowedGroupsTable = "user_allowed_groups" + // UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity. + // It exists in this package in order to avoid circular dependency with the "userallowedgroup" package. + UserAllowedGroupsInverseTable = "user_allowed_groups" + // UserAllowedGroupsColumn is the table column denoting the user_allowed_groups relation/edge. + UserAllowedGroupsColumn = "group_id" +) + +// Columns holds all SQL columns for group fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldName, + FieldDescription, + FieldRateMultiplier, + FieldIsExclusive, + FieldStatus, + FieldPlatform, + FieldSubscriptionType, + FieldDailyLimitUsd, + FieldWeeklyLimitUsd, + FieldMonthlyLimitUsd, + FieldDefaultValidityDays, + FieldImagePrice1k, + FieldImagePrice2k, + FieldImagePrice4k, + FieldClaudeCodeOnly, + FieldFallbackGroupID, +} + +var ( + // AccountsPrimaryKey and AccountsColumn2 are the table columns denoting the + // primary key for the accounts relation (M2M). + AccountsPrimaryKey = []string{"account_id", "group_id"} + // AllowedUsersPrimaryKey and AllowedUsersColumn2 are the table columns denoting the + // primary key for the allowed_users relation (M2M). + AllowedUsersPrimaryKey = []string{"user_id", "group_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field. + DefaultRateMultiplier float64 + // DefaultIsExclusive holds the default value on creation for the "is_exclusive" field. 
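The note above about blank-importing ent/runtime is load-bearing: without it, the hook and interceptor declared above and the Default*/validator variables in this block (which continues below) are never populated. A sketch of the canonical wiring; the postgres driver and DSN are illustrative:

package main

import (
    "log"

    "github.com/Wei-Shaw/sub2api/ent"
    _ "github.com/Wei-Shaw/sub2api/ent/runtime" // populates group.Hooks, DefaultCreatedAt, etc.
    _ "github.com/lib/pq"                       // assumed database driver
)

func main() {
    client, err := ent.Open("postgres", "postgres://localhost/sub2api?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()
}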
+ DefaultIsExclusive bool + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultPlatform holds the default value on creation for the "platform" field. + DefaultPlatform string + // PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + PlatformValidator func(string) error + // DefaultSubscriptionType holds the default value on creation for the "subscription_type" field. + DefaultSubscriptionType string + // SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save. + SubscriptionTypeValidator func(string) error + // DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field. + DefaultDefaultValidityDays int + // DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field. + DefaultClaudeCodeOnly bool +) + +// OrderOption defines the ordering options for the Group queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByRateMultiplier orders the results by the rate_multiplier field. +func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() +} + +// ByIsExclusive orders the results by the is_exclusive field. +func ByIsExclusive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsExclusive, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByPlatform orders the results by the platform field. +func ByPlatform(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlatform, opts...).ToFunc() +} + +// BySubscriptionType orders the results by the subscription_type field. +func BySubscriptionType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSubscriptionType, opts...).ToFunc() +} + +// ByDailyLimitUsd orders the results by the daily_limit_usd field. +func ByDailyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyLimitUsd, opts...).ToFunc() +} + +// ByWeeklyLimitUsd orders the results by the weekly_limit_usd field. 
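+//
+// A minimal usage sketch (illustrative only; assumes an *ent.Client named
+// "client" and a context "ctx" are in scope):
+//
+//	groups, err := client.Group.Query().
+//		Order(group.ByWeeklyLimitUsd(sql.OrderDesc())).
+//		All(ctx)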
+func ByWeeklyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyLimitUsd, opts...).ToFunc() +} + +// ByMonthlyLimitUsd orders the results by the monthly_limit_usd field. +func ByMonthlyLimitUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyLimitUsd, opts...).ToFunc() +} + +// ByDefaultValidityDays orders the results by the default_validity_days field. +func ByDefaultValidityDays(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDefaultValidityDays, opts...).ToFunc() +} + +// ByImagePrice1k orders the results by the image_price_1k field. +func ByImagePrice1k(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImagePrice1k, opts...).ToFunc() +} + +// ByImagePrice2k orders the results by the image_price_2k field. +func ByImagePrice2k(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImagePrice2k, opts...).ToFunc() +} + +// ByImagePrice4k orders the results by the image_price_4k field. +func ByImagePrice4k(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImagePrice4k, opts...).ToFunc() +} + +// ByClaudeCodeOnly orders the results by the claude_code_only field. +func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc() +} + +// ByFallbackGroupID orders the results by the fallback_group_id field. +func ByFallbackGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFallbackGroupID, opts...).ToFunc() +} + +// ByAPIKeysCount orders the results by api_keys count. +func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAPIKeysStep(), opts...) + } +} + +// ByAPIKeys orders the results by api_keys terms. +func ByAPIKeys(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAPIKeysStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByRedeemCodesCount orders the results by redeem_codes count. +func ByRedeemCodesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRedeemCodesStep(), opts...) + } +} + +// ByRedeemCodes orders the results by redeem_codes terms. +func ByRedeemCodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRedeemCodesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// BySubscriptionsCount orders the results by subscriptions count. +func BySubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newSubscriptionsStep(), opts...) + } +} + +// BySubscriptions orders the results by subscriptions terms. +func BySubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUsageLogsCount orders the results by usage_logs count. +func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...) + } +} + +// ByUsageLogs orders the results by usage_logs terms. 
+func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountsCount orders the results by accounts count. +func ByAccountsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountsStep(), opts...) + } +} + +// ByAccounts orders the results by accounts terms. +func ByAccounts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAllowedUsersCount orders the results by allowed_users count. +func ByAllowedUsersCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowedUsersStep(), opts...) + } +} + +// ByAllowedUsers orders the results by allowed_users terms. +func ByAllowedUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowedUsersStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAccountGroupsCount orders the results by account_groups count. +func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...) + } +} + +// ByAccountGroups orders the results by account_groups terms. +func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUserAllowedGroupsCount orders the results by user_allowed_groups count. +func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserAllowedGroupsStep(), opts...) + } +} + +// ByUserAllowedGroups orders the results by user_allowed_groups terms. +func ByUserAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newAPIKeysStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(APIKeysInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) +} +func newRedeemCodesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RedeemCodesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) +} +func newSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) +} +func newUsageLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) +} +func newAccountsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AccountsTable, AccountsPrimaryKey...), + ) +} +func newAllowedUsersStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowedUsersInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowedUsersTable, AllowedUsersPrimaryKey...), + ) +} +func newAccountGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn), + ) +} +func newUserAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserAllowedGroupsInverseTable, UserAllowedGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) +} diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go new file mode 100644 index 00000000..fb2f942f --- /dev/null +++ b/backend/ent/group/where.go @@ -0,0 +1,1265 @@ +// Code generated by ent, DO NOT EDIT. + +package group + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id int64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ. +func RateMultiplier(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// IsExclusive applies equality check predicate on the "is_exclusive" field. It's identical to IsExclusiveEQ. +func IsExclusive(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldIsExclusive, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldStatus, v)) +} + +// Platform applies equality check predicate on the "platform" field. It's identical to PlatformEQ. +func Platform(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPlatform, v)) +} + +// SubscriptionType applies equality check predicate on the "subscription_type" field. It's identical to SubscriptionTypeEQ. +func SubscriptionType(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSubscriptionType, v)) +} + +// DailyLimitUsd applies equality check predicate on the "daily_limit_usd" field. It's identical to DailyLimitUsdEQ. +func DailyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDailyLimitUsd, v)) +} + +// WeeklyLimitUsd applies equality check predicate on the "weekly_limit_usd" field. It's identical to WeeklyLimitUsdEQ. +func WeeklyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldWeeklyLimitUsd, v)) +} + +// MonthlyLimitUsd applies equality check predicate on the "monthly_limit_usd" field. It's identical to MonthlyLimitUsdEQ. +func MonthlyLimitUsd(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMonthlyLimitUsd, v)) +} + +// DefaultValidityDays applies equality check predicate on the "default_validity_days" field. It's identical to DefaultValidityDaysEQ. +func DefaultValidityDays(v int) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDefaultValidityDays, v)) +} + +// ImagePrice1k applies equality check predicate on the "image_price_1k" field. It's identical to ImagePrice1kEQ. 
+func ImagePrice1k(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice1k, v)) +} + +// ImagePrice2k applies equality check predicate on the "image_price_2k" field. It's identical to ImagePrice2kEQ. +func ImagePrice2k(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice2k, v)) +} + +// ImagePrice4k applies equality check predicate on the "image_price_4k" field. It's identical to ImagePrice4kEQ. +func ImagePrice4k(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice4k, v)) +} + +// ClaudeCodeOnly applies equality check predicate on the "claude_code_only" field. It's identical to ClaudeCodeOnlyEQ. +func ClaudeCodeOnly(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v)) +} + +// FallbackGroupID applies equality check predicate on the "fallback_group_id" field. It's identical to FallbackGroupIDEQ. +func FallbackGroupID(v int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldFallbackGroupID, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDeletedAt)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. 
+func NameLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. 
+func DescriptionNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldDescription, v)) +} + +// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field. +func RateMultiplierEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field. +func RateMultiplierNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierIn applies the In predicate on the "rate_multiplier" field. +func RateMultiplierIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field. +func RateMultiplierNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field. +func RateMultiplierGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldRateMultiplier, v)) +} + +// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field. +func RateMultiplierGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldRateMultiplier, v)) +} + +// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field. +func RateMultiplierLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldRateMultiplier, v)) +} + +// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field. +func RateMultiplierLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldRateMultiplier, v)) +} + +// IsExclusiveEQ applies the EQ predicate on the "is_exclusive" field. +func IsExclusiveEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldIsExclusive, v)) +} + +// IsExclusiveNEQ applies the NEQ predicate on the "is_exclusive" field. +func IsExclusiveNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldIsExclusive, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. 
+func StatusGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldStatus, v)) +} + +// PlatformEQ applies the EQ predicate on the "platform" field. +func PlatformEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldPlatform, v)) +} + +// PlatformNEQ applies the NEQ predicate on the "platform" field. +func PlatformNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldPlatform, v)) +} + +// PlatformIn applies the In predicate on the "platform" field. +func PlatformIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldPlatform, vs...)) +} + +// PlatformNotIn applies the NotIn predicate on the "platform" field. +func PlatformNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldPlatform, vs...)) +} + +// PlatformGT applies the GT predicate on the "platform" field. +func PlatformGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldPlatform, v)) +} + +// PlatformGTE applies the GTE predicate on the "platform" field. +func PlatformGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldPlatform, v)) +} + +// PlatformLT applies the LT predicate on the "platform" field. +func PlatformLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldPlatform, v)) +} + +// PlatformLTE applies the LTE predicate on the "platform" field. +func PlatformLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldPlatform, v)) +} + +// PlatformContains applies the Contains predicate on the "platform" field. +func PlatformContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldPlatform, v)) +} + +// PlatformHasPrefix applies the HasPrefix predicate on the "platform" field. +func PlatformHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldPlatform, v)) +} + +// PlatformHasSuffix applies the HasSuffix predicate on the "platform" field. +func PlatformHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldPlatform, v)) +} + +// PlatformEqualFold applies the EqualFold predicate on the "platform" field. 
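+//
+// EqualFold matches case-insensitively; the value below is illustrative:
+//
+//	group.PlatformEqualFold("OpenAI") // also matches "openai", "OPENAI", ...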
+func PlatformEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldPlatform, v)) +} + +// PlatformContainsFold applies the ContainsFold predicate on the "platform" field. +func PlatformContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldPlatform, v)) +} + +// SubscriptionTypeEQ applies the EQ predicate on the "subscription_type" field. +func SubscriptionTypeEQ(v string) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldSubscriptionType, v)) +} + +// SubscriptionTypeNEQ applies the NEQ predicate on the "subscription_type" field. +func SubscriptionTypeNEQ(v string) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldSubscriptionType, v)) +} + +// SubscriptionTypeIn applies the In predicate on the "subscription_type" field. +func SubscriptionTypeIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldIn(FieldSubscriptionType, vs...)) +} + +// SubscriptionTypeNotIn applies the NotIn predicate on the "subscription_type" field. +func SubscriptionTypeNotIn(vs ...string) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldSubscriptionType, vs...)) +} + +// SubscriptionTypeGT applies the GT predicate on the "subscription_type" field. +func SubscriptionTypeGT(v string) predicate.Group { + return predicate.Group(sql.FieldGT(FieldSubscriptionType, v)) +} + +// SubscriptionTypeGTE applies the GTE predicate on the "subscription_type" field. +func SubscriptionTypeGTE(v string) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldSubscriptionType, v)) +} + +// SubscriptionTypeLT applies the LT predicate on the "subscription_type" field. +func SubscriptionTypeLT(v string) predicate.Group { + return predicate.Group(sql.FieldLT(FieldSubscriptionType, v)) +} + +// SubscriptionTypeLTE applies the LTE predicate on the "subscription_type" field. +func SubscriptionTypeLTE(v string) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldSubscriptionType, v)) +} + +// SubscriptionTypeContains applies the Contains predicate on the "subscription_type" field. +func SubscriptionTypeContains(v string) predicate.Group { + return predicate.Group(sql.FieldContains(FieldSubscriptionType, v)) +} + +// SubscriptionTypeHasPrefix applies the HasPrefix predicate on the "subscription_type" field. +func SubscriptionTypeHasPrefix(v string) predicate.Group { + return predicate.Group(sql.FieldHasPrefix(FieldSubscriptionType, v)) +} + +// SubscriptionTypeHasSuffix applies the HasSuffix predicate on the "subscription_type" field. +func SubscriptionTypeHasSuffix(v string) predicate.Group { + return predicate.Group(sql.FieldHasSuffix(FieldSubscriptionType, v)) +} + +// SubscriptionTypeEqualFold applies the EqualFold predicate on the "subscription_type" field. +func SubscriptionTypeEqualFold(v string) predicate.Group { + return predicate.Group(sql.FieldEqualFold(FieldSubscriptionType, v)) +} + +// SubscriptionTypeContainsFold applies the ContainsFold predicate on the "subscription_type" field. +func SubscriptionTypeContainsFold(v string) predicate.Group { + return predicate.Group(sql.FieldContainsFold(FieldSubscriptionType, v)) +} + +// DailyLimitUsdEQ applies the EQ predicate on the "daily_limit_usd" field. +func DailyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdNEQ applies the NEQ predicate on the "daily_limit_usd" field. 
+func DailyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdIn applies the In predicate on the "daily_limit_usd" field. +func DailyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDailyLimitUsd, vs...)) +} + +// DailyLimitUsdNotIn applies the NotIn predicate on the "daily_limit_usd" field. +func DailyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDailyLimitUsd, vs...)) +} + +// DailyLimitUsdGT applies the GT predicate on the "daily_limit_usd" field. +func DailyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdGTE applies the GTE predicate on the "daily_limit_usd" field. +func DailyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdLT applies the LT predicate on the "daily_limit_usd" field. +func DailyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdLTE applies the LTE predicate on the "daily_limit_usd" field. +func DailyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDailyLimitUsd, v)) +} + +// DailyLimitUsdIsNil applies the IsNil predicate on the "daily_limit_usd" field. +func DailyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldDailyLimitUsd)) +} + +// DailyLimitUsdNotNil applies the NotNil predicate on the "daily_limit_usd" field. +func DailyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldDailyLimitUsd)) +} + +// WeeklyLimitUsdEQ applies the EQ predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdNEQ applies the NEQ predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdIn applies the In predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldWeeklyLimitUsd, vs...)) +} + +// WeeklyLimitUsdNotIn applies the NotIn predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldWeeklyLimitUsd, vs...)) +} + +// WeeklyLimitUsdGT applies the GT predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdGTE applies the GTE predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdLT applies the LT predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdLTE applies the LTE predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldWeeklyLimitUsd, v)) +} + +// WeeklyLimitUsdIsNil applies the IsNil predicate on the "weekly_limit_usd" field. 
+func WeeklyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldWeeklyLimitUsd)) +} + +// WeeklyLimitUsdNotNil applies the NotNil predicate on the "weekly_limit_usd" field. +func WeeklyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldWeeklyLimitUsd)) +} + +// MonthlyLimitUsdEQ applies the EQ predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdNEQ applies the NEQ predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdIn applies the In predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldMonthlyLimitUsd, vs...)) +} + +// MonthlyLimitUsdNotIn applies the NotIn predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldMonthlyLimitUsd, vs...)) +} + +// MonthlyLimitUsdGT applies the GT predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdGTE applies the GTE predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdLT applies the LT predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdLTE applies the LTE predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldMonthlyLimitUsd, v)) +} + +// MonthlyLimitUsdIsNil applies the IsNil predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldMonthlyLimitUsd)) +} + +// MonthlyLimitUsdNotNil applies the NotNil predicate on the "monthly_limit_usd" field. +func MonthlyLimitUsdNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldMonthlyLimitUsd)) +} + +// DefaultValidityDaysEQ applies the EQ predicate on the "default_validity_days" field. +func DefaultValidityDaysEQ(v int) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldDefaultValidityDays, v)) +} + +// DefaultValidityDaysNEQ applies the NEQ predicate on the "default_validity_days" field. +func DefaultValidityDaysNEQ(v int) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldDefaultValidityDays, v)) +} + +// DefaultValidityDaysIn applies the In predicate on the "default_validity_days" field. +func DefaultValidityDaysIn(vs ...int) predicate.Group { + return predicate.Group(sql.FieldIn(FieldDefaultValidityDays, vs...)) +} + +// DefaultValidityDaysNotIn applies the NotIn predicate on the "default_validity_days" field. +func DefaultValidityDaysNotIn(vs ...int) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldDefaultValidityDays, vs...)) +} + +// DefaultValidityDaysGT applies the GT predicate on the "default_validity_days" field. 
+func DefaultValidityDaysGT(v int) predicate.Group { + return predicate.Group(sql.FieldGT(FieldDefaultValidityDays, v)) +} + +// DefaultValidityDaysGTE applies the GTE predicate on the "default_validity_days" field. +func DefaultValidityDaysGTE(v int) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldDefaultValidityDays, v)) +} + +// DefaultValidityDaysLT applies the LT predicate on the "default_validity_days" field. +func DefaultValidityDaysLT(v int) predicate.Group { + return predicate.Group(sql.FieldLT(FieldDefaultValidityDays, v)) +} + +// DefaultValidityDaysLTE applies the LTE predicate on the "default_validity_days" field. +func DefaultValidityDaysLTE(v int) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldDefaultValidityDays, v)) +} + +// ImagePrice1kEQ applies the EQ predicate on the "image_price_1k" field. +func ImagePrice1kEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice1k, v)) +} + +// ImagePrice1kNEQ applies the NEQ predicate on the "image_price_1k" field. +func ImagePrice1kNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldImagePrice1k, v)) +} + +// ImagePrice1kIn applies the In predicate on the "image_price_1k" field. +func ImagePrice1kIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldImagePrice1k, vs...)) +} + +// ImagePrice1kNotIn applies the NotIn predicate on the "image_price_1k" field. +func ImagePrice1kNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldImagePrice1k, vs...)) +} + +// ImagePrice1kGT applies the GT predicate on the "image_price_1k" field. +func ImagePrice1kGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldImagePrice1k, v)) +} + +// ImagePrice1kGTE applies the GTE predicate on the "image_price_1k" field. +func ImagePrice1kGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldImagePrice1k, v)) +} + +// ImagePrice1kLT applies the LT predicate on the "image_price_1k" field. +func ImagePrice1kLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldImagePrice1k, v)) +} + +// ImagePrice1kLTE applies the LTE predicate on the "image_price_1k" field. +func ImagePrice1kLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldImagePrice1k, v)) +} + +// ImagePrice1kIsNil applies the IsNil predicate on the "image_price_1k" field. +func ImagePrice1kIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldImagePrice1k)) +} + +// ImagePrice1kNotNil applies the NotNil predicate on the "image_price_1k" field. +func ImagePrice1kNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldImagePrice1k)) +} + +// ImagePrice2kEQ applies the EQ predicate on the "image_price_2k" field. +func ImagePrice2kEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice2k, v)) +} + +// ImagePrice2kNEQ applies the NEQ predicate on the "image_price_2k" field. +func ImagePrice2kNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldImagePrice2k, v)) +} + +// ImagePrice2kIn applies the In predicate on the "image_price_2k" field. +func ImagePrice2kIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldImagePrice2k, vs...)) +} + +// ImagePrice2kNotIn applies the NotIn predicate on the "image_price_2k" field. 
+func ImagePrice2kNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldImagePrice2k, vs...)) +} + +// ImagePrice2kGT applies the GT predicate on the "image_price_2k" field. +func ImagePrice2kGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldImagePrice2k, v)) +} + +// ImagePrice2kGTE applies the GTE predicate on the "image_price_2k" field. +func ImagePrice2kGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldImagePrice2k, v)) +} + +// ImagePrice2kLT applies the LT predicate on the "image_price_2k" field. +func ImagePrice2kLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldImagePrice2k, v)) +} + +// ImagePrice2kLTE applies the LTE predicate on the "image_price_2k" field. +func ImagePrice2kLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldImagePrice2k, v)) +} + +// ImagePrice2kIsNil applies the IsNil predicate on the "image_price_2k" field. +func ImagePrice2kIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldImagePrice2k)) +} + +// ImagePrice2kNotNil applies the NotNil predicate on the "image_price_2k" field. +func ImagePrice2kNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldImagePrice2k)) +} + +// ImagePrice4kEQ applies the EQ predicate on the "image_price_4k" field. +func ImagePrice4kEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldImagePrice4k, v)) +} + +// ImagePrice4kNEQ applies the NEQ predicate on the "image_price_4k" field. +func ImagePrice4kNEQ(v float64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldImagePrice4k, v)) +} + +// ImagePrice4kIn applies the In predicate on the "image_price_4k" field. +func ImagePrice4kIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldImagePrice4k, vs...)) +} + +// ImagePrice4kNotIn applies the NotIn predicate on the "image_price_4k" field. +func ImagePrice4kNotIn(vs ...float64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldImagePrice4k, vs...)) +} + +// ImagePrice4kGT applies the GT predicate on the "image_price_4k" field. +func ImagePrice4kGT(v float64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldImagePrice4k, v)) +} + +// ImagePrice4kGTE applies the GTE predicate on the "image_price_4k" field. +func ImagePrice4kGTE(v float64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldImagePrice4k, v)) +} + +// ImagePrice4kLT applies the LT predicate on the "image_price_4k" field. +func ImagePrice4kLT(v float64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldImagePrice4k, v)) +} + +// ImagePrice4kLTE applies the LTE predicate on the "image_price_4k" field. +func ImagePrice4kLTE(v float64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldImagePrice4k, v)) +} + +// ImagePrice4kIsNil applies the IsNil predicate on the "image_price_4k" field. +func ImagePrice4kIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldImagePrice4k)) +} + +// ImagePrice4kNotNil applies the NotNil predicate on the "image_price_4k" field. +func ImagePrice4kNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldImagePrice4k)) +} + +// ClaudeCodeOnlyEQ applies the EQ predicate on the "claude_code_only" field. +func ClaudeCodeOnlyEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldClaudeCodeOnly, v)) +} + +// ClaudeCodeOnlyNEQ applies the NEQ predicate on the "claude_code_only" field. 
+func ClaudeCodeOnlyNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldClaudeCodeOnly, v)) +} + +// FallbackGroupIDEQ applies the EQ predicate on the "fallback_group_id" field. +func FallbackGroupIDEQ(v int64) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDNEQ applies the NEQ predicate on the "fallback_group_id" field. +func FallbackGroupIDNEQ(v int64) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDIn applies the In predicate on the "fallback_group_id" field. +func FallbackGroupIDIn(vs ...int64) predicate.Group { + return predicate.Group(sql.FieldIn(FieldFallbackGroupID, vs...)) +} + +// FallbackGroupIDNotIn applies the NotIn predicate on the "fallback_group_id" field. +func FallbackGroupIDNotIn(vs ...int64) predicate.Group { + return predicate.Group(sql.FieldNotIn(FieldFallbackGroupID, vs...)) +} + +// FallbackGroupIDGT applies the GT predicate on the "fallback_group_id" field. +func FallbackGroupIDGT(v int64) predicate.Group { + return predicate.Group(sql.FieldGT(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDGTE applies the GTE predicate on the "fallback_group_id" field. +func FallbackGroupIDGTE(v int64) predicate.Group { + return predicate.Group(sql.FieldGTE(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDLT applies the LT predicate on the "fallback_group_id" field. +func FallbackGroupIDLT(v int64) predicate.Group { + return predicate.Group(sql.FieldLT(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDLTE applies the LTE predicate on the "fallback_group_id" field. +func FallbackGroupIDLTE(v int64) predicate.Group { + return predicate.Group(sql.FieldLTE(FieldFallbackGroupID, v)) +} + +// FallbackGroupIDIsNil applies the IsNil predicate on the "fallback_group_id" field. +func FallbackGroupIDIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldFallbackGroupID)) +} + +// FallbackGroupIDNotNil applies the NotNil predicate on the "fallback_group_id" field. +func FallbackGroupIDNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldFallbackGroupID)) +} + +// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. +func HasAPIKeys() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAPIKeysWith applies the HasEdge predicate on the "api_keys" edge with a given conditions (other predicates). +func HasAPIKeysWith(preds ...predicate.APIKey) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAPIKeysStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasRedeemCodes applies the HasEdge predicate on the "redeem_codes" edge. +func HasRedeemCodes() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRedeemCodesWith applies the HasEdge predicate on the "redeem_codes" edge with a given conditions (other predicates). 
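+//
+// Edge predicates compose with field predicates and the And/Or/Not helpers
+// defined at the end of this file. A sketch, assuming the generated
+// redeemcode package exposes a StatusEQ predicate (hypothetical here, as is
+// the "unused" value):
+//
+//	group.And(
+//		group.StatusEQ("active"),
+//		group.HasRedeemCodesWith(redeemcode.StatusEQ("unused")), // assumed predicate
+//	)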
+func HasRedeemCodesWith(preds ...predicate.RedeemCode) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newRedeemCodesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasSubscriptions applies the HasEdge predicate on the "subscriptions" edge. +func HasSubscriptions() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSubscriptionsWith applies the HasEdge predicate on the "subscriptions" edge with a given conditions (other predicates). +func HasSubscriptionsWith(preds ...predicate.UserSubscription) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge. +func HasUsageLogs() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with a given conditions (other predicates). +func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newUsageLogsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccounts applies the HasEdge predicate on the "accounts" edge. +func HasAccounts() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AccountsTable, AccountsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountsWith applies the HasEdge predicate on the "accounts" edge with a given conditions (other predicates). +func HasAccountsWith(preds ...predicate.Account) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAccountsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAllowedUsers applies the HasEdge predicate on the "allowed_users" edge. +func HasAllowedUsers() predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, AllowedUsersTable, AllowedUsersPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowedUsersWith applies the HasEdge predicate on the "allowed_users" edge with a given conditions (other predicates). +func HasAllowedUsersWith(preds ...predicate.User) predicate.Group { + return predicate.Group(func(s *sql.Selector) { + step := newAllowedUsersStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccountGroups applies the HasEdge predicate on the "account_groups" edge. 
+func HasAccountGroups() predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasAccountGroupsWith applies the HasEdge predicate on the "account_groups" edge with given conditions (other predicates).
+func HasAccountGroupsWith(preds ...predicate.AccountGroup) predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := newAccountGroupsStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge.
+func HasUserAllowedGroups() predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := sqlgraph.NewStep(
+ sqlgraph.From(Table, FieldID),
+ sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn),
+ )
+ sqlgraph.HasNeighbors(s, step)
+ })
+}
+
+// HasUserAllowedGroupsWith applies the HasEdge predicate on the "user_allowed_groups" edge with given conditions (other predicates).
+func HasUserAllowedGroupsWith(preds ...predicate.UserAllowedGroup) predicate.Group {
+ return predicate.Group(func(s *sql.Selector) {
+ step := newUserAllowedGroupsStep()
+ sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+ for _, p := range preds {
+ p(s)
+ }
+ })
+ })
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.Group) predicate.Group {
+ return predicate.Group(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.Group) predicate.Group {
+ return predicate.Group(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.Group) predicate.Group {
+ return predicate.Group(sql.NotPredicates(p))
+}
diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go
new file mode 100644
index 00000000..59229402
--- /dev/null
+++ b/backend/ent/group_create.go
@@ -0,0 +1,2129 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/Wei-Shaw/sub2api/ent/account"
+ "github.com/Wei-Shaw/sub2api/ent/apikey"
+ "github.com/Wei-Shaw/sub2api/ent/group"
+ "github.com/Wei-Shaw/sub2api/ent/redeemcode"
+ "github.com/Wei-Shaw/sub2api/ent/usagelog"
+ "github.com/Wei-Shaw/sub2api/ent/user"
+ "github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// GroupCreate is the builder for creating a Group entity.
+type GroupCreate struct {
+ config
+ mutation *GroupMutation
+ hooks []Hook
+ conflict []sql.ConflictOption
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *GroupCreate) SetCreatedAt(v time.Time) *GroupCreate {
+ _c.mutation.SetCreatedAt(v)
+ return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *GroupCreate) SetNillableCreatedAt(v *time.Time) *GroupCreate {
+ if v != nil {
+ _c.SetCreatedAt(*v)
+ }
+ return _c
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_c *GroupCreate) SetUpdatedAt(v time.Time) *GroupCreate {
+ _c.mutation.SetUpdatedAt(v)
+ return _c
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
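+// A minimal usage sketch (illustrative only; t may be nil, in which case the
+// schema default is applied instead):
+//
+// var t *time.Time
+// g, err := client.Group.Create().
+// SetName("default").
+// SetNillableUpdatedAt(t).
+// Save(ctx)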
+func (_c *GroupCreate) SetNillableUpdatedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *GroupCreate) SetDeletedAt(v time.Time) *GroupCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDeletedAt(v *time.Time) *GroupCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *GroupCreate) SetName(v string) *GroupCreate { + _c.mutation.SetName(v) + return _c +} + +// SetDescription sets the "description" field. +func (_c *GroupCreate) SetDescription(v string) *GroupCreate { + _c.mutation.SetDescription(v) + return _c +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDescription(v *string) *GroupCreate { + if v != nil { + _c.SetDescription(*v) + } + return _c +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_c *GroupCreate) SetRateMultiplier(v float64) *GroupCreate { + _c.mutation.SetRateMultiplier(v) + return _c +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_c *GroupCreate) SetNillableRateMultiplier(v *float64) *GroupCreate { + if v != nil { + _c.SetRateMultiplier(*v) + } + return _c +} + +// SetIsExclusive sets the "is_exclusive" field. +func (_c *GroupCreate) SetIsExclusive(v bool) *GroupCreate { + _c.mutation.SetIsExclusive(v) + return _c +} + +// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil. +func (_c *GroupCreate) SetNillableIsExclusive(v *bool) *GroupCreate { + if v != nil { + _c.SetIsExclusive(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *GroupCreate) SetStatus(v string) *GroupCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *GroupCreate) SetNillableStatus(v *string) *GroupCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetPlatform sets the "platform" field. +func (_c *GroupCreate) SetPlatform(v string) *GroupCreate { + _c.mutation.SetPlatform(v) + return _c +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_c *GroupCreate) SetNillablePlatform(v *string) *GroupCreate { + if v != nil { + _c.SetPlatform(*v) + } + return _c +} + +// SetSubscriptionType sets the "subscription_type" field. +func (_c *GroupCreate) SetSubscriptionType(v string) *GroupCreate { + _c.mutation.SetSubscriptionType(v) + return _c +} + +// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil. +func (_c *GroupCreate) SetNillableSubscriptionType(v *string) *GroupCreate { + if v != nil { + _c.SetSubscriptionType(*v) + } + return _c +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (_c *GroupCreate) SetDailyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetDailyLimitUsd(v) + return _c +} + +// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDailyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetDailyLimitUsd(*v) + } + return _c +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. 
+func (_c *GroupCreate) SetWeeklyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetWeeklyLimitUsd(v) + return _c +} + +// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableWeeklyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetWeeklyLimitUsd(*v) + } + return _c +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (_c *GroupCreate) SetMonthlyLimitUsd(v float64) *GroupCreate { + _c.mutation.SetMonthlyLimitUsd(v) + return _c +} + +// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil. +func (_c *GroupCreate) SetNillableMonthlyLimitUsd(v *float64) *GroupCreate { + if v != nil { + _c.SetMonthlyLimitUsd(*v) + } + return _c +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (_c *GroupCreate) SetDefaultValidityDays(v int) *GroupCreate { + _c.mutation.SetDefaultValidityDays(v) + return _c +} + +// SetNillableDefaultValidityDays sets the "default_validity_days" field if the given value is not nil. +func (_c *GroupCreate) SetNillableDefaultValidityDays(v *int) *GroupCreate { + if v != nil { + _c.SetDefaultValidityDays(*v) + } + return _c +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (_c *GroupCreate) SetImagePrice1k(v float64) *GroupCreate { + _c.mutation.SetImagePrice1k(v) + return _c +} + +// SetNillableImagePrice1k sets the "image_price_1k" field if the given value is not nil. +func (_c *GroupCreate) SetNillableImagePrice1k(v *float64) *GroupCreate { + if v != nil { + _c.SetImagePrice1k(*v) + } + return _c +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (_c *GroupCreate) SetImagePrice2k(v float64) *GroupCreate { + _c.mutation.SetImagePrice2k(v) + return _c +} + +// SetNillableImagePrice2k sets the "image_price_2k" field if the given value is not nil. +func (_c *GroupCreate) SetNillableImagePrice2k(v *float64) *GroupCreate { + if v != nil { + _c.SetImagePrice2k(*v) + } + return _c +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (_c *GroupCreate) SetImagePrice4k(v float64) *GroupCreate { + _c.mutation.SetImagePrice4k(v) + return _c +} + +// SetNillableImagePrice4k sets the "image_price_4k" field if the given value is not nil. +func (_c *GroupCreate) SetNillableImagePrice4k(v *float64) *GroupCreate { + if v != nil { + _c.SetImagePrice4k(*v) + } + return _c +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (_c *GroupCreate) SetClaudeCodeOnly(v bool) *GroupCreate { + _c.mutation.SetClaudeCodeOnly(v) + return _c +} + +// SetNillableClaudeCodeOnly sets the "claude_code_only" field if the given value is not nil. +func (_c *GroupCreate) SetNillableClaudeCodeOnly(v *bool) *GroupCreate { + if v != nil { + _c.SetClaudeCodeOnly(*v) + } + return _c +} + +// SetFallbackGroupID sets the "fallback_group_id" field. +func (_c *GroupCreate) SetFallbackGroupID(v int64) *GroupCreate { + _c.mutation.SetFallbackGroupID(v) + return _c +} + +// SetNillableFallbackGroupID sets the "fallback_group_id" field if the given value is not nil. +func (_c *GroupCreate) SetNillableFallbackGroupID(v *int64) *GroupCreate { + if v != nil { + _c.SetFallbackGroupID(*v) + } + return _c +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAPIKeyIDs(ids...) + return _c +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. 
+func (_c *GroupCreate) AddAPIKeys(v ...*APIKey) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_c *GroupCreate) AddRedeemCodeIDs(ids ...int64) *GroupCreate { + _c.mutation.AddRedeemCodeIDs(ids...) + return _c +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_c *GroupCreate) AddRedeemCodes(v ...*RedeemCode) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_c *GroupCreate) AddSubscriptionIDs(ids ...int64) *GroupCreate { + _c.mutation.AddSubscriptionIDs(ids...) + return _c +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_c *GroupCreate) AddSubscriptions(v ...*UserSubscription) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddSubscriptionIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_c *GroupCreate) AddUsageLogIDs(ids ...int64) *GroupCreate { + _c.mutation.AddUsageLogIDs(ids...) + return _c +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_c *GroupCreate) AddUsageLogs(v ...*UsageLog) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageLogIDs(ids...) +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_c *GroupCreate) AddAccountIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAccountIDs(ids...) + return _c +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_c *GroupCreate) AddAccounts(v ...*Account) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAccountIDs(ids...) +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs. +func (_c *GroupCreate) AddAllowedUserIDs(ids ...int64) *GroupCreate { + _c.mutation.AddAllowedUserIDs(ids...) + return _c +} + +// AddAllowedUsers adds the "allowed_users" edges to the User entity. +func (_c *GroupCreate) AddAllowedUsers(v ...*User) *GroupCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAllowedUserIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (_c *GroupCreate) Mutation() *GroupMutation { + return _c.mutation +} + +// Save creates the Group in the database. +func (_c *GroupCreate) Save(ctx context.Context) (*Group, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *GroupCreate) SaveX(ctx context.Context) *Group { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *GroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *GroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
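+// The Default* function values checked below are registered by the generated
+// ent/runtime package from the schema mixins; a nil function therefore
+// signals a missing ent/runtime import, as the error messages note.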
+func (_c *GroupCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if group.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized group.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := group.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if group.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized group.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := group.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + v := group.DefaultRateMultiplier + _c.mutation.SetRateMultiplier(v) + } + if _, ok := _c.mutation.IsExclusive(); !ok { + v := group.DefaultIsExclusive + _c.mutation.SetIsExclusive(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := group.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.Platform(); !ok { + v := group.DefaultPlatform + _c.mutation.SetPlatform(v) + } + if _, ok := _c.mutation.SubscriptionType(); !ok { + v := group.DefaultSubscriptionType + _c.mutation.SetSubscriptionType(v) + } + if _, ok := _c.mutation.DefaultValidityDays(); !ok { + v := group.DefaultDefaultValidityDays + _c.mutation.SetDefaultValidityDays(v) + } + if _, ok := _c.mutation.ClaudeCodeOnly(); !ok { + v := group.DefaultClaudeCodeOnly + _c.mutation.SetClaudeCodeOnly(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_c *GroupCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Group.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Group.updated_at"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Group.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} + } + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + return &ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "Group.rate_multiplier"`)} + } + if _, ok := _c.mutation.IsExclusive(); !ok { + return &ValidationError{Name: "is_exclusive", err: errors.New(`ent: missing required field "Group.is_exclusive"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Group.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := group.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)} + } + } + if _, ok := _c.mutation.Platform(); !ok { + return &ValidationError{Name: "platform", err: errors.New(`ent: missing required field "Group.platform"`)} + } + if v, ok := _c.mutation.Platform(); ok { + if err := group.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)} + } + } + if _, ok := _c.mutation.SubscriptionType(); !ok { + return &ValidationError{Name: "subscription_type", err: errors.New(`ent: missing required field "Group.subscription_type"`)} + } + if v, ok := _c.mutation.SubscriptionType(); ok { + if err := 
group.SubscriptionTypeValidator(v); err != nil { + return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)} + } + } + if _, ok := _c.mutation.DefaultValidityDays(); !ok { + return &ValidationError{Name: "default_validity_days", err: errors.New(`ent: missing required field "Group.default_validity_days"`)} + } + if _, ok := _c.mutation.ClaudeCodeOnly(); !ok { + return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)} + } + return nil +} + +func (_c *GroupCreate) sqlSave(ctx context.Context) (*Group, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { + var ( + _node = &Group{config: _c.config} + _spec = sqlgraph.NewCreateSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(group.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + _node.Description = &value + } + if value, ok := _c.mutation.RateMultiplier(); ok { + _spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value) + _node.RateMultiplier = value + } + if value, ok := _c.mutation.IsExclusive(); ok { + _spec.SetField(group.FieldIsExclusive, field.TypeBool, value) + _node.IsExclusive = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(group.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Platform(); ok { + _spec.SetField(group.FieldPlatform, field.TypeString, value) + _node.Platform = value + } + if value, ok := _c.mutation.SubscriptionType(); ok { + _spec.SetField(group.FieldSubscriptionType, field.TypeString, value) + _node.SubscriptionType = value + } + if value, ok := _c.mutation.DailyLimitUsd(); ok { + _spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + _node.DailyLimitUsd = &value + } + if value, ok := _c.mutation.WeeklyLimitUsd(); ok { + _spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + _node.WeeklyLimitUsd = &value + } + if value, ok := _c.mutation.MonthlyLimitUsd(); ok { + _spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + _node.MonthlyLimitUsd = &value + } + if value, ok := _c.mutation.DefaultValidityDays(); ok { + _spec.SetField(group.FieldDefaultValidityDays, field.TypeInt, value) + _node.DefaultValidityDays = value + } + if value, ok := _c.mutation.ImagePrice1k(); ok { + _spec.SetField(group.FieldImagePrice1k, field.TypeFloat64, value) + _node.ImagePrice1k = 
&value + } + if value, ok := _c.mutation.ImagePrice2k(); ok { + _spec.SetField(group.FieldImagePrice2k, field.TypeFloat64, value) + _node.ImagePrice2k = &value + } + if value, ok := _c.mutation.ImagePrice4k(); ok { + _spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value) + _node.ImagePrice4k = &value + } + if value, ok := _c.mutation.ClaudeCodeOnly(); ok { + _spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value) + _node.ClaudeCodeOnly = value + } + if value, ok := _c.mutation.FallbackGroupID(); ok { + _spec.SetField(group.FieldFallbackGroupID, field.TypeInt64, value) + _node.FallbackGroupID = &value + } + if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _c.config, mutation: newAccountGroupMutation(_c.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AllowedUsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := 
&UserAllowedGroupCreate{config: _c.config, mutation: newUserAllowedGroupMutation(_c.config, OpCreate)}
+ createE.defaults()
+ _, specE := createE.createSpec()
+ edge.Target.Fields = specE.Fields
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+// client.Group.Create().
+// SetCreatedAt(v).
+// OnConflict(
+// // Update the row with the new values
+// // that were proposed for insertion.
+// sql.ResolveWithNewValues(),
+// ).
+// // Override some of the fields with custom
+// // update values.
+// Update(func(u *ent.GroupUpsert) {
+// u.SetCreatedAt(v + v)
+// }).
+// Exec(ctx)
+func (_c *GroupCreate) OnConflict(opts ...sql.ConflictOption) *GroupUpsertOne {
+ _c.conflict = opts
+ return &GroupUpsertOne{
+ create: _c,
+ }
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+// client.Group.Create().
+// OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx)
+func (_c *GroupCreate) OnConflictColumns(columns ...string) *GroupUpsertOne {
+ _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+ return &GroupUpsertOne{
+ create: _c,
+ }
+}
+
+type (
+ // GroupUpsertOne is the builder for "upsert"-ing
+ // one Group node.
+ GroupUpsertOne struct {
+ create *GroupCreate
+ }
+
+ // GroupUpsert is the "OnConflict" setter.
+ GroupUpsert struct {
+ *sql.UpdateSet
+ }
+)
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *GroupUpsert) SetUpdatedAt(v time.Time) *GroupUpsert {
+ u.Set(group.FieldUpdatedAt, v)
+ return u
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *GroupUpsert) UpdateUpdatedAt() *GroupUpsert {
+ u.SetExcluded(group.FieldUpdatedAt)
+ return u
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (u *GroupUpsert) SetDeletedAt(v time.Time) *GroupUpsert {
+ u.Set(group.FieldDeletedAt, v)
+ return u
+}
+
+// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
+func (u *GroupUpsert) UpdateDeletedAt() *GroupUpsert {
+ u.SetExcluded(group.FieldDeletedAt)
+ return u
+}
+
+// ClearDeletedAt clears the value of the "deleted_at" field.
+func (u *GroupUpsert) ClearDeletedAt() *GroupUpsert {
+ u.SetNull(group.FieldDeletedAt)
+ return u
+}
+
+// SetName sets the "name" field.
+func (u *GroupUpsert) SetName(v string) *GroupUpsert {
+ u.Set(group.FieldName, v)
+ return u
+}
+
+// UpdateName sets the "name" field to the value that was provided on create.
+func (u *GroupUpsert) UpdateName() *GroupUpsert {
+ u.SetExcluded(group.FieldName)
+ return u
+}
+
+// SetDescription sets the "description" field.
+func (u *GroupUpsert) SetDescription(v string) *GroupUpsert {
+ u.Set(group.FieldDescription, v)
+ return u
+}
+
+// UpdateDescription sets the "description" field to the value that was provided on create.
+func (u *GroupUpsert) UpdateDescription() *GroupUpsert {
+ u.SetExcluded(group.FieldDescription)
+ return u
+}
+
+// ClearDescription clears the value of the "description" field.
+func (u *GroupUpsert) ClearDescription() *GroupUpsert {
+ u.SetNull(group.FieldDescription)
+ return u
+}
+
+// SetRateMultiplier sets the "rate_multiplier" field.
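+// A minimal upsert sketch (illustrative only; assumes "name" carries a
+// unique index so it can serve as the conflict target):
+//
+// err := client.Group.Create().
+// SetName("default").
+// OnConflictColumns(group.FieldName).
+// Update(func(u *ent.GroupUpsert) {
+// u.SetRateMultiplier(1.5)
+// }).
+// Exec(ctx)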
+func (u *GroupUpsert) SetRateMultiplier(v float64) *GroupUpsert { + u.Set(group.FieldRateMultiplier, v) + return u +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsert) UpdateRateMultiplier() *GroupUpsert { + u.SetExcluded(group.FieldRateMultiplier) + return u +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *GroupUpsert) AddRateMultiplier(v float64) *GroupUpsert { + u.Add(group.FieldRateMultiplier, v) + return u +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsert) SetIsExclusive(v bool) *GroupUpsert { + u.Set(group.FieldIsExclusive, v) + return u +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. +func (u *GroupUpsert) UpdateIsExclusive() *GroupUpsert { + u.SetExcluded(group.FieldIsExclusive) + return u +} + +// SetStatus sets the "status" field. +func (u *GroupUpsert) SetStatus(v string) *GroupUpsert { + u.Set(group.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *GroupUpsert) UpdateStatus() *GroupUpsert { + u.SetExcluded(group.FieldStatus) + return u +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsert) SetPlatform(v string) *GroupUpsert { + u.Set(group.FieldPlatform, v) + return u +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsert) UpdatePlatform() *GroupUpsert { + u.SetExcluded(group.FieldPlatform) + return u +} + +// SetSubscriptionType sets the "subscription_type" field. +func (u *GroupUpsert) SetSubscriptionType(v string) *GroupUpsert { + u.Set(group.FieldSubscriptionType, v) + return u +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. +func (u *GroupUpsert) UpdateSubscriptionType() *GroupUpsert { + u.SetExcluded(group.FieldSubscriptionType) + return u +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsert) SetDailyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldDailyLimitUsd, v) + return u +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. +func (u *GroupUpsert) UpdateDailyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldDailyLimitUsd) + return u +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsert) AddDailyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldDailyLimitUsd, v) + return u +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsert) ClearDailyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldDailyLimitUsd) + return u +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsert) SetWeeklyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldWeeklyLimitUsd, v) + return u +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsert) UpdateWeeklyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldWeeklyLimitUsd) + return u +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsert) AddWeeklyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldWeeklyLimitUsd, v) + return u +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. 
+func (u *GroupUpsert) ClearWeeklyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldWeeklyLimitUsd) + return u +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (u *GroupUpsert) SetMonthlyLimitUsd(v float64) *GroupUpsert { + u.Set(group.FieldMonthlyLimitUsd, v) + return u +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsert) UpdateMonthlyLimitUsd() *GroupUpsert { + u.SetExcluded(group.FieldMonthlyLimitUsd) + return u +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. +func (u *GroupUpsert) AddMonthlyLimitUsd(v float64) *GroupUpsert { + u.Add(group.FieldMonthlyLimitUsd, v) + return u +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (u *GroupUpsert) ClearMonthlyLimitUsd() *GroupUpsert { + u.SetNull(group.FieldMonthlyLimitUsd) + return u +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (u *GroupUpsert) SetDefaultValidityDays(v int) *GroupUpsert { + u.Set(group.FieldDefaultValidityDays, v) + return u +} + +// UpdateDefaultValidityDays sets the "default_validity_days" field to the value that was provided on create. +func (u *GroupUpsert) UpdateDefaultValidityDays() *GroupUpsert { + u.SetExcluded(group.FieldDefaultValidityDays) + return u +} + +// AddDefaultValidityDays adds v to the "default_validity_days" field. +func (u *GroupUpsert) AddDefaultValidityDays(v int) *GroupUpsert { + u.Add(group.FieldDefaultValidityDays, v) + return u +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (u *GroupUpsert) SetImagePrice1k(v float64) *GroupUpsert { + u.Set(group.FieldImagePrice1k, v) + return u +} + +// UpdateImagePrice1k sets the "image_price_1k" field to the value that was provided on create. +func (u *GroupUpsert) UpdateImagePrice1k() *GroupUpsert { + u.SetExcluded(group.FieldImagePrice1k) + return u +} + +// AddImagePrice1k adds v to the "image_price_1k" field. +func (u *GroupUpsert) AddImagePrice1k(v float64) *GroupUpsert { + u.Add(group.FieldImagePrice1k, v) + return u +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (u *GroupUpsert) ClearImagePrice1k() *GroupUpsert { + u.SetNull(group.FieldImagePrice1k) + return u +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (u *GroupUpsert) SetImagePrice2k(v float64) *GroupUpsert { + u.Set(group.FieldImagePrice2k, v) + return u +} + +// UpdateImagePrice2k sets the "image_price_2k" field to the value that was provided on create. +func (u *GroupUpsert) UpdateImagePrice2k() *GroupUpsert { + u.SetExcluded(group.FieldImagePrice2k) + return u +} + +// AddImagePrice2k adds v to the "image_price_2k" field. +func (u *GroupUpsert) AddImagePrice2k(v float64) *GroupUpsert { + u.Add(group.FieldImagePrice2k, v) + return u +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (u *GroupUpsert) ClearImagePrice2k() *GroupUpsert { + u.SetNull(group.FieldImagePrice2k) + return u +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (u *GroupUpsert) SetImagePrice4k(v float64) *GroupUpsert { + u.Set(group.FieldImagePrice4k, v) + return u +} + +// UpdateImagePrice4k sets the "image_price_4k" field to the value that was provided on create. +func (u *GroupUpsert) UpdateImagePrice4k() *GroupUpsert { + u.SetExcluded(group.FieldImagePrice4k) + return u +} + +// AddImagePrice4k adds v to the "image_price_4k" field. 
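+// Unlike SetImagePrice4k, the additive variant increments the stored value
+// on conflict rather than replacing it, e.g. (illustrative only):
+//
+// Update(func(u *ent.GroupUpsert) {
+// u.AddImagePrice4k(0.01)
+// })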
+func (u *GroupUpsert) AddImagePrice4k(v float64) *GroupUpsert { + u.Add(group.FieldImagePrice4k, v) + return u +} + +// ClearImagePrice4k clears the value of the "image_price_4k" field. +func (u *GroupUpsert) ClearImagePrice4k() *GroupUpsert { + u.SetNull(group.FieldImagePrice4k) + return u +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (u *GroupUpsert) SetClaudeCodeOnly(v bool) *GroupUpsert { + u.Set(group.FieldClaudeCodeOnly, v) + return u +} + +// UpdateClaudeCodeOnly sets the "claude_code_only" field to the value that was provided on create. +func (u *GroupUpsert) UpdateClaudeCodeOnly() *GroupUpsert { + u.SetExcluded(group.FieldClaudeCodeOnly) + return u +} + +// SetFallbackGroupID sets the "fallback_group_id" field. +func (u *GroupUpsert) SetFallbackGroupID(v int64) *GroupUpsert { + u.Set(group.FieldFallbackGroupID, v) + return u +} + +// UpdateFallbackGroupID sets the "fallback_group_id" field to the value that was provided on create. +func (u *GroupUpsert) UpdateFallbackGroupID() *GroupUpsert { + u.SetExcluded(group.FieldFallbackGroupID) + return u +} + +// AddFallbackGroupID adds v to the "fallback_group_id" field. +func (u *GroupUpsert) AddFallbackGroupID(v int64) *GroupUpsert { + u.Add(group.FieldFallbackGroupID, v) + return u +} + +// ClearFallbackGroupID clears the value of the "fallback_group_id" field. +func (u *GroupUpsert) ClearFallbackGroupID() *GroupUpsert { + u.SetNull(group.FieldFallbackGroupID) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *GroupUpsertOne) UpdateNewValues() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(group.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Group.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *GroupUpsertOne) Ignore() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *GroupUpsertOne) DoNothing() *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GroupCreate.OnConflict +// documentation for more info. +func (u *GroupUpsertOne) Update(set func(*GroupUpsert)) *GroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *GroupUpsertOne) SetUpdatedAt(v time.Time) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateUpdatedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. 
+func (u *GroupUpsertOne) SetDeletedAt(v time.Time) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDeletedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *GroupUpsertOne) ClearDeletedAt() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *GroupUpsertOne) SetName(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateName() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *GroupUpsertOne) SetDescription(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDescription() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *GroupUpsertOne) ClearDescription() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDescription() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *GroupUpsertOne) SetRateMultiplier(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *GroupUpsertOne) AddRateMultiplier(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateRateMultiplier() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsertOne) SetIsExclusive(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetIsExclusive(v) + }) +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateIsExclusive() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateIsExclusive() + }) +} + +// SetStatus sets the "status" field. +func (u *GroupUpsertOne) SetStatus(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateStatus() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateStatus() + }) +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsertOne) SetPlatform(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdatePlatform() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdatePlatform() + }) +} + +// SetSubscriptionType sets the "subscription_type" field. 
+func (u *GroupUpsertOne) SetSubscriptionType(v string) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetSubscriptionType(v) + }) +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateSubscriptionType() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateSubscriptionType() + }) +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsertOne) SetDailyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDailyLimitUsd(v) + }) +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsertOne) AddDailyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddDailyLimitUsd(v) + }) +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDailyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDailyLimitUsd() + }) +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsertOne) ClearDailyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearDailyLimitUsd() + }) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsertOne) SetWeeklyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetWeeklyLimitUsd(v) + }) +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsertOne) AddWeeklyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddWeeklyLimitUsd(v) + }) +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateWeeklyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateWeeklyLimitUsd() + }) +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (u *GroupUpsertOne) ClearWeeklyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearWeeklyLimitUsd() + }) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (u *GroupUpsertOne) SetMonthlyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetMonthlyLimitUsd(v) + }) +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. +func (u *GroupUpsertOne) AddMonthlyLimitUsd(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddMonthlyLimitUsd(v) + }) +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateMonthlyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateMonthlyLimitUsd() + }) +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (u *GroupUpsertOne) ClearMonthlyLimitUsd() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearMonthlyLimitUsd() + }) +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (u *GroupUpsertOne) SetDefaultValidityDays(v int) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetDefaultValidityDays(v) + }) +} + +// AddDefaultValidityDays adds v to the "default_validity_days" field. 
+func (u *GroupUpsertOne) AddDefaultValidityDays(v int) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddDefaultValidityDays(v) + }) +} + +// UpdateDefaultValidityDays sets the "default_validity_days" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateDefaultValidityDays() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateDefaultValidityDays() + }) +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (u *GroupUpsertOne) SetImagePrice1k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice1k(v) + }) +} + +// AddImagePrice1k adds v to the "image_price_1k" field. +func (u *GroupUpsertOne) AddImagePrice1k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddImagePrice1k(v) + }) +} + +// UpdateImagePrice1k sets the "image_price_1k" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateImagePrice1k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateImagePrice1k() + }) +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (u *GroupUpsertOne) ClearImagePrice1k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearImagePrice1k() + }) +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (u *GroupUpsertOne) SetImagePrice2k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice2k(v) + }) +} + +// AddImagePrice2k adds v to the "image_price_2k" field. +func (u *GroupUpsertOne) AddImagePrice2k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddImagePrice2k(v) + }) +} + +// UpdateImagePrice2k sets the "image_price_2k" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateImagePrice2k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateImagePrice2k() + }) +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (u *GroupUpsertOne) ClearImagePrice2k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearImagePrice2k() + }) +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (u *GroupUpsertOne) SetImagePrice4k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice4k(v) + }) +} + +// AddImagePrice4k adds v to the "image_price_4k" field. +func (u *GroupUpsertOne) AddImagePrice4k(v float64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.AddImagePrice4k(v) + }) +} + +// UpdateImagePrice4k sets the "image_price_4k" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateImagePrice4k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateImagePrice4k() + }) +} + +// ClearImagePrice4k clears the value of the "image_price_4k" field. +func (u *GroupUpsertOne) ClearImagePrice4k() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearImagePrice4k() + }) +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (u *GroupUpsertOne) SetClaudeCodeOnly(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetClaudeCodeOnly(v) + }) +} + +// UpdateClaudeCodeOnly sets the "claude_code_only" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateClaudeCodeOnly() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateClaudeCodeOnly() + }) +} + +// SetFallbackGroupID sets the "fallback_group_id" field. 
+func (u *GroupUpsertOne) SetFallbackGroupID(v int64) *GroupUpsertOne {
+ return u.Update(func(s *GroupUpsert) {
+ s.SetFallbackGroupID(v)
+ })
+}
+
+// AddFallbackGroupID adds v to the "fallback_group_id" field.
+func (u *GroupUpsertOne) AddFallbackGroupID(v int64) *GroupUpsertOne {
+ return u.Update(func(s *GroupUpsert) {
+ s.AddFallbackGroupID(v)
+ })
+}
+
+// UpdateFallbackGroupID sets the "fallback_group_id" field to the value that was provided on create.
+func (u *GroupUpsertOne) UpdateFallbackGroupID() *GroupUpsertOne {
+ return u.Update(func(s *GroupUpsert) {
+ s.UpdateFallbackGroupID()
+ })
+}
+
+// ClearFallbackGroupID clears the value of the "fallback_group_id" field.
+func (u *GroupUpsertOne) ClearFallbackGroupID() *GroupUpsertOne {
+ return u.Update(func(s *GroupUpsert) {
+ s.ClearFallbackGroupID()
+ })
+}
+
+// Exec executes the query.
+func (u *GroupUpsertOne) Exec(ctx context.Context) error {
+ if len(u.create.conflict) == 0 {
+ return errors.New("ent: missing options for GroupCreate.OnConflict")
+ }
+ return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *GroupUpsertOne) ExecX(ctx context.Context) {
+ if err := u.create.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *GroupUpsertOne) ID(ctx context.Context) (id int64, err error) {
+ node, err := u.create.Save(ctx)
+ if err != nil {
+ return id, err
+ }
+ return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *GroupUpsertOne) IDX(ctx context.Context) int64 {
+ id, err := u.ID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// GroupCreateBulk is the builder for creating many Group entities in bulk.
+type GroupCreateBulk struct {
+ config
+ err error
+ builders []*GroupCreate
+ conflict []sql.ConflictOption
+}
+
+// Save creates the Group entities in the database.
+func (_c *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
+ if _c.err != nil {
+ return nil, _c.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+ nodes := make([]*Group, len(_c.builders))
+ mutators := make([]Mutator, len(_c.builders))
+ for i := range _c.builders {
+ func(i int, root context.Context) {
+ builder := _c.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*GroupMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ spec.OnConflict = _c.conflict
+ // Invoke the actual operation on the latest mutation in the chain.
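+ // sqlgraph.BatchCreate issues one multi-row INSERT for all collected
+ // specs; constraint violations are rewrapped as *ConstraintError below
+ // so callers can detect them with the generated IsConstraintError helper.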
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ if specs[i].ID.Value != nil {
+ id := specs[i].ID.Value.(int64)
+ nodes[i].ID = int64(id)
+ }
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *GroupCreateBulk) SaveX(ctx context.Context) []*Group {
+ v, err := _c.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (_c *GroupCreateBulk) Exec(ctx context.Context) error {
+ _, err := _c.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *GroupCreateBulk) ExecX(ctx context.Context) {
+ if err := _c.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+// client.Group.CreateBulk(builders...).
+// OnConflict(
+// // Update the row with the new values
+// // that were proposed for insertion.
+// sql.ResolveWithNewValues(),
+// ).
+// // Override some of the fields with custom
+// // update values.
+// Update(func(u *ent.GroupUpsert) {
+// u.SetCreatedAt(v + v)
+// }).
+// Exec(ctx)
+func (_c *GroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *GroupUpsertBulk {
+ _c.conflict = opts
+ return &GroupUpsertBulk{
+ create: _c,
+ }
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+// client.Group.CreateBulk(builders...).
+// OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx)
+func (_c *GroupCreateBulk) OnConflictColumns(columns ...string) *GroupUpsertBulk {
+ _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+ return &GroupUpsertBulk{
+ create: _c,
+ }
+}
+
+// GroupUpsertBulk is the builder for "upsert"-ing
+// a bulk of Group nodes.
+type GroupUpsertBulk struct {
+ create *GroupCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+// client.Group.CreateBulk(builders...).
+// OnConflict(
+// sql.ResolveWithNewValues(),
+// ).
+// Exec(ctx)
+func (u *GroupUpsertBulk) UpdateNewValues() *GroupUpsertBulk {
+ u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+ u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+ for _, b := range u.create.builders {
+ if _, exists := b.mutation.CreatedAt(); exists {
+ s.SetIgnore(group.FieldCreatedAt)
+ }
+ }
+ }))
+ return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+// client.Group.CreateBulk(builders...).
+// OnConflict(sql.ResolveWithIgnore()).
+// Exec(ctx)
+func (u *GroupUpsertBulk) Ignore() *GroupUpsertBulk {
+ u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+ return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
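+// A minimal sketch (illustrative only; assumes a unique index on "name" and
+// a dialect that supports DO NOTHING):
+//
+// err := client.Group.CreateBulk(builders...).
+// OnConflict(sql.ConflictColumns(group.FieldName)).
+// DoNothing().
+// Exec(ctx)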
+func (u *GroupUpsertBulk) DoNothing() *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the GroupCreateBulk.OnConflict +// documentation for more info. +func (u *GroupUpsertBulk) Update(set func(*GroupUpsert)) *GroupUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&GroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *GroupUpsertBulk) SetUpdatedAt(v time.Time) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateUpdatedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *GroupUpsertBulk) SetDeletedAt(v time.Time) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDeletedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *GroupUpsertBulk) ClearDeletedAt() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *GroupUpsertBulk) SetName(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateName() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *GroupUpsertBulk) SetDescription(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDescription() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *GroupUpsertBulk) ClearDescription() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDescription() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *GroupUpsertBulk) SetRateMultiplier(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *GroupUpsertBulk) AddRateMultiplier(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateRateMultiplier() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetIsExclusive sets the "is_exclusive" field. +func (u *GroupUpsertBulk) SetIsExclusive(v bool) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetIsExclusive(v) + }) +} + +// UpdateIsExclusive sets the "is_exclusive" field to the value that was provided on create. 
+func (u *GroupUpsertBulk) UpdateIsExclusive() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateIsExclusive() + }) +} + +// SetStatus sets the "status" field. +func (u *GroupUpsertBulk) SetStatus(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateStatus() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateStatus() + }) +} + +// SetPlatform sets the "platform" field. +func (u *GroupUpsertBulk) SetPlatform(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetPlatform(v) + }) +} + +// UpdatePlatform sets the "platform" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdatePlatform() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdatePlatform() + }) +} + +// SetSubscriptionType sets the "subscription_type" field. +func (u *GroupUpsertBulk) SetSubscriptionType(v string) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetSubscriptionType(v) + }) +} + +// UpdateSubscriptionType sets the "subscription_type" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateSubscriptionType() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateSubscriptionType() + }) +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (u *GroupUpsertBulk) SetDailyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDailyLimitUsd(v) + }) +} + +// AddDailyLimitUsd adds v to the "daily_limit_usd" field. +func (u *GroupUpsertBulk) AddDailyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddDailyLimitUsd(v) + }) +} + +// UpdateDailyLimitUsd sets the "daily_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDailyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDailyLimitUsd() + }) +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (u *GroupUpsertBulk) ClearDailyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearDailyLimitUsd() + }) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) SetWeeklyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetWeeklyLimitUsd(v) + }) +} + +// AddWeeklyLimitUsd adds v to the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) AddWeeklyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddWeeklyLimitUsd(v) + }) +} + +// UpdateWeeklyLimitUsd sets the "weekly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateWeeklyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateWeeklyLimitUsd() + }) +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (u *GroupUpsertBulk) ClearWeeklyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearWeeklyLimitUsd() + }) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (u *GroupUpsertBulk) SetMonthlyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetMonthlyLimitUsd(v) + }) +} + +// AddMonthlyLimitUsd adds v to the "monthly_limit_usd" field. 
+func (u *GroupUpsertBulk) AddMonthlyLimitUsd(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddMonthlyLimitUsd(v) + }) +} + +// UpdateMonthlyLimitUsd sets the "monthly_limit_usd" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateMonthlyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateMonthlyLimitUsd() + }) +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (u *GroupUpsertBulk) ClearMonthlyLimitUsd() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearMonthlyLimitUsd() + }) +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (u *GroupUpsertBulk) SetDefaultValidityDays(v int) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetDefaultValidityDays(v) + }) +} + +// AddDefaultValidityDays adds v to the "default_validity_days" field. +func (u *GroupUpsertBulk) AddDefaultValidityDays(v int) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddDefaultValidityDays(v) + }) +} + +// UpdateDefaultValidityDays sets the "default_validity_days" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateDefaultValidityDays() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateDefaultValidityDays() + }) +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (u *GroupUpsertBulk) SetImagePrice1k(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice1k(v) + }) +} + +// AddImagePrice1k adds v to the "image_price_1k" field. +func (u *GroupUpsertBulk) AddImagePrice1k(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddImagePrice1k(v) + }) +} + +// UpdateImagePrice1k sets the "image_price_1k" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateImagePrice1k() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateImagePrice1k() + }) +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (u *GroupUpsertBulk) ClearImagePrice1k() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearImagePrice1k() + }) +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (u *GroupUpsertBulk) SetImagePrice2k(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice2k(v) + }) +} + +// AddImagePrice2k adds v to the "image_price_2k" field. +func (u *GroupUpsertBulk) AddImagePrice2k(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.AddImagePrice2k(v) + }) +} + +// UpdateImagePrice2k sets the "image_price_2k" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateImagePrice2k() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateImagePrice2k() + }) +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (u *GroupUpsertBulk) ClearImagePrice2k() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearImagePrice2k() + }) +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (u *GroupUpsertBulk) SetImagePrice4k(v float64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetImagePrice4k(v) + }) +} + +// AddImagePrice4k adds v to the "image_price_4k" field. 
+func (u *GroupUpsertBulk) AddImagePrice4k(v float64) *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.AddImagePrice4k(v)
+	})
+}
+
+// UpdateImagePrice4k sets the "image_price_4k" field to the value that was provided on create.
+func (u *GroupUpsertBulk) UpdateImagePrice4k() *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.UpdateImagePrice4k()
+	})
+}
+
+// ClearImagePrice4k clears the value of the "image_price_4k" field.
+func (u *GroupUpsertBulk) ClearImagePrice4k() *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.ClearImagePrice4k()
+	})
+}
+
+// SetClaudeCodeOnly sets the "claude_code_only" field.
+func (u *GroupUpsertBulk) SetClaudeCodeOnly(v bool) *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.SetClaudeCodeOnly(v)
+	})
+}
+
+// UpdateClaudeCodeOnly sets the "claude_code_only" field to the value that was provided on create.
+func (u *GroupUpsertBulk) UpdateClaudeCodeOnly() *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.UpdateClaudeCodeOnly()
+	})
+}
+
+// SetFallbackGroupID sets the "fallback_group_id" field.
+func (u *GroupUpsertBulk) SetFallbackGroupID(v int64) *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.SetFallbackGroupID(v)
+	})
+}
+
+// AddFallbackGroupID adds v to the "fallback_group_id" field.
+func (u *GroupUpsertBulk) AddFallbackGroupID(v int64) *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.AddFallbackGroupID(v)
+	})
+}
+
+// UpdateFallbackGroupID sets the "fallback_group_id" field to the value that was provided on create.
+func (u *GroupUpsertBulk) UpdateFallbackGroupID() *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.UpdateFallbackGroupID()
+	})
+}
+
+// ClearFallbackGroupID clears the value of the "fallback_group_id" field.
+func (u *GroupUpsertBulk) ClearFallbackGroupID() *GroupUpsertBulk {
+	return u.Update(func(s *GroupUpsert) {
+		s.ClearFallbackGroupID()
+	})
+}
+
+// Exec executes the query.
+func (u *GroupUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the GroupCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for GroupCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *GroupUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/group_delete.go b/backend/ent/group_delete.go
new file mode 100644
index 00000000..6587466f
--- /dev/null
+++ b/backend/ent/group_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+)
+
+// GroupDelete is the builder for deleting a Group entity.
+type GroupDelete struct {
+	config
+	hooks    []Hook
+	mutation *GroupMutation
+}
+
+// Where appends a list of predicates to the GroupDelete builder.
+func (_d *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
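+//
+// Illustrative sketch (not generated); the status predicate assumes the
+// group.StatusEQ helper generated for the "status" field:
+//
+//	n, err := client.Group.Delete().
+//		Where(group.StatusEQ("disabled")).
+//		Exec(ctx)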
+func (_d *GroupDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *GroupDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *GroupDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// GroupDeleteOne is the builder for deleting a single Group entity.
+type GroupDeleteOne struct {
+	_d *GroupDelete
+}
+
+// Where appends a list of predicates to the GroupDeleteOne builder.
+func (_d *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *GroupDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{group.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *GroupDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/group_query.go b/backend/ent/group_query.go
new file mode 100644
index 00000000..d4cc4f8d
--- /dev/null
+++ b/backend/ent/group_query.go
@@ -0,0 +1,1232 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/accountgroup"
+	"github.com/Wei-Shaw/sub2api/ent/apikey"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/redeemcode"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// GroupQuery is the builder for querying Group entities.
+type GroupQuery struct {
+	config
+	ctx                   *QueryContext
+	order                 []group.OrderOption
+	inters                []Interceptor
+	predicates            []predicate.Group
+	withAPIKeys           *APIKeyQuery
+	withRedeemCodes       *RedeemCodeQuery
+	withSubscriptions     *UserSubscriptionQuery
+	withUsageLogs         *UsageLogQuery
+	withAccounts          *AccountQuery
+	withAllowedUsers      *UserQuery
+	withAccountGroups     *AccountGroupQuery
+	withUserAllowedGroups *UserAllowedGroupQuery
+	modifiers             []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the GroupQuery builder.
+func (_q *GroupQuery) Where(ps ...predicate.Group) *GroupQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *GroupQuery) Limit(limit int) *GroupQuery {
+	_q.ctx.Limit = &limit
+	return _q
+}
+
+// Offset to start from.
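+//
+// Illustrative pagination sketch (not generated); page and pageSize are
+// hypothetical variables:
+//
+//	groups, err := client.Group.Query().
+//		Offset(page * pageSize).
+//		Limit(pageSize).
+//		All(ctx)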
+func (_q *GroupQuery) Offset(offset int) *GroupQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *GroupQuery) Unique(unique bool) *GroupQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *GroupQuery) Order(o ...group.OrderOption) *GroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAPIKeys chains the current query on the "api_keys" edge. +func (_q *GroupQuery) QueryAPIKeys() *APIKeyQuery { + query := (&APIKeyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.APIKeysTable, group.APIKeysColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryRedeemCodes chains the current query on the "redeem_codes" edge. +func (_q *GroupQuery) QueryRedeemCodes() *RedeemCodeQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.RedeemCodesTable, group.RedeemCodesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySubscriptions chains the current query on the "subscriptions" edge. +func (_q *GroupQuery) QuerySubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.SubscriptionsTable, group.SubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsageLogs chains the current query on the "usage_logs" edge. 
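+//
+// Illustrative traversal sketch (not generated); assumes the group.NameEQ
+// predicate generated for the "name" field:
+//
+//	logs, err := client.Group.Query().
+//		Where(group.NameEQ("pro")).
+//		QueryUsageLogs().
+//		All(ctx)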
+func (_q *GroupQuery) QueryUsageLogs() *UsageLogQuery { + query := (&UsageLogClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, group.UsageLogsTable, group.UsageLogsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccounts chains the current query on the "accounts" edge. +func (_q *GroupQuery) QueryAccounts() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AccountsTable, group.AccountsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAllowedUsers chains the current query on the "allowed_users" edge. +func (_q *GroupQuery) QueryAllowedUsers() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, group.AllowedUsersTable, group.AllowedUsersPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccountGroups chains the current query on the "account_groups" edge. +func (_q *GroupQuery) QueryAccountGroups() *AccountGroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(accountgroup.Table, accountgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.AccountGroupsTable, group.AccountGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge. 
+func (_q *GroupQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(group.Table, group.FieldID, selector), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.GroupColumn), + sqlgraph.Edge(sqlgraph.O2M, true, group.UserAllowedGroupsTable, group.UserAllowedGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Group entity from the query. +// Returns a *NotFoundError when no Group was found. +func (_q *GroupQuery) First(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{group.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *GroupQuery) FirstX(ctx context.Context) *Group { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Group ID from the query. +// Returns a *NotFoundError when no Group ID was found. +func (_q *GroupQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{group.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *GroupQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Group entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Group entity is found. +// Returns a *NotFoundError when no Group entities are found. +func (_q *GroupQuery) Only(ctx context.Context) (*Group, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{group.Label} + default: + return nil, &NotSingularError{group.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *GroupQuery) OnlyX(ctx context.Context) *Group { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Group ID in the query. +// Returns a *NotSingularError when more than one Group ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *GroupQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{group.Label} + default: + err = &NotSingularError{group.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
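+//
+// Illustrative sketch (not generated): Only/OnlyID suit predicates that are
+// expected to match exactly one row; the name value is an assumption:
+//
+//	id, err := client.Group.Query().
+//		Where(group.NameEQ("default")).
+//		OnlyID(ctx)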
+func (_q *GroupQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Groups. +func (_q *GroupQuery) All(ctx context.Context) ([]*Group, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Group, *GroupQuery]() + return withInterceptors[[]*Group](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *GroupQuery) AllX(ctx context.Context) []*Group { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Group IDs. +func (_q *GroupQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(group.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *GroupQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *GroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*GroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *GroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *GroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *GroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the GroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *GroupQuery) Clone() *GroupQuery { + if _q == nil { + return nil + } + return &GroupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]group.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Group{}, _q.predicates...), + withAPIKeys: _q.withAPIKeys.Clone(), + withRedeemCodes: _q.withRedeemCodes.Clone(), + withSubscriptions: _q.withSubscriptions.Clone(), + withUsageLogs: _q.withUsageLogs.Clone(), + withAccounts: _q.withAccounts.Clone(), + withAllowedUsers: _q.withAllowedUsers.Clone(), + withAccountGroups: _q.withAccountGroups.Clone(), + withUserAllowedGroups: _q.withUserAllowedGroups.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAPIKeys tells the query-builder to eager-load the nodes that are connected to +// the "api_keys" edge. The optional arguments are used to configure the query builder of the edge. 
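+//
+// Illustrative sketch (not generated): eager-load the edge and read it back
+// from the Edges container populated by All:
+//
+//	groups, err := client.Group.Query().
+//		WithAPIKeys().
+//		All(ctx)
+//
+// Each returned Group then carries its keys in Edges.APIKeys.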
+func (_q *GroupQuery) WithAPIKeys(opts ...func(*APIKeyQuery)) *GroupQuery { + query := (&APIKeyClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAPIKeys = query + return _q +} + +// WithRedeemCodes tells the query-builder to eager-load the nodes that are connected to +// the "redeem_codes" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithRedeemCodes(opts ...func(*RedeemCodeQuery)) *GroupQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withRedeemCodes = query + return _q +} + +// WithSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "subscriptions" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithSubscriptions(opts ...func(*UserSubscriptionQuery)) *GroupQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withSubscriptions = query + return _q +} + +// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to +// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *GroupQuery { + query := (&UsageLogClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageLogs = query + return _q +} + +// WithAccounts tells the query-builder to eager-load the nodes that are connected to +// the "accounts" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAccounts(opts ...func(*AccountQuery)) *GroupQuery { + query := (&AccountClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccounts = query + return _q +} + +// WithAllowedUsers tells the query-builder to eager-load the nodes that are connected to +// the "allowed_users" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAllowedUsers(opts ...func(*UserQuery)) *GroupQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAllowedUsers = query + return _q +} + +// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to +// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *GroupQuery { + query := (&AccountGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccountGroups = query + return _q +} + +// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to +// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *GroupQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *GroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUserAllowedGroups = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count     int       `json:"count,omitempty"`
+//	}
+//
+//	client.Group.Query().
+//		GroupBy(group.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *GroupQuery) GroupBy(field string, fields ...string) *GroupGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &GroupGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = group.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.Group.Query().
+//		Select(group.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (_q *GroupQuery) Select(fields ...string) *GroupSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &GroupSelect{GroupQuery: _q}
+	sbuild.label = group.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a GroupSelect configured with the given aggregations.
+func (_q *GroupQuery) Aggregate(fns ...AggregateFunc) *GroupSelect {
+	return _q.Select().Aggregate(fns...)
+}
+
+func (_q *GroupQuery) prepareQuery(ctx context.Context) error {
+	for _, inter := range _q.inters {
+		if inter == nil {
+			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
+		}
+		if trv, ok := inter.(Traverser); ok {
+			if err := trv.Traverse(ctx, _q); err != nil {
+				return err
+			}
+		}
+	}
+	for _, f := range _q.ctx.Fields {
+		if !group.ValidColumn(f) {
+			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
+		}
+	}
+	if _q.path != nil {
+		prev, err := _q.path(ctx)
+		if err != nil {
+			return err
+		}
+		_q.sql = prev
+	}
+	return nil
+}
+
+func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, error) {
+	var (
+		nodes       = []*Group{}
+		_spec       = _q.querySpec()
+		loadedTypes = [8]bool{
+			_q.withAPIKeys != nil,
+			_q.withRedeemCodes != nil,
+			_q.withSubscriptions != nil,
+			_q.withUsageLogs != nil,
+			_q.withAccounts != nil,
+			_q.withAllowedUsers != nil,
+			_q.withAccountGroups != nil,
+			_q.withUserAllowedGroups != nil,
+		}
+	)
+	_spec.ScanValues = func(columns []string) ([]any, error) {
+		return (*Group).scanValues(nil, columns)
+	}
+	_spec.Assign = func(columns []string, values []any) error {
+		node := &Group{config: _q.config}
+		nodes = append(nodes, node)
+		node.Edges.loadedTypes = loadedTypes
+		return node.assignValues(columns, values)
+	}
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	for i := range hooks {
+		hooks[i](ctx, _spec)
+	}
+	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
+		return nil, err
+	}
+	if len(nodes) == 0 {
+		return nodes, nil
+	}
+	if query := _q.withAPIKeys; query != nil {
+		if err := _q.loadAPIKeys(ctx, query, nodes,
+			func(n *Group) { n.Edges.APIKeys = []*APIKey{} },
+			func(n *Group, e *APIKey) { n.Edges.APIKeys = append(n.Edges.APIKeys, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withRedeemCodes; query != nil {
+		if err := _q.loadRedeemCodes(ctx, query, nodes,
+			func(n *Group) { n.Edges.RedeemCodes = []*RedeemCode{} },
+			func(n *Group, e *RedeemCode) { n.Edges.RedeemCodes = append(n.Edges.RedeemCodes, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withSubscriptions; query != nil {
+		if err := _q.loadSubscriptions(ctx, query, nodes,
+			func(n *Group) { n.Edges.Subscriptions = []*UserSubscription{} },
+			func(n *Group, e *UserSubscription) { n.Edges.Subscriptions = append(n.Edges.Subscriptions, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withUsageLogs; query != nil {
+		if err := _q.loadUsageLogs(ctx, query, nodes,
+			func(n *Group) { n.Edges.UsageLogs = []*UsageLog{} },
+			func(n *Group, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withAccounts; query != nil {
+		if err := _q.loadAccounts(ctx, query, nodes,
+			func(n *Group) { n.Edges.Accounts = []*Account{} },
+			func(n *Group, e *Account) { n.Edges.Accounts = append(n.Edges.Accounts, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withAllowedUsers; query != nil {
+		if err := _q.loadAllowedUsers(ctx, query, nodes,
+			func(n *Group) { n.Edges.AllowedUsers = []*User{} },
+			func(n *Group, e *User) { n.Edges.AllowedUsers = append(n.Edges.AllowedUsers, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withAccountGroups; query != nil {
+		if err := _q.loadAccountGroups(ctx, query, nodes,
+			func(n *Group) { n.Edges.AccountGroups = []*AccountGroup{} },
+			func(n *Group, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil {
+			return nil, err
+		}
+	}
+	if query := _q.withUserAllowedGroups; query != nil {
+		if err := _q.loadUserAllowedGroups(ctx, query, nodes,
+			func(n *Group) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} },
+			func(n *Group, e *UserAllowedGroup) { n.Edges.UserAllowedGroups = append(n.Edges.UserAllowedGroups, e) }); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+func (_q *GroupQuery) loadAPIKeys(ctx context.Context, query *APIKeyQuery, nodes []*Group, init func(*Group), assign func(*Group, *APIKey)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int64]*Group)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	if len(query.ctx.Fields) > 0 {
+		query.ctx.AppendFieldOnce(apikey.FieldGroupID)
+	}
+	query.Where(predicate.APIKey(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(group.APIKeysColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.GroupID
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "group_id" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+func (_q *GroupQuery) loadRedeemCodes(ctx context.Context, query *RedeemCodeQuery, nodes []*Group, init func(*Group), assign func(*Group, *RedeemCode)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int64]*Group)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	if len(query.ctx.Fields) > 0 {
+		query.ctx.AppendFieldOnce(redeemcode.FieldGroupID)
+	}
+	query.Where(predicate.RedeemCode(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(group.RedeemCodesColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.GroupID
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "group_id" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+func (_q *GroupQuery) loadSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*Group, init func(*Group), assign func(*Group, *UserSubscription)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int64]*Group)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	if len(query.ctx.Fields) > 0 {
+		query.ctx.AppendFieldOnce(usersubscription.FieldGroupID)
+	}
+	query.Where(predicate.UserSubscription(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(group.SubscriptionsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.GroupID
+		node, ok := nodeids[fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+func (_q *GroupQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*Group, init func(*Group), assign func(*Group, *UsageLog)) error {
+	fks := make([]driver.Value, 0, len(nodes))
+	nodeids := make(map[int64]*Group)
+	for i := range nodes {
+		fks = append(fks, nodes[i].ID)
+		nodeids[nodes[i].ID] = nodes[i]
+		if init != nil {
+			init(nodes[i])
+		}
+	}
+	if len(query.ctx.Fields) > 0 {
+		query.ctx.AppendFieldOnce(usagelog.FieldGroupID)
+	}
+	query.Where(predicate.UsageLog(func(s *sql.Selector) {
+		s.Where(sql.InValues(s.C(group.UsageLogsColumn), fks...))
+	}))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		fk := n.GroupID
+		if fk == nil {
+			return fmt.Errorf(`foreign-key "group_id" is nil for node %v`, n.ID)
+		}
+		node, ok := nodeids[*fk]
+		if !ok {
+			return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, *fk, n.ID)
+		}
+		assign(node, n)
+	}
+	return nil
+}
+func (_q *GroupQuery) loadAccounts(ctx context.Context, query *AccountQuery, nodes []*Group, init func(*Group), assign func(*Group, *Account)) error {
+	edgeIDs := make([]driver.Value, len(nodes))
+	byID := make(map[int64]*Group)
+	nids := make(map[int64]map[*Group]struct{})
+	for i, node := range nodes {
+		edgeIDs[i] = node.ID
+		byID[node.ID] = node
+		if init != nil {
+			init(node)
+		}
+	}
+	query.Where(func(s *sql.Selector) {
+		joinT := sql.Table(group.AccountsTable)
+		s.Join(joinT).On(s.C(account.FieldID), joinT.C(group.AccountsPrimaryKey[0]))
+		s.Where(sql.InValues(joinT.C(group.AccountsPrimaryKey[1]), edgeIDs...))
+		columns := s.SelectedColumns()
+		s.Select(joinT.C(group.AccountsPrimaryKey[1]))
+		s.AppendSelect(columns...)
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Group]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Account](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "accounts" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *GroupQuery) loadAllowedUsers(ctx context.Context, query *UserQuery, nodes []*Group, init func(*Group), assign func(*Group, *User)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*Group) + nids := make(map[int64]map[*Group]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(group.AllowedUsersTable) + s.Join(joinT).On(s.C(user.FieldID), joinT.C(group.AllowedUsersPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(group.AllowedUsersPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(group.AllowedUsersPrimaryKey[1])) + s.AppendSelect(columns...) 
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*Group]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*User](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowed_users" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *GroupQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Group, init func(*Group), assign func(*Group, *AccountGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(accountgroup.FieldGroupID) + } + query.Where(predicate.AccountGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.AccountGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} +func (_q *GroupQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*Group, init func(*Group), assign func(*Group, *UserAllowedGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Group) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userallowedgroup.FieldGroupID) + } + query.Where(predicate.UserAllowedGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(group.UserAllowedGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.GroupID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n) + } + assign(node, n) + } + return nil +} + +func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *GroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + _spec.From = _q.sql + 
if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, group.FieldID)
+		for i := range fields {
+			if fields[i] != group.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(group.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = group.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled back.
+func (_q *GroupQuery) ForUpdate(opts ...sql.LockOption) *GroupQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return _q
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (_q *GroupQuery) ForShare(opts ...sql.LockOption) *GroupQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return _q
+}
+
+// GroupGroupBy is the group-by builder for Group entities.
+type GroupGroupBy struct {
+	selector
+	build *GroupQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *GroupGroupBy) Aggregate(fns ...AggregateFunc) *GroupGroupBy {
+	_g.fns = append(_g.fns, fns...)
+	return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
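+//
+// Illustrative sketch (not generated); the destination struct mirrors the
+// grouped column plus the aggregation, and the field choice is an
+// assumption for the example:
+//
+//	var rows []struct {
+//		Platform string `json:"platform"`
+//		Count    int    `json:"count"`
+//	}
+//	err := client.Group.Query().
+//		GroupBy(group.FieldPlatform).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &rows)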
+func (_g *GroupGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
+	if err := _g.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*GroupQuery, *GroupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
+}
+
+func (_g *GroupGroupBy) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(_g.fns))
+	for _, fn := range _g.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
+		for _, f := range *_g.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*_g.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// GroupSelect is the builder for selecting fields of Group entities.
+type GroupSelect struct {
+	*GroupQuery
+	selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (_s *GroupSelect) Aggregate(fns ...AggregateFunc) *GroupSelect {
+	_s.fns = append(_s.fns, fns...)
+	return _s
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_s *GroupSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
+	if err := _s.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*GroupQuery, *GroupSelect](ctx, _s.GroupQuery, _s, _s.inters, v)
+}
+
+func (_s *GroupSelect) sqlScan(ctx context.Context, root *GroupQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(_s.fns))
+	for _, fn := range _s.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*_s.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go
new file mode 100644
index 00000000..1a6f15ec
--- /dev/null
+++ b/backend/ent/group_update.go
@@ -0,0 +1,2226 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/apikey"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/redeemcode"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// GroupUpdate is the builder for updating Group entities.
+type GroupUpdate struct {
+	config
+	hooks    []Hook
+	mutation *GroupMutation
+}
+
+// Where appends a list of predicates to the GroupUpdate builder.
+func (_u *GroupUpdate) Where(ps ...predicate.Group) *GroupUpdate {
+	_u.mutation.Where(ps...)
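+	// Appended predicates are ANDed together when the update statement is built.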
+ return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *GroupUpdate) SetUpdatedAt(v time.Time) *GroupUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *GroupUpdate) SetDeletedAt(v time.Time) *GroupUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDeletedAt(v *time.Time) *GroupUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *GroupUpdate) ClearDeletedAt() *GroupUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *GroupUpdate) SetName(v string) *GroupUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableName(v *string) *GroupUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *GroupUpdate) SetDescription(v string) *GroupUpdate { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDescription(v *string) *GroupUpdate { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// ClearDescription clears the value of the "description" field. +func (_u *GroupUpdate) ClearDescription() *GroupUpdate { + _u.mutation.ClearDescription() + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *GroupUpdate) SetRateMultiplier(v float64) *GroupUpdate { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableRateMultiplier(v *float64) *GroupUpdate { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *GroupUpdate) AddRateMultiplier(v float64) *GroupUpdate { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetIsExclusive sets the "is_exclusive" field. +func (_u *GroupUpdate) SetIsExclusive(v bool) *GroupUpdate { + _u.mutation.SetIsExclusive(v) + return _u +} + +// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableIsExclusive(v *bool) *GroupUpdate { + if v != nil { + _u.SetIsExclusive(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *GroupUpdate) SetStatus(v string) *GroupUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableStatus(v *string) *GroupUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *GroupUpdate) SetPlatform(v string) *GroupUpdate { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *GroupUpdate) SetNillablePlatform(v *string) *GroupUpdate { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetSubscriptionType sets the "subscription_type" field. 
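+//
+// Illustrative sketch (not generated): the SetNillable* variant makes
+// partial updates from optional input convenient; req is a hypothetical
+// struct with pointer fields:
+//
+//	client.Group.Update().
+//		Where(group.IDEQ(id)).
+//		SetNillableSubscriptionType(req.SubscriptionType). // no-op when nil
+//		SaveX(ctx)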
+func (_u *GroupUpdate) SetSubscriptionType(v string) *GroupUpdate { + _u.mutation.SetSubscriptionType(v) + return _u +} + +// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableSubscriptionType(v *string) *GroupUpdate { + if v != nil { + _u.SetSubscriptionType(*v) + } + return _u +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (_u *GroupUpdate) SetDailyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetDailyLimitUsd() + _u.mutation.SetDailyLimitUsd(v) + return _u +} + +// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDailyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetDailyLimitUsd(*v) + } + return _u +} + +// AddDailyLimitUsd adds value to the "daily_limit_usd" field. +func (_u *GroupUpdate) AddDailyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddDailyLimitUsd(v) + return _u +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (_u *GroupUpdate) ClearDailyLimitUsd() *GroupUpdate { + _u.mutation.ClearDailyLimitUsd() + return _u +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (_u *GroupUpdate) SetWeeklyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetWeeklyLimitUsd() + _u.mutation.SetWeeklyLimitUsd(v) + return _u +} + +// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableWeeklyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetWeeklyLimitUsd(*v) + } + return _u +} + +// AddWeeklyLimitUsd adds value to the "weekly_limit_usd" field. +func (_u *GroupUpdate) AddWeeklyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddWeeklyLimitUsd(v) + return _u +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (_u *GroupUpdate) ClearWeeklyLimitUsd() *GroupUpdate { + _u.mutation.ClearWeeklyLimitUsd() + return _u +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (_u *GroupUpdate) SetMonthlyLimitUsd(v float64) *GroupUpdate { + _u.mutation.ResetMonthlyLimitUsd() + _u.mutation.SetMonthlyLimitUsd(v) + return _u +} + +// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableMonthlyLimitUsd(v *float64) *GroupUpdate { + if v != nil { + _u.SetMonthlyLimitUsd(*v) + } + return _u +} + +// AddMonthlyLimitUsd adds value to the "monthly_limit_usd" field. +func (_u *GroupUpdate) AddMonthlyLimitUsd(v float64) *GroupUpdate { + _u.mutation.AddMonthlyLimitUsd(v) + return _u +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (_u *GroupUpdate) ClearMonthlyLimitUsd() *GroupUpdate { + _u.mutation.ClearMonthlyLimitUsd() + return _u +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (_u *GroupUpdate) SetDefaultValidityDays(v int) *GroupUpdate { + _u.mutation.ResetDefaultValidityDays() + _u.mutation.SetDefaultValidityDays(v) + return _u +} + +// SetNillableDefaultValidityDays sets the "default_validity_days" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableDefaultValidityDays(v *int) *GroupUpdate { + if v != nil { + _u.SetDefaultValidityDays(*v) + } + return _u +} + +// AddDefaultValidityDays adds value to the "default_validity_days" field. 
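+//
+// Illustrative sketch (not generated): Add* applies a delta to the stored
+// value, while Set* replaces it; the 30-day extension is an example value:
+//
+//	client.Group.Update().
+//		Where(group.IDEQ(id)).
+//		AddDefaultValidityDays(30).
+//		ExecX(ctx)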
+func (_u *GroupUpdate) AddDefaultValidityDays(v int) *GroupUpdate { + _u.mutation.AddDefaultValidityDays(v) + return _u +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (_u *GroupUpdate) SetImagePrice1k(v float64) *GroupUpdate { + _u.mutation.ResetImagePrice1k() + _u.mutation.SetImagePrice1k(v) + return _u +} + +// SetNillableImagePrice1k sets the "image_price_1k" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableImagePrice1k(v *float64) *GroupUpdate { + if v != nil { + _u.SetImagePrice1k(*v) + } + return _u +} + +// AddImagePrice1k adds value to the "image_price_1k" field. +func (_u *GroupUpdate) AddImagePrice1k(v float64) *GroupUpdate { + _u.mutation.AddImagePrice1k(v) + return _u +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (_u *GroupUpdate) ClearImagePrice1k() *GroupUpdate { + _u.mutation.ClearImagePrice1k() + return _u +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (_u *GroupUpdate) SetImagePrice2k(v float64) *GroupUpdate { + _u.mutation.ResetImagePrice2k() + _u.mutation.SetImagePrice2k(v) + return _u +} + +// SetNillableImagePrice2k sets the "image_price_2k" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableImagePrice2k(v *float64) *GroupUpdate { + if v != nil { + _u.SetImagePrice2k(*v) + } + return _u +} + +// AddImagePrice2k adds value to the "image_price_2k" field. +func (_u *GroupUpdate) AddImagePrice2k(v float64) *GroupUpdate { + _u.mutation.AddImagePrice2k(v) + return _u +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (_u *GroupUpdate) ClearImagePrice2k() *GroupUpdate { + _u.mutation.ClearImagePrice2k() + return _u +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (_u *GroupUpdate) SetImagePrice4k(v float64) *GroupUpdate { + _u.mutation.ResetImagePrice4k() + _u.mutation.SetImagePrice4k(v) + return _u +} + +// SetNillableImagePrice4k sets the "image_price_4k" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableImagePrice4k(v *float64) *GroupUpdate { + if v != nil { + _u.SetImagePrice4k(*v) + } + return _u +} + +// AddImagePrice4k adds value to the "image_price_4k" field. +func (_u *GroupUpdate) AddImagePrice4k(v float64) *GroupUpdate { + _u.mutation.AddImagePrice4k(v) + return _u +} + +// ClearImagePrice4k clears the value of the "image_price_4k" field. +func (_u *GroupUpdate) ClearImagePrice4k() *GroupUpdate { + _u.mutation.ClearImagePrice4k() + return _u +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (_u *GroupUpdate) SetClaudeCodeOnly(v bool) *GroupUpdate { + _u.mutation.SetClaudeCodeOnly(v) + return _u +} + +// SetNillableClaudeCodeOnly sets the "claude_code_only" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableClaudeCodeOnly(v *bool) *GroupUpdate { + if v != nil { + _u.SetClaudeCodeOnly(*v) + } + return _u +} + +// SetFallbackGroupID sets the "fallback_group_id" field. +func (_u *GroupUpdate) SetFallbackGroupID(v int64) *GroupUpdate { + _u.mutation.ResetFallbackGroupID() + _u.mutation.SetFallbackGroupID(v) + return _u +} + +// SetNillableFallbackGroupID sets the "fallback_group_id" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableFallbackGroupID(v *int64) *GroupUpdate { + if v != nil { + _u.SetFallbackGroupID(*v) + } + return _u +} + +// AddFallbackGroupID adds value to the "fallback_group_id" field. 
+func (_u *GroupUpdate) AddFallbackGroupID(v int64) *GroupUpdate { + _u.mutation.AddFallbackGroupID(v) + return _u +} + +// ClearFallbackGroupID clears the value of the "fallback_group_id" field. +func (_u *GroupUpdate) ClearFallbackGroupID() *GroupUpdate { + _u.mutation.ClearFallbackGroupID() + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. +func (_u *GroupUpdate) AddAPIKeys(v ...*APIKey) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *GroupUpdate) AddRedeemCodeIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *GroupUpdate) AddRedeemCodes(v ...*RedeemCode) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *GroupUpdate) AddSubscriptionIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddSubscriptionIDs(ids...) + return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *GroupUpdate) AddSubscriptions(v ...*UserSubscription) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *GroupUpdate) AddUsageLogIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *GroupUpdate) AddUsageLogs(v ...*UsageLog) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_u *GroupUpdate) AddAccountIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAccountIDs(ids...) + return _u +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_u *GroupUpdate) AddAccounts(v ...*Account) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAccountIDs(ids...) +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs. +func (_u *GroupUpdate) AddAllowedUserIDs(ids ...int64) *GroupUpdate { + _u.mutation.AddAllowedUserIDs(ids...) + return _u +} + +// AddAllowedUsers adds the "allowed_users" edges to the User entity. +func (_u *GroupUpdate) AddAllowedUsers(v ...*User) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedUserIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (_u *GroupUpdate) Mutation() *GroupMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the APIKey entity. +func (_u *GroupUpdate) ClearAPIKeys() *GroupUpdate { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to APIKey entities by IDs. 
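+//
+// Illustration only (client, ctx, id, staleIDs and newIDs are assumed
+// names): edge additions and removals batch into the same Save, so API keys
+// can be detached and attached in a single builder chain:
+//
+//	err := client.Group.UpdateOneID(id).
+//		RemoveAPIKeyIDs(staleIDs...).
+//		AddAPIKeyIDs(newIDs...).
+//		Exec(ctx)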
+func (_u *GroupUpdate) RemoveAPIKeyIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to APIKey entities. +func (_u *GroupUpdate) RemoveAPIKeys(v ...*APIKey) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *GroupUpdate) ClearRedeemCodes() *GroupUpdate { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. +func (_u *GroupUpdate) RemoveRedeemCodeIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. +func (_u *GroupUpdate) RemoveRedeemCodes(v ...*RedeemCode) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *GroupUpdate) ClearSubscriptions() *GroupUpdate { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *GroupUpdate) RemoveSubscriptionIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *GroupUpdate) RemoveSubscriptions(v ...*UserSubscription) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *GroupUpdate) ClearUsageLogs() *GroupUpdate { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *GroupUpdate) RemoveUsageLogIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *GroupUpdate) RemoveUsageLogs(v ...*UsageLog) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// ClearAccounts clears all "accounts" edges to the Account entity. +func (_u *GroupUpdate) ClearAccounts() *GroupUpdate { + _u.mutation.ClearAccounts() + return _u +} + +// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs. +func (_u *GroupUpdate) RemoveAccountIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAccountIDs(ids...) + return _u +} + +// RemoveAccounts removes "accounts" edges to Account entities. +func (_u *GroupUpdate) RemoveAccounts(v ...*Account) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAccountIDs(ids...) +} + +// ClearAllowedUsers clears all "allowed_users" edges to the User entity. +func (_u *GroupUpdate) ClearAllowedUsers() *GroupUpdate { + _u.mutation.ClearAllowedUsers() + return _u +} + +// RemoveAllowedUserIDs removes the "allowed_users" edge to User entities by IDs. +func (_u *GroupUpdate) RemoveAllowedUserIDs(ids ...int64) *GroupUpdate { + _u.mutation.RemoveAllowedUserIDs(ids...) + return _u +} + +// RemoveAllowedUsers removes "allowed_users" edges to User entities. 
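+//
+// Sketch of a bulk variant (the predicate helper is assumed to exist in the
+// generated group package): the multi-row GroupUpdate builder pairs these
+// edge removals with Where, and Save reports how many groups were affected:
+//
+//	n, err := client.Group.Update().
+//		Where(group.IsExclusive(true)).
+//		RemoveAllowedUserIDs(userID).
+//		Save(ctx)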
+func (_u *GroupUpdate) RemoveAllowedUsers(v ...*User) *GroupUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedUserIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *GroupUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *GroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *GroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *GroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *GroupUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if group.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := group.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *GroupUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := group.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := group.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)} + } + } + if v, ok := _u.mutation.SubscriptionType(); ok { + if err := group.SubscriptionTypeValidator(v); err != nil { + return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)} + } + } + return nil +} + +func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(group.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + } + if _u.mutation.DescriptionCleared() { + _spec.ClearField(group.FieldDescription, field.TypeString) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + 
_spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.IsExclusive(); ok { + _spec.SetField(group.FieldIsExclusive, field.TypeBool, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(group.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(group.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.SubscriptionType(); ok { + _spec.SetField(group.FieldSubscriptionType, field.TypeString, value) + } + if value, ok := _u.mutation.DailyLimitUsd(); ok { + _spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyLimitUsd(); ok { + _spec.AddField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.DailyLimitUsdCleared() { + _spec.ClearField(group.FieldDailyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.WeeklyLimitUsd(); ok { + _spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyLimitUsd(); ok { + _spec.AddField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.WeeklyLimitUsdCleared() { + _spec.ClearField(group.FieldWeeklyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.MonthlyLimitUsd(); ok { + _spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyLimitUsd(); ok { + _spec.AddField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.MonthlyLimitUsdCleared() { + _spec.ClearField(group.FieldMonthlyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.DefaultValidityDays(); ok { + _spec.SetField(group.FieldDefaultValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDefaultValidityDays(); ok { + _spec.AddField(group.FieldDefaultValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.ImagePrice1k(); ok { + _spec.SetField(group.FieldImagePrice1k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice1k(); ok { + _spec.AddField(group.FieldImagePrice1k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice1kCleared() { + _spec.ClearField(group.FieldImagePrice1k, field.TypeFloat64) + } + if value, ok := _u.mutation.ImagePrice2k(); ok { + _spec.SetField(group.FieldImagePrice2k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice2k(); ok { + _spec.AddField(group.FieldImagePrice2k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice2kCleared() { + _spec.ClearField(group.FieldImagePrice2k, field.TypeFloat64) + } + if value, ok := _u.mutation.ImagePrice4k(); ok { + _spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice4k(); ok { + _spec.AddField(group.FieldImagePrice4k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice4kCleared() { + _spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64) + } + if value, ok := _u.mutation.ClaudeCodeOnly(); ok { + _spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value) + } + if value, ok := _u.mutation.FallbackGroupID(); ok { + _spec.SetField(group.FieldFallbackGroupID, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedFallbackGroupID(); ok { + _spec.AddField(group.FieldFallbackGroupID, field.TypeInt64, value) + } + if 
_u.mutation.FallbackGroupIDCleared() { + _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + 
}, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := 
range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedUsersIDs(); len(nodes) > 0 && !_u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedUsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{group.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// GroupUpdateOne is the builder for updating a single Group entity. +type GroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *GroupMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *GroupUpdateOne) SetUpdatedAt(v time.Time) *GroupUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *GroupUpdateOne) SetDeletedAt(v time.Time) *GroupUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableDeletedAt(v *time.Time) *GroupUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *GroupUpdateOne) ClearDeletedAt() *GroupUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. 
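+//
+// Unlike GroupUpdate.Save above, which returns only an affected-row count,
+// this single-entity builder hands back the updated *Group. Hypothetical
+// call site (client, ctx and id assumed):
+//
+//	g, err := client.Group.UpdateOneID(id).
+//		SetName("pro-tier").
+//		Save(ctx)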
+func (_u *GroupUpdateOne) SetName(v string) *GroupUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableName(v *string) *GroupUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *GroupUpdateOne) SetDescription(v string) *GroupUpdateOne { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableDescription(v *string) *GroupUpdateOne { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// ClearDescription clears the value of the "description" field. +func (_u *GroupUpdateOne) ClearDescription() *GroupUpdateOne { + _u.mutation.ClearDescription() + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *GroupUpdateOne) SetRateMultiplier(v float64) *GroupUpdateOne { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableRateMultiplier(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *GroupUpdateOne) AddRateMultiplier(v float64) *GroupUpdateOne { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetIsExclusive sets the "is_exclusive" field. +func (_u *GroupUpdateOne) SetIsExclusive(v bool) *GroupUpdateOne { + _u.mutation.SetIsExclusive(v) + return _u +} + +// SetNillableIsExclusive sets the "is_exclusive" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableIsExclusive(v *bool) *GroupUpdateOne { + if v != nil { + _u.SetIsExclusive(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *GroupUpdateOne) SetStatus(v string) *GroupUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableStatus(v *string) *GroupUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetPlatform sets the "platform" field. +func (_u *GroupUpdateOne) SetPlatform(v string) *GroupUpdateOne { + _u.mutation.SetPlatform(v) + return _u +} + +// SetNillablePlatform sets the "platform" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillablePlatform(v *string) *GroupUpdateOne { + if v != nil { + _u.SetPlatform(*v) + } + return _u +} + +// SetSubscriptionType sets the "subscription_type" field. +func (_u *GroupUpdateOne) SetSubscriptionType(v string) *GroupUpdateOne { + _u.mutation.SetSubscriptionType(v) + return _u +} + +// SetNillableSubscriptionType sets the "subscription_type" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableSubscriptionType(v *string) *GroupUpdateOne { + if v != nil { + _u.SetSubscriptionType(*v) + } + return _u +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (_u *GroupUpdateOne) SetDailyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.ResetDailyLimitUsd() + _u.mutation.SetDailyLimitUsd(v) + return _u +} + +// SetNillableDailyLimitUsd sets the "daily_limit_usd" field if the given value is not nil. 
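+//
+// Note the asymmetry: a nil pointer here leaves the column untouched, while
+// ClearDailyLimitUsd (below) writes SQL NULL. Whether NULL means "no cap"
+// is a service-level convention, not something this generated code decides.
+// Sketch with assumed names:
+//
+//	err := client.Group.UpdateOneID(id).ClearDailyLimitUsd().Exec(ctx)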
+func (_u *GroupUpdateOne) SetNillableDailyLimitUsd(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetDailyLimitUsd(*v) + } + return _u +} + +// AddDailyLimitUsd adds value to the "daily_limit_usd" field. +func (_u *GroupUpdateOne) AddDailyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.AddDailyLimitUsd(v) + return _u +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (_u *GroupUpdateOne) ClearDailyLimitUsd() *GroupUpdateOne { + _u.mutation.ClearDailyLimitUsd() + return _u +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. +func (_u *GroupUpdateOne) SetWeeklyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.ResetWeeklyLimitUsd() + _u.mutation.SetWeeklyLimitUsd(v) + return _u +} + +// SetNillableWeeklyLimitUsd sets the "weekly_limit_usd" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableWeeklyLimitUsd(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetWeeklyLimitUsd(*v) + } + return _u +} + +// AddWeeklyLimitUsd adds value to the "weekly_limit_usd" field. +func (_u *GroupUpdateOne) AddWeeklyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.AddWeeklyLimitUsd(v) + return _u +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (_u *GroupUpdateOne) ClearWeeklyLimitUsd() *GroupUpdateOne { + _u.mutation.ClearWeeklyLimitUsd() + return _u +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (_u *GroupUpdateOne) SetMonthlyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.ResetMonthlyLimitUsd() + _u.mutation.SetMonthlyLimitUsd(v) + return _u +} + +// SetNillableMonthlyLimitUsd sets the "monthly_limit_usd" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableMonthlyLimitUsd(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetMonthlyLimitUsd(*v) + } + return _u +} + +// AddMonthlyLimitUsd adds value to the "monthly_limit_usd" field. +func (_u *GroupUpdateOne) AddMonthlyLimitUsd(v float64) *GroupUpdateOne { + _u.mutation.AddMonthlyLimitUsd(v) + return _u +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (_u *GroupUpdateOne) ClearMonthlyLimitUsd() *GroupUpdateOne { + _u.mutation.ClearMonthlyLimitUsd() + return _u +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (_u *GroupUpdateOne) SetDefaultValidityDays(v int) *GroupUpdateOne { + _u.mutation.ResetDefaultValidityDays() + _u.mutation.SetDefaultValidityDays(v) + return _u +} + +// SetNillableDefaultValidityDays sets the "default_validity_days" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableDefaultValidityDays(v *int) *GroupUpdateOne { + if v != nil { + _u.SetDefaultValidityDays(*v) + } + return _u +} + +// AddDefaultValidityDays adds value to the "default_validity_days" field. +func (_u *GroupUpdateOne) AddDefaultValidityDays(v int) *GroupUpdateOne { + _u.mutation.AddDefaultValidityDays(v) + return _u +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (_u *GroupUpdateOne) SetImagePrice1k(v float64) *GroupUpdateOne { + _u.mutation.ResetImagePrice1k() + _u.mutation.SetImagePrice1k(v) + return _u +} + +// SetNillableImagePrice1k sets the "image_price_1k" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableImagePrice1k(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetImagePrice1k(*v) + } + return _u +} + +// AddImagePrice1k adds value to the "image_price_1k" field. 
+func (_u *GroupUpdateOne) AddImagePrice1k(v float64) *GroupUpdateOne { + _u.mutation.AddImagePrice1k(v) + return _u +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (_u *GroupUpdateOne) ClearImagePrice1k() *GroupUpdateOne { + _u.mutation.ClearImagePrice1k() + return _u +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (_u *GroupUpdateOne) SetImagePrice2k(v float64) *GroupUpdateOne { + _u.mutation.ResetImagePrice2k() + _u.mutation.SetImagePrice2k(v) + return _u +} + +// SetNillableImagePrice2k sets the "image_price_2k" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableImagePrice2k(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetImagePrice2k(*v) + } + return _u +} + +// AddImagePrice2k adds value to the "image_price_2k" field. +func (_u *GroupUpdateOne) AddImagePrice2k(v float64) *GroupUpdateOne { + _u.mutation.AddImagePrice2k(v) + return _u +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (_u *GroupUpdateOne) ClearImagePrice2k() *GroupUpdateOne { + _u.mutation.ClearImagePrice2k() + return _u +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (_u *GroupUpdateOne) SetImagePrice4k(v float64) *GroupUpdateOne { + _u.mutation.ResetImagePrice4k() + _u.mutation.SetImagePrice4k(v) + return _u +} + +// SetNillableImagePrice4k sets the "image_price_4k" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableImagePrice4k(v *float64) *GroupUpdateOne { + if v != nil { + _u.SetImagePrice4k(*v) + } + return _u +} + +// AddImagePrice4k adds value to the "image_price_4k" field. +func (_u *GroupUpdateOne) AddImagePrice4k(v float64) *GroupUpdateOne { + _u.mutation.AddImagePrice4k(v) + return _u +} + +// ClearImagePrice4k clears the value of the "image_price_4k" field. +func (_u *GroupUpdateOne) ClearImagePrice4k() *GroupUpdateOne { + _u.mutation.ClearImagePrice4k() + return _u +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (_u *GroupUpdateOne) SetClaudeCodeOnly(v bool) *GroupUpdateOne { + _u.mutation.SetClaudeCodeOnly(v) + return _u +} + +// SetNillableClaudeCodeOnly sets the "claude_code_only" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableClaudeCodeOnly(v *bool) *GroupUpdateOne { + if v != nil { + _u.SetClaudeCodeOnly(*v) + } + return _u +} + +// SetFallbackGroupID sets the "fallback_group_id" field. +func (_u *GroupUpdateOne) SetFallbackGroupID(v int64) *GroupUpdateOne { + _u.mutation.ResetFallbackGroupID() + _u.mutation.SetFallbackGroupID(v) + return _u +} + +// SetNillableFallbackGroupID sets the "fallback_group_id" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableFallbackGroupID(v *int64) *GroupUpdateOne { + if v != nil { + _u.SetFallbackGroupID(*v) + } + return _u +} + +// AddFallbackGroupID adds value to the "fallback_group_id" field. +func (_u *GroupUpdateOne) AddFallbackGroupID(v int64) *GroupUpdateOne { + _u.mutation.AddFallbackGroupID(v) + return _u +} + +// ClearFallbackGroupID clears the value of the "fallback_group_id" field. +func (_u *GroupUpdateOne) ClearFallbackGroupID() *GroupUpdateOne { + _u.mutation.ClearFallbackGroupID() + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. 
+func (_u *GroupUpdateOne) AddAPIKeys(v ...*APIKey) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *GroupUpdateOne) AddRedeemCodeIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *GroupUpdateOne) AddRedeemCodes(v ...*RedeemCode) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *GroupUpdateOne) AddSubscriptionIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddSubscriptionIDs(ids...) + return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *GroupUpdateOne) AddSubscriptions(v ...*UserSubscription) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *GroupUpdateOne) AddUsageLogIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *GroupUpdateOne) AddUsageLogs(v ...*UsageLog) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_u *GroupUpdateOne) AddAccountIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddAccountIDs(ids...) + return _u +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_u *GroupUpdateOne) AddAccounts(v ...*Account) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAccountIDs(ids...) +} + +// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by IDs. +func (_u *GroupUpdateOne) AddAllowedUserIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.AddAllowedUserIDs(ids...) + return _u +} + +// AddAllowedUsers adds the "allowed_users" edges to the User entity. +func (_u *GroupUpdateOne) AddAllowedUsers(v ...*User) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedUserIDs(ids...) +} + +// Mutation returns the GroupMutation object of the builder. +func (_u *GroupUpdateOne) Mutation() *GroupMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the APIKey entity. +func (_u *GroupUpdateOne) ClearAPIKeys() *GroupUpdateOne { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to APIKey entities by IDs. +func (_u *GroupUpdateOne) RemoveAPIKeyIDs(ids ...int64) *GroupUpdateOne { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to APIKey entities. +func (_u *GroupUpdateOne) RemoveAPIKeys(v ...*APIKey) *GroupUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. 
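+//
+// Both generated sqlSave implementations guard removal branches with
+// !RedeemCodesCleared(), so combining ClearRedeemCodes with
+// RemoveRedeemCodeIDs in one builder simply clears every edge; the itemized
+// removals are skipped.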
+func (_u *GroupUpdateOne) ClearRedeemCodes() *GroupUpdateOne {
+	_u.mutation.ClearRedeemCodes()
+	return _u
+}
+
+// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs.
+func (_u *GroupUpdateOne) RemoveRedeemCodeIDs(ids ...int64) *GroupUpdateOne {
+	_u.mutation.RemoveRedeemCodeIDs(ids...)
+	return _u
+}
+
+// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities.
+func (_u *GroupUpdateOne) RemoveRedeemCodes(v ...*RedeemCode) *GroupUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveRedeemCodeIDs(ids...)
+}
+
+// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity.
+func (_u *GroupUpdateOne) ClearSubscriptions() *GroupUpdateOne {
+	_u.mutation.ClearSubscriptions()
+	return _u
+}
+
+// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs.
+func (_u *GroupUpdateOne) RemoveSubscriptionIDs(ids ...int64) *GroupUpdateOne {
+	_u.mutation.RemoveSubscriptionIDs(ids...)
+	return _u
+}
+
+// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities.
+func (_u *GroupUpdateOne) RemoveSubscriptions(v ...*UserSubscription) *GroupUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveSubscriptionIDs(ids...)
+}
+
+// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity.
+func (_u *GroupUpdateOne) ClearUsageLogs() *GroupUpdateOne {
+	_u.mutation.ClearUsageLogs()
+	return _u
+}
+
+// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs.
+func (_u *GroupUpdateOne) RemoveUsageLogIDs(ids ...int64) *GroupUpdateOne {
+	_u.mutation.RemoveUsageLogIDs(ids...)
+	return _u
+}
+
+// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities.
+func (_u *GroupUpdateOne) RemoveUsageLogs(v ...*UsageLog) *GroupUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveUsageLogIDs(ids...)
+}
+
+// ClearAccounts clears all "accounts" edges to the Account entity.
+func (_u *GroupUpdateOne) ClearAccounts() *GroupUpdateOne {
+	_u.mutation.ClearAccounts()
+	return _u
+}
+
+// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs.
+func (_u *GroupUpdateOne) RemoveAccountIDs(ids ...int64) *GroupUpdateOne {
+	_u.mutation.RemoveAccountIDs(ids...)
+	return _u
+}
+
+// RemoveAccounts removes "accounts" edges to Account entities.
+func (_u *GroupUpdateOne) RemoveAccounts(v ...*Account) *GroupUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveAccountIDs(ids...)
+}
+
+// ClearAllowedUsers clears all "allowed_users" edges to the User entity.
+func (_u *GroupUpdateOne) ClearAllowedUsers() *GroupUpdateOne {
+	_u.mutation.ClearAllowedUsers()
+	return _u
+}
+
+// RemoveAllowedUserIDs removes the "allowed_users" edge to User entities by IDs.
+func (_u *GroupUpdateOne) RemoveAllowedUserIDs(ids ...int64) *GroupUpdateOne {
+	_u.mutation.RemoveAllowedUserIDs(ids...)
+	return _u
+}
+
+// RemoveAllowedUsers removes "allowed_users" edges to User entities.
+func (_u *GroupUpdateOne) RemoveAllowedUsers(v ...*User) *GroupUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveAllowedUserIDs(ids...)
+}
+
+// Where appends a list of predicates to the GroupUpdateOne builder.
+func (_u *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
+	_u.mutation.Where(ps...)
+ return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Group entity. +func (_u *GroupUpdateOne) Save(ctx context.Context) (*Group, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *GroupUpdateOne) SaveX(ctx context.Context) *Group { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *GroupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *GroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *GroupUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if group.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized group.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := group.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *GroupUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := group.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Group.name": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := group.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Group.status": %w`, err)} + } + } + if v, ok := _u.mutation.Platform(); ok { + if err := group.PlatformValidator(v); err != nil { + return &ValidationError{Name: "platform", err: fmt.Errorf(`ent: validator failed for field "Group.platform": %w`, err)} + } + } + if v, ok := _u.mutation.SubscriptionType(); ok { + if err := group.SubscriptionTypeValidator(v); err != nil { + return &ValidationError{Name: "subscription_type", err: fmt.Errorf(`ent: validator failed for field "Group.subscription_type": %w`, err)} + } + } + return nil +} + +func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(group.Table, group.Columns, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Group.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, group.FieldID) + for _, f := range fields { + if !group.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != group.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := 
_u.mutation.UpdatedAt(); ok { + _spec.SetField(group.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(group.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(group.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(group.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(group.FieldDescription, field.TypeString, value) + } + if _u.mutation.DescriptionCleared() { + _spec.ClearField(group.FieldDescription, field.TypeString) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(group.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.IsExclusive(); ok { + _spec.SetField(group.FieldIsExclusive, field.TypeBool, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(group.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Platform(); ok { + _spec.SetField(group.FieldPlatform, field.TypeString, value) + } + if value, ok := _u.mutation.SubscriptionType(); ok { + _spec.SetField(group.FieldSubscriptionType, field.TypeString, value) + } + if value, ok := _u.mutation.DailyLimitUsd(); ok { + _spec.SetField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyLimitUsd(); ok { + _spec.AddField(group.FieldDailyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.DailyLimitUsdCleared() { + _spec.ClearField(group.FieldDailyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.WeeklyLimitUsd(); ok { + _spec.SetField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyLimitUsd(); ok { + _spec.AddField(group.FieldWeeklyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.WeeklyLimitUsdCleared() { + _spec.ClearField(group.FieldWeeklyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.MonthlyLimitUsd(); ok { + _spec.SetField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyLimitUsd(); ok { + _spec.AddField(group.FieldMonthlyLimitUsd, field.TypeFloat64, value) + } + if _u.mutation.MonthlyLimitUsdCleared() { + _spec.ClearField(group.FieldMonthlyLimitUsd, field.TypeFloat64) + } + if value, ok := _u.mutation.DefaultValidityDays(); ok { + _spec.SetField(group.FieldDefaultValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDefaultValidityDays(); ok { + _spec.AddField(group.FieldDefaultValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.ImagePrice1k(); ok { + _spec.SetField(group.FieldImagePrice1k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice1k(); ok { + _spec.AddField(group.FieldImagePrice1k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice1kCleared() { + _spec.ClearField(group.FieldImagePrice1k, field.TypeFloat64) + } + if value, ok := _u.mutation.ImagePrice2k(); ok { + _spec.SetField(group.FieldImagePrice2k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice2k(); ok { + _spec.AddField(group.FieldImagePrice2k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice2kCleared() { + _spec.ClearField(group.FieldImagePrice2k, field.TypeFloat64) + } + if value, ok := _u.mutation.ImagePrice4k(); ok { + 
_spec.SetField(group.FieldImagePrice4k, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedImagePrice4k(); ok { + _spec.AddField(group.FieldImagePrice4k, field.TypeFloat64, value) + } + if _u.mutation.ImagePrice4kCleared() { + _spec.ClearField(group.FieldImagePrice4k, field.TypeFloat64) + } + if value, ok := _u.mutation.ClaudeCodeOnly(); ok { + _spec.SetField(group.FieldClaudeCodeOnly, field.TypeBool, value) + } + if value, ok := _u.mutation.FallbackGroupID(); ok { + _spec.SetField(group.FieldFallbackGroupID, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedFallbackGroupID(); ok { + _spec.AddField(group.FieldFallbackGroupID, field.TypeInt64, value) + } + if _u.mutation.FallbackGroupIDCleared() { + _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.APIKeysTable, + Columns: []string{group.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.RedeemCodesTable, + Columns: []string{group.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + 
Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.SubscriptionsTable, + Columns: []string{group.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: group.UsageLogsTable, + Columns: []string{group.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, 
field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AccountsTable, + Columns: group.AccountsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &AccountGroupCreate{config: _u.config, mutation: newAccountGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedUsersIDs(); len(nodes) > 0 && !_u.mutation.AllowedUsersCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedUsersIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: group.AllowedUsersTable, + Columns: group.AllowedUsersPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Group{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{group.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go new file mode 100644 index 00000000..532b0d2c --- /dev/null +++ 
b/backend/ent/hook/hook.go @@ -0,0 +1,367 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "github.com/Wei-Shaw/sub2api/ent" +) + +// The APIKeyFunc type is an adapter to allow the use of ordinary +// function as APIKey mutator. +type APIKeyFunc func(context.Context, *ent.APIKeyMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f APIKeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.APIKeyMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.APIKeyMutation", m) +} + +// The AccountFunc type is an adapter to allow the use of ordinary +// function as Account mutator. +type AccountFunc func(context.Context, *ent.AccountMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AccountFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AccountMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountMutation", m) +} + +// The AccountGroupFunc type is an adapter to allow the use of ordinary +// function as AccountGroup mutator. +type AccountGroupFunc func(context.Context, *ent.AccountGroupMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AccountGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AccountGroupMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountGroupMutation", m) +} + +// The GroupFunc type is an adapter to allow the use of ordinary +// function as Group mutator. +type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.GroupMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m) +} + +// The PromoCodeFunc type is an adapter to allow the use of ordinary +// function as PromoCode mutator. +type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PromoCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PromoCodeMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeMutation", m) +} + +// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary +// function as PromoCodeUsage mutator. +type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PromoCodeUsageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PromoCodeUsageMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeUsageMutation", m) +} + +// The ProxyFunc type is an adapter to allow the use of ordinary +// function as Proxy mutator. +type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ProxyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ProxyMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.ProxyMutation", m) +} + +// The RedeemCodeFunc type is an adapter to allow the use of ordinary +// function as RedeemCode mutator. +type RedeemCodeFunc func(context.Context, *ent.RedeemCodeMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f RedeemCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.RedeemCodeMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RedeemCodeMutation", m) +} + +// The SettingFunc type is an adapter to allow the use of ordinary +// function as Setting mutator. +type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.SettingMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m) +} + +// The UsageLogFunc type is an adapter to allow the use of ordinary +// function as UsageLog mutator. +type UsageLogFunc func(context.Context, *ent.UsageLogMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UsageLogFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UsageLogMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UsageLogMutation", m) +} + +// The UserFunc type is an adapter to allow the use of ordinary +// function as User mutator. +type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) +} + +// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary +// function as UserAllowedGroup mutator. +type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserAllowedGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserAllowedGroupMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAllowedGroupMutation", m) +} + +// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary +// function as UserAttributeDefinition mutator. +type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserAttributeDefinitionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserAttributeDefinitionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAttributeDefinitionMutation", m) +} + +// The UserAttributeValueFunc type is an adapter to allow the use of ordinary +// function as UserAttributeValue mutator. +type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserAttributeValueFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserAttributeValueMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.UserAttributeValueMutation", m) +} + +// The UserSubscriptionFunc type is an adapter to allow the use of ordinary +// function as UserSubscription mutator. +type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserSubscriptionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserSubscriptionMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserSubscriptionMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. 
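//
// As a rough sketch (the registration site and mutation body below are
// illustrative, not part of the generated code), a per-type hook can be
// skipped for delete operations like so:
//
//	client.User.Use(hook.Unless(
//		func(next ent.Mutator) ent.Mutator {
//			return hook.UserFunc(func(ctx context.Context, m *ent.UserMutation) (ent.Value, error) {
//				// pre-mutation work (stamping a field, auditing, ...) goes here
//				return next.Mutate(ctx, m)
//			})
//		},
//		ent.OpDelete|ent.OpDeleteOne,
//	))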
+// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go new file mode 100644 index 00000000..765d39b4 --- /dev/null +++ b/backend/ent/intercept/intercept.go @@ -0,0 +1,569 @@ +// Code generated by ent, DO NOT EDIT. + +package intercept + +import ( + "context" + "fmt" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// The Query interface represents an operation that queries a graph. +// By using this interface, users can write generic code that manipulates +// query builders of different types. +type Query interface { + // Type returns the string representation of the query type. + Type() string + // Limit the number of records to be returned by this query. + Limit(int) + // Offset to start from. + Offset(int) + // Unique configures the query builder to filter duplicate records. + Unique(bool) + // Order specifies how the records should be ordered. + Order(...func(*sql.Selector)) + // WhereP appends storage-level predicates to the query builder. 
Using this method, users + // can use type-assertion to append predicates that do not depend on any generated package. + WhereP(...func(*sql.Selector)) +} + +// The Func type is an adapter that allows ordinary functions to be used as interceptors. +// Unlike traversal functions, interceptors are skipped during graph traversals. Note that the +// implementation of Func is different from the one defined in entgo.io/ent.InterceptFunc. +type Func func(context.Context, Query) error + +// Intercept calls f(ctx, q) and then applied the next Querier. +func (f Func) Intercept(next ent.Querier) ent.Querier { + return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) { + query, err := NewQuery(q) + if err != nil { + return nil, err + } + if err := f(ctx, query); err != nil { + return nil, err + } + return next.Query(ctx, q) + }) +} + +// The TraverseFunc type is an adapter to allow the use of ordinary function as Traverser. +// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f. +type TraverseFunc func(context.Context, Query) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseFunc) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseFunc) Traverse(ctx context.Context, q ent.Query) error { + query, err := NewQuery(q) + if err != nil { + return err + } + return f(ctx, query) +} + +// The APIKeyFunc type is an adapter to allow the use of ordinary function as a Querier. +type APIKeyFunc func(context.Context, *ent.APIKeyQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f APIKeyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.APIKeyQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.APIKeyQuery", q) +} + +// The TraverseAPIKey type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAPIKey func(context.Context, *ent.APIKeyQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAPIKey) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAPIKey) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.APIKeyQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.APIKeyQuery", q) +} + +// The AccountFunc type is an adapter to allow the use of ordinary function as a Querier. +type AccountFunc func(context.Context, *ent.AccountQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AccountFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AccountQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountQuery", q) +} + +// The TraverseAccount type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAccount func(context.Context, *ent.AccountQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAccount) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAccount) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AccountQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. 
expect *ent.AccountQuery", q) +} + +// The AccountGroupFunc type is an adapter to allow the use of ordinary function as a Querier. +type AccountGroupFunc func(context.Context, *ent.AccountGroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f AccountGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.AccountGroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) +} + +// The TraverseAccountGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseAccountGroup func(context.Context, *ent.AccountGroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseAccountGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseAccountGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.AccountGroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q) +} + +// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier. +type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f GroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.GroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) +} + +// The TraverseGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseGroup func(context.Context, *ent.GroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.GroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) +} + +// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier. +type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f PromoCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.PromoCodeQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q) +} + +// The TraversePromoCode type is an adapter to allow the use of ordinary function as Traverser. +type TraversePromoCode func(context.Context, *ent.PromoCodeQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraversePromoCode) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraversePromoCode) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.PromoCodeQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q) +} + +// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary function as a Querier. +type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageQuery) (ent.Value, error) + +// Query calls f(ctx, q). 
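//
// An illustrative aside (ours, not generated code): the generic Func defined
// above can pin a storage-level predicate onto every query via WhereP,
// assuming each targeted table actually carries the column:
//
//	client.Intercept(intercept.Func(func(ctx context.Context, q intercept.Query) error {
//		// sql is entgo.io/ent/dialect/sql, already imported by this package
//		q.WhereP(sql.FieldIsNull("deleted_at"))
//		return nil
//	}))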
+func (f PromoCodeUsageFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.PromoCodeUsageQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q) +} + +// The TraversePromoCodeUsage type is an adapter to allow the use of ordinary function as Traverser. +type TraversePromoCodeUsage func(context.Context, *ent.PromoCodeUsageQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraversePromoCodeUsage) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraversePromoCodeUsage) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.PromoCodeUsageQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q) +} + +// The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier. +type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f ProxyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.ProxyQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.ProxyQuery", q) +} + +// The TraverseProxy type is an adapter to allow the use of ordinary function as Traverser. +type TraverseProxy func(context.Context, *ent.ProxyQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseProxy) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseProxy) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.ProxyQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.ProxyQuery", q) +} + +// The RedeemCodeFunc type is an adapter to allow the use of ordinary function as a Querier. +type RedeemCodeFunc func(context.Context, *ent.RedeemCodeQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f RedeemCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.RedeemCodeQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q) +} + +// The TraverseRedeemCode type is an adapter to allow the use of ordinary function as Traverser. +type TraverseRedeemCode func(context.Context, *ent.RedeemCodeQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseRedeemCode) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseRedeemCode) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.RedeemCodeQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q) +} + +// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier. +type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f SettingFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.SettingQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) +} + +// The TraverseSetting type is an adapter to allow the use of ordinary function as Traverser. 
+type TraverseSetting func(context.Context, *ent.SettingQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseSetting) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.SettingQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q) +} + +// The UsageLogFunc type is an adapter to allow the use of ordinary function as a Querier. +type UsageLogFunc func(context.Context, *ent.UsageLogQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UsageLogFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UsageLogQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UsageLogQuery", q) +} + +// The TraverseUsageLog type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUsageLog func(context.Context, *ent.UsageLogQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUsageLog) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUsageLog) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UsageLogQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UsageLogQuery", q) +} + +// The UserFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserFunc func(context.Context, *ent.UserQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q) +} + +// The TraverseUser type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUser func(context.Context, *ent.UserQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUser) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUser) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q) +} + +// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAllowedGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAllowedGroupQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q) +} + +// The TraverseUserAllowedGroup type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAllowedGroup func(context.Context, *ent.UserAllowedGroupQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAllowedGroup) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). 
+func (f TraverseUserAllowedGroup) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAllowedGroupQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q) +} + +// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAttributeDefinitionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q) +} + +// The TraverseUserAttributeDefinition type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAttributeDefinition func(context.Context, *ent.UserAttributeDefinitionQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAttributeDefinition) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserAttributeDefinition) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q) +} + +// The UserAttributeValueFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserAttributeValueFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserAttributeValueQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q) +} + +// The TraverseUserAttributeValue type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserAttributeValue func(context.Context, *ent.UserAttributeValueQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseUserAttributeValue) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserAttributeValue) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserAttributeValueQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q) +} + +// The UserSubscriptionFunc type is an adapter to allow the use of ordinary function as a Querier. +type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f UserSubscriptionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.UserSubscriptionQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q) +} + +// The TraverseUserSubscription type is an adapter to allow the use of ordinary function as Traverser. +type TraverseUserSubscription func(context.Context, *ent.UserSubscriptionQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. 
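//
// A minimal sketch (ours, not generated code) of a typed traverser: scoping
// every User query to non-deleted rows with the predicate generated for the
// nullable deleted_at field:
//
//	client.User.Intercept(
//		intercept.TraverseUser(func(ctx context.Context, q *ent.UserQuery) error {
//			q.Where(user.DeletedAtIsNil())
//			return nil
//		}),
//	)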
+func (f TraverseUserSubscription) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseUserSubscription) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.UserSubscriptionQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q) +} + +// NewQuery returns the generic Query interface for the given typed query. +func NewQuery(q ent.Query) (Query, error) { + switch q := q.(type) { + case *ent.APIKeyQuery: + return &query[*ent.APIKeyQuery, predicate.APIKey, apikey.OrderOption]{typ: ent.TypeAPIKey, tq: q}, nil + case *ent.AccountQuery: + return &query[*ent.AccountQuery, predicate.Account, account.OrderOption]{typ: ent.TypeAccount, tq: q}, nil + case *ent.AccountGroupQuery: + return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil + case *ent.GroupQuery: + return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil + case *ent.PromoCodeQuery: + return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil + case *ent.PromoCodeUsageQuery: + return &query[*ent.PromoCodeUsageQuery, predicate.PromoCodeUsage, promocodeusage.OrderOption]{typ: ent.TypePromoCodeUsage, tq: q}, nil + case *ent.ProxyQuery: + return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil + case *ent.RedeemCodeQuery: + return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil + case *ent.SettingQuery: + return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil + case *ent.UsageLogQuery: + return &query[*ent.UsageLogQuery, predicate.UsageLog, usagelog.OrderOption]{typ: ent.TypeUsageLog, tq: q}, nil + case *ent.UserQuery: + return &query[*ent.UserQuery, predicate.User, user.OrderOption]{typ: ent.TypeUser, tq: q}, nil + case *ent.UserAllowedGroupQuery: + return &query[*ent.UserAllowedGroupQuery, predicate.UserAllowedGroup, userallowedgroup.OrderOption]{typ: ent.TypeUserAllowedGroup, tq: q}, nil + case *ent.UserAttributeDefinitionQuery: + return &query[*ent.UserAttributeDefinitionQuery, predicate.UserAttributeDefinition, userattributedefinition.OrderOption]{typ: ent.TypeUserAttributeDefinition, tq: q}, nil + case *ent.UserAttributeValueQuery: + return &query[*ent.UserAttributeValueQuery, predicate.UserAttributeValue, userattributevalue.OrderOption]{typ: ent.TypeUserAttributeValue, tq: q}, nil + case *ent.UserSubscriptionQuery: + return &query[*ent.UserSubscriptionQuery, predicate.UserSubscription, usersubscription.OrderOption]{typ: ent.TypeUserSubscription, tq: q}, nil + default: + return nil, fmt.Errorf("unknown query type %T", q) + } +} + +type query[T any, P ~func(*sql.Selector), R ~func(*sql.Selector)] struct { + typ string + tq interface { + Limit(int) T + Offset(int) T + Unique(bool) T + Order(...R) T + Where(...P) T + } +} + +func (q query[T, P, R]) Type() string { + return q.typ +} + +func (q query[T, P, R]) Limit(limit int) { + q.tq.Limit(limit) +} + +func (q query[T, P, R]) Offset(offset int) { + q.tq.Offset(offset) +} + +func (q query[T, P, R]) Unique(unique bool) { + q.tq.Unique(unique) +} + +func (q query[T, P, R]) Order(orders ...func(*sql.Selector)) { + rs := make([]R, len(orders)) + for i := range orders { + rs[i] = orders[i] + } + q.tq.Order(rs...) 
+} + +func (q query[T, P, R]) WhereP(ps ...func(*sql.Selector)) { + p := make([]P, len(ps)) + for i := range ps { + p[i] = ps[i] + } + q.tq.Where(p...) +} diff --git a/backend/ent/migrate/migrate.go b/backend/ent/migrate/migrate.go new file mode 100644 index 00000000..1956a6bf --- /dev/null +++ b/backend/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go new file mode 100644 index 00000000..d769f611 --- /dev/null +++ b/backend/ent/migrate/schema.go @@ -0,0 +1,882 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // APIKeysColumns holds the columns for the "api_keys" table. 
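//
// The table definitions in this file are what the Create helpers above
// materialize. A minimal invocation sketch (assuming an open *ent.Client
// named client and a context ctx):
//
//	if err := client.Schema.Create(ctx,
//		migrate.WithDropIndex(true),
//		migrate.WithDropColumn(true),
//	); err != nil {
//		log.Fatalf("failed creating schema resources: %v", err)
//	}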
+ APIKeysColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "key", Type: field.TypeString, Unique: true, Size: 128}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true}, + {Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true}, + {Name: "group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "user_id", Type: field.TypeInt64}, + } + // APIKeysTable holds the schema information for the "api_keys" table. + APIKeysTable = &schema.Table{ + Name: "api_keys", + Columns: APIKeysColumns, + PrimaryKey: []*schema.Column{APIKeysColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "api_keys_groups_api_keys", + Columns: []*schema.Column{APIKeysColumns[9]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "api_keys_users_api_keys", + Columns: []*schema.Column{APIKeysColumns[10]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "apikey_user_id", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[10]}, + }, + { + Name: "apikey_group_id", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[9]}, + }, + { + Name: "apikey_status", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[6]}, + }, + { + Name: "apikey_deleted_at", + Unique: false, + Columns: []*schema.Column{APIKeysColumns[3]}, + }, + }, + } + // AccountsColumns holds the columns for the "accounts" table. 
+ AccountsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "platform", Type: field.TypeString, Size: 50}, + {Name: "type", Type: field.TypeString, Size: 20}, + {Name: "credentials", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "concurrency", Type: field.TypeInt, Default: 3}, + {Name: "priority", Type: field.TypeInt, Default: 50}, + {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "error_message", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "last_used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "auto_pause_on_expired", Type: field.TypeBool, Default: true}, + {Name: "schedulable", Type: field.TypeBool, Default: true}, + {Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20}, + {Name: "proxy_id", Type: field.TypeInt64, Nullable: true}, + } + // AccountsTable holds the schema information for the "accounts" table. 
+ AccountsTable = &schema.Table{ + Name: "accounts", + Columns: AccountsColumns, + PrimaryKey: []*schema.Column{AccountsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "accounts_proxies_proxy", + Columns: []*schema.Column{AccountsColumns[25]}, + RefColumns: []*schema.Column{ProxiesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "account_platform", + Unique: false, + Columns: []*schema.Column{AccountsColumns[6]}, + }, + { + Name: "account_type", + Unique: false, + Columns: []*schema.Column{AccountsColumns[7]}, + }, + { + Name: "account_status", + Unique: false, + Columns: []*schema.Column{AccountsColumns[13]}, + }, + { + Name: "account_proxy_id", + Unique: false, + Columns: []*schema.Column{AccountsColumns[25]}, + }, + { + Name: "account_priority", + Unique: false, + Columns: []*schema.Column{AccountsColumns[11]}, + }, + { + Name: "account_last_used_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[15]}, + }, + { + Name: "account_schedulable", + Unique: false, + Columns: []*schema.Column{AccountsColumns[18]}, + }, + { + Name: "account_rate_limited_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[19]}, + }, + { + Name: "account_rate_limit_reset_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[20]}, + }, + { + Name: "account_overload_until", + Unique: false, + Columns: []*schema.Column{AccountsColumns[21]}, + }, + { + Name: "account_deleted_at", + Unique: false, + Columns: []*schema.Column{AccountsColumns[3]}, + }, + }, + } + // AccountGroupsColumns holds the columns for the "account_groups" table. + AccountGroupsColumns = []*schema.Column{ + {Name: "priority", Type: field.TypeInt, Default: 50}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "account_id", Type: field.TypeInt64}, + {Name: "group_id", Type: field.TypeInt64}, + } + // AccountGroupsTable holds the schema information for the "account_groups" table. + AccountGroupsTable = &schema.Table{ + Name: "account_groups", + Columns: AccountGroupsColumns, + PrimaryKey: []*schema.Column{AccountGroupsColumns[2], AccountGroupsColumns[3]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "account_groups_accounts_account", + Columns: []*schema.Column{AccountGroupsColumns[2]}, + RefColumns: []*schema.Column{AccountsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "account_groups_groups_group", + Columns: []*schema.Column{AccountGroupsColumns[3]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "accountgroup_group_id", + Unique: false, + Columns: []*schema.Column{AccountGroupsColumns[3]}, + }, + { + Name: "accountgroup_priority", + Unique: false, + Columns: []*schema.Column{AccountGroupsColumns[0]}, + }, + }, + } + // GroupsColumns holds the columns for the "groups" table. 
+ GroupsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "description", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "is_exclusive", Type: field.TypeBool, Default: false}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "platform", Type: field.TypeString, Size: 50, Default: "anthropic"}, + {Name: "subscription_type", Type: field.TypeString, Size: 20, Default: "standard"}, + {Name: "daily_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "weekly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "monthly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "default_validity_days", Type: field.TypeInt, Default: 30}, + {Name: "image_price_1k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "image_price_2k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "claude_code_only", Type: field.TypeBool, Default: false}, + {Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true}, + } + // GroupsTable holds the schema information for the "groups" table. + GroupsTable = &schema.Table{ + Name: "groups", + Columns: GroupsColumns, + PrimaryKey: []*schema.Column{GroupsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "group_status", + Unique: false, + Columns: []*schema.Column{GroupsColumns[8]}, + }, + { + Name: "group_platform", + Unique: false, + Columns: []*schema.Column{GroupsColumns[9]}, + }, + { + Name: "group_subscription_type", + Unique: false, + Columns: []*schema.Column{GroupsColumns[10]}, + }, + { + Name: "group_is_exclusive", + Unique: false, + Columns: []*schema.Column{GroupsColumns[7]}, + }, + { + Name: "group_deleted_at", + Unique: false, + Columns: []*schema.Column{GroupsColumns[3]}, + }, + }, + } + // PromoCodesColumns holds the columns for the "promo_codes" table. 
+ PromoCodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "code", Type: field.TypeString, Unique: true, Size: 32}, + {Name: "bonus_amount", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "max_uses", Type: field.TypeInt, Default: 0}, + {Name: "used_count", Type: field.TypeInt, Default: 0}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // PromoCodesTable holds the schema information for the "promo_codes" table. + PromoCodesTable = &schema.Table{ + Name: "promo_codes", + Columns: PromoCodesColumns, + PrimaryKey: []*schema.Column{PromoCodesColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "promocode_status", + Unique: false, + Columns: []*schema.Column{PromoCodesColumns[5]}, + }, + { + Name: "promocode_expires_at", + Unique: false, + Columns: []*schema.Column{PromoCodesColumns[6]}, + }, + }, + } + // PromoCodeUsagesColumns holds the columns for the "promo_code_usages" table. + PromoCodeUsagesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "bonus_amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "used_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "promo_code_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + } + // PromoCodeUsagesTable holds the schema information for the "promo_code_usages" table. + PromoCodeUsagesTable = &schema.Table{ + Name: "promo_code_usages", + Columns: PromoCodeUsagesColumns, + PrimaryKey: []*schema.Column{PromoCodeUsagesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "promo_code_usages_promo_codes_usage_records", + Columns: []*schema.Column{PromoCodeUsagesColumns[3]}, + RefColumns: []*schema.Column{PromoCodesColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "promo_code_usages_users_promo_code_usages", + Columns: []*schema.Column{PromoCodeUsagesColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "promocodeusage_promo_code_id", + Unique: false, + Columns: []*schema.Column{PromoCodeUsagesColumns[3]}, + }, + { + Name: "promocodeusage_user_id", + Unique: false, + Columns: []*schema.Column{PromoCodeUsagesColumns[4]}, + }, + { + Name: "promocodeusage_promo_code_id_user_id", + Unique: true, + Columns: []*schema.Column{PromoCodeUsagesColumns[3], PromoCodeUsagesColumns[4]}, + }, + }, + } + // ProxiesColumns holds the columns for the "proxies" table. 
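//
// The unique index on (promo_code_id, user_id) above enforces one redemption
// per user per code at the database level. A hedged sketch of how that shows
// up through the generated client (setter names assumed from the column
// names; error handling illustrative):
//
//	_, err := client.PromoCodeUsage.Create().
//		SetPromoCodeID(codeID).
//		SetUserID(userID).
//		SetBonusAmount(amount).
//		Save(ctx)
//	if ent.IsConstraintError(err) {
//		// this user already redeemed this promo code
//	}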
+ ProxiesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "name", Type: field.TypeString, Size: 100}, + {Name: "protocol", Type: field.TypeString, Size: 20}, + {Name: "host", Type: field.TypeString, Size: 255}, + {Name: "port", Type: field.TypeInt}, + {Name: "username", Type: field.TypeString, Nullable: true, Size: 100}, + {Name: "password", Type: field.TypeString, Nullable: true, Size: 100}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + } + // ProxiesTable holds the schema information for the "proxies" table. + ProxiesTable = &schema.Table{ + Name: "proxies", + Columns: ProxiesColumns, + PrimaryKey: []*schema.Column{ProxiesColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "proxy_status", + Unique: false, + Columns: []*schema.Column{ProxiesColumns[10]}, + }, + { + Name: "proxy_deleted_at", + Unique: false, + Columns: []*schema.Column{ProxiesColumns[3]}, + }, + }, + } + // RedeemCodesColumns holds the columns for the "redeem_codes" table. + RedeemCodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "code", Type: field.TypeString, Unique: true, Size: 32}, + {Name: "type", Type: field.TypeString, Size: 20, Default: "balance"}, + {Name: "value", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "unused"}, + {Name: "used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "validity_days", Type: field.TypeInt, Default: 30}, + {Name: "group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "used_by", Type: field.TypeInt64, Nullable: true}, + } + // RedeemCodesTable holds the schema information for the "redeem_codes" table. + RedeemCodesTable = &schema.Table{ + Name: "redeem_codes", + Columns: RedeemCodesColumns, + PrimaryKey: []*schema.Column{RedeemCodesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "redeem_codes_groups_redeem_codes", + Columns: []*schema.Column{RedeemCodesColumns[9]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "redeem_codes_users_redeem_codes", + Columns: []*schema.Column{RedeemCodesColumns[10]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "redeemcode_status", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[4]}, + }, + { + Name: "redeemcode_used_by", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[10]}, + }, + { + Name: "redeemcode_group_id", + Unique: false, + Columns: []*schema.Column{RedeemCodesColumns[9]}, + }, + }, + } + // SettingsColumns holds the columns for the "settings" table. 
+ SettingsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "key", Type: field.TypeString, Unique: true, Size: 100}, + {Name: "value", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // SettingsTable holds the schema information for the "settings" table. + SettingsTable = &schema.Table{ + Name: "settings", + Columns: SettingsColumns, + PrimaryKey: []*schema.Column{SettingsColumns[0]}, + } + // UsageLogsColumns holds the columns for the "usage_logs" table. + UsageLogsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "request_id", Type: field.TypeString, Size: 64}, + {Name: "model", Type: field.TypeString, Size: 100}, + {Name: "input_tokens", Type: field.TypeInt, Default: 0}, + {Name: "output_tokens", Type: field.TypeInt, Default: 0}, + {Name: "cache_creation_tokens", Type: field.TypeInt, Default: 0}, + {Name: "cache_read_tokens", Type: field.TypeInt, Default: 0}, + {Name: "cache_creation_5m_tokens", Type: field.TypeInt, Default: 0}, + {Name: "cache_creation_1h_tokens", Type: field.TypeInt, Default: 0}, + {Name: "input_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "output_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "cache_creation_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "cache_read_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "total_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "actual_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "account_rate_multiplier", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "billing_type", Type: field.TypeInt8, Default: 0}, + {Name: "stream", Type: field.TypeBool, Default: false}, + {Name: "duration_ms", Type: field.TypeInt, Nullable: true}, + {Name: "first_token_ms", Type: field.TypeInt, Nullable: true}, + {Name: "user_agent", Type: field.TypeString, Nullable: true, Size: 512}, + {Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45}, + {Name: "image_count", Type: field.TypeInt, Default: 0}, + {Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "api_key_id", Type: field.TypeInt64}, + {Name: "account_id", Type: field.TypeInt64}, + {Name: "group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "subscription_id", Type: field.TypeInt64, Nullable: true}, + } + // UsageLogsTable holds the schema information for the "usage_logs" table. 
+ UsageLogsTable = &schema.Table{ + Name: "usage_logs", + Columns: UsageLogsColumns, + PrimaryKey: []*schema.Column{UsageLogsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "usage_logs_api_keys_usage_logs", + Columns: []*schema.Column{UsageLogsColumns[26]}, + RefColumns: []*schema.Column{APIKeysColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "usage_logs_accounts_usage_logs", + Columns: []*schema.Column{UsageLogsColumns[27]}, + RefColumns: []*schema.Column{AccountsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "usage_logs_groups_usage_logs", + Columns: []*schema.Column{UsageLogsColumns[28]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "usage_logs_users_usage_logs", + Columns: []*schema.Column{UsageLogsColumns[29]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "usage_logs_user_subscriptions_usage_logs", + Columns: []*schema.Column{UsageLogsColumns[30]}, + RefColumns: []*schema.Column{UserSubscriptionsColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "usagelog_user_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[29]}, + }, + { + Name: "usagelog_api_key_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[26]}, + }, + { + Name: "usagelog_account_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[27]}, + }, + { + Name: "usagelog_group_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[28]}, + }, + { + Name: "usagelog_subscription_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[30]}, + }, + { + Name: "usagelog_created_at", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[25]}, + }, + { + Name: "usagelog_model", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[2]}, + }, + { + Name: "usagelog_request_id", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[1]}, + }, + { + Name: "usagelog_user_id_created_at", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[25]}, + }, + { + Name: "usagelog_api_key_id_created_at", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[26], UsageLogsColumns[25]}, + }, + }, + } + // UsersColumns holds the columns for the "users" table. + UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "email", Type: field.TypeString, Size: 255}, + {Name: "password_hash", Type: field.TypeString, Size: 255}, + {Name: "role", Type: field.TypeString, Size: 20, Default: "user"}, + {Name: "balance", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "concurrency", Type: field.TypeInt, Default: 5}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "username", Type: field.TypeString, Size: 100, Default: ""}, + {Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + } + // UsersTable holds the schema information for the "users" table. 
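The usage_logs indexes just defined are easiest to read next to the queries they serve: the composite (user_id, created_at) and (api_key_id, created_at) pairs back the per-user and per-key history scans. A hedged query sketch against the generated client follows; the predicate and helper names follow ent's generation conventions for this schema (they live in the generated usagelog package, not in this hunk), and the function itself is illustrative.

import (
	"context"
	"time"

	gen "github.com/Wei-Shaw/sub2api/ent"
	"github.com/Wei-Shaw/sub2api/ent/usagelog"
)

// listRecentUsage returns one user's usage rows since `from`, newest first.
// The Where clause lines up with the usagelog_user_id_created_at composite
// index declared above, so the scan stays index-backed.
func listRecentUsage(ctx context.Context, client *gen.Client, userID int64, from time.Time) ([]*gen.UsageLog, error) {
	return client.UsageLog.Query().
		Where(usagelog.UserID(userID), usagelog.CreatedAtGTE(from)).
		Order(gen.Desc(usagelog.FieldCreatedAt)).
		All(ctx)
}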
+ UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "user_status", + Unique: false, + Columns: []*schema.Column{UsersColumns[9]}, + }, + { + Name: "user_deleted_at", + Unique: false, + Columns: []*schema.Column{UsersColumns[3]}, + }, + }, + } + // UserAllowedGroupsColumns holds the columns for the "user_allowed_groups" table. + UserAllowedGroupsColumns = []*schema.Column{ + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "group_id", Type: field.TypeInt64}, + } + // UserAllowedGroupsTable holds the schema information for the "user_allowed_groups" table. + UserAllowedGroupsTable = &schema.Table{ + Name: "user_allowed_groups", + Columns: UserAllowedGroupsColumns, + PrimaryKey: []*schema.Column{UserAllowedGroupsColumns[1], UserAllowedGroupsColumns[2]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_allowed_groups_users_user", + Columns: []*schema.Column{UserAllowedGroupsColumns[1]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_allowed_groups_groups_group", + Columns: []*schema.Column{UserAllowedGroupsColumns[2]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "userallowedgroup_group_id", + Unique: false, + Columns: []*schema.Column{UserAllowedGroupsColumns[2]}, + }, + }, + } + // UserAttributeDefinitionsColumns holds the columns for the "user_attribute_definitions" table. + UserAttributeDefinitionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "key", Type: field.TypeString, Size: 100}, + {Name: "name", Type: field.TypeString, Size: 255}, + {Name: "description", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}}, + {Name: "type", Type: field.TypeString, Size: 20}, + {Name: "options", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "required", Type: field.TypeBool, Default: false}, + {Name: "validation", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "placeholder", Type: field.TypeString, Size: 255, Default: ""}, + {Name: "display_order", Type: field.TypeInt, Default: 0}, + {Name: "enabled", Type: field.TypeBool, Default: true}, + } + // UserAttributeDefinitionsTable holds the schema information for the "user_attribute_definitions" table. 
+ UserAttributeDefinitionsTable = &schema.Table{ + Name: "user_attribute_definitions", + Columns: UserAttributeDefinitionsColumns, + PrimaryKey: []*schema.Column{UserAttributeDefinitionsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "userattributedefinition_key", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[4]}, + }, + { + Name: "userattributedefinition_enabled", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[13]}, + }, + { + Name: "userattributedefinition_display_order", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[12]}, + }, + { + Name: "userattributedefinition_deleted_at", + Unique: false, + Columns: []*schema.Column{UserAttributeDefinitionsColumns[3]}, + }, + }, + } + // UserAttributeValuesColumns holds the columns for the "user_attribute_values" table. + UserAttributeValuesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "value", Type: field.TypeString, Size: 2147483647, Default: ""}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "attribute_id", Type: field.TypeInt64}, + } + // UserAttributeValuesTable holds the schema information for the "user_attribute_values" table. + UserAttributeValuesTable = &schema.Table{ + Name: "user_attribute_values", + Columns: UserAttributeValuesColumns, + PrimaryKey: []*schema.Column{UserAttributeValuesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_attribute_values_users_attribute_values", + Columns: []*schema.Column{UserAttributeValuesColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_attribute_values_user_attribute_definitions_values", + Columns: []*schema.Column{UserAttributeValuesColumns[5]}, + RefColumns: []*schema.Column{UserAttributeDefinitionsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "userattributevalue_user_id_attribute_id", + Unique: true, + Columns: []*schema.Column{UserAttributeValuesColumns[4], UserAttributeValuesColumns[5]}, + }, + { + Name: "userattributevalue_attribute_id", + Unique: false, + Columns: []*schema.Column{UserAttributeValuesColumns[5]}, + }, + }, + } + // UserSubscriptionsColumns holds the columns for the "user_subscriptions" table. 
+ UserSubscriptionsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "starts_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "expires_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "daily_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "weekly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "monthly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "daily_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "weekly_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "monthly_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, + {Name: "assigned_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "group_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + {Name: "assigned_by", Type: field.TypeInt64, Nullable: true}, + } + // UserSubscriptionsTable holds the schema information for the "user_subscriptions" table. 
+ UserSubscriptionsTable = &schema.Table{ + Name: "user_subscriptions", + Columns: UserSubscriptionsColumns, + PrimaryKey: []*schema.Column{UserSubscriptionsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "user_subscriptions_groups_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[15]}, + RefColumns: []*schema.Column{GroupsColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_subscriptions_users_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[16]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "user_subscriptions_users_assigned_subscriptions", + Columns: []*schema.Column{UserSubscriptionsColumns[17]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + Indexes: []*schema.Index{ + { + Name: "usersubscription_user_id", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[16]}, + }, + { + Name: "usersubscription_group_id", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[15]}, + }, + { + Name: "usersubscription_status", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[6]}, + }, + { + Name: "usersubscription_expires_at", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[5]}, + }, + { + Name: "usersubscription_assigned_by", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[17]}, + }, + { + Name: "usersubscription_user_id_group_id", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[16], UserSubscriptionsColumns[15]}, + }, + { + Name: "usersubscription_deleted_at", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[3]}, + }, + }, + } + // Tables holds all the tables in the schema. 
+ Tables = []*schema.Table{ + APIKeysTable, + AccountsTable, + AccountGroupsTable, + GroupsTable, + PromoCodesTable, + PromoCodeUsagesTable, + ProxiesTable, + RedeemCodesTable, + SettingsTable, + UsageLogsTable, + UsersTable, + UserAllowedGroupsTable, + UserAttributeDefinitionsTable, + UserAttributeValuesTable, + UserSubscriptionsTable, + } +) + +func init() { + APIKeysTable.ForeignKeys[0].RefTable = GroupsTable + APIKeysTable.ForeignKeys[1].RefTable = UsersTable + APIKeysTable.Annotation = &entsql.Annotation{ + Table: "api_keys", + } + AccountsTable.ForeignKeys[0].RefTable = ProxiesTable + AccountsTable.Annotation = &entsql.Annotation{ + Table: "accounts", + } + AccountGroupsTable.ForeignKeys[0].RefTable = AccountsTable + AccountGroupsTable.ForeignKeys[1].RefTable = GroupsTable + AccountGroupsTable.Annotation = &entsql.Annotation{ + Table: "account_groups", + } + GroupsTable.Annotation = &entsql.Annotation{ + Table: "groups", + } + PromoCodesTable.Annotation = &entsql.Annotation{ + Table: "promo_codes", + } + PromoCodeUsagesTable.ForeignKeys[0].RefTable = PromoCodesTable + PromoCodeUsagesTable.ForeignKeys[1].RefTable = UsersTable + PromoCodeUsagesTable.Annotation = &entsql.Annotation{ + Table: "promo_code_usages", + } + ProxiesTable.Annotation = &entsql.Annotation{ + Table: "proxies", + } + RedeemCodesTable.ForeignKeys[0].RefTable = GroupsTable + RedeemCodesTable.ForeignKeys[1].RefTable = UsersTable + RedeemCodesTable.Annotation = &entsql.Annotation{ + Table: "redeem_codes", + } + SettingsTable.Annotation = &entsql.Annotation{ + Table: "settings", + } + UsageLogsTable.ForeignKeys[0].RefTable = APIKeysTable + UsageLogsTable.ForeignKeys[1].RefTable = AccountsTable + UsageLogsTable.ForeignKeys[2].RefTable = GroupsTable + UsageLogsTable.ForeignKeys[3].RefTable = UsersTable + UsageLogsTable.ForeignKeys[4].RefTable = UserSubscriptionsTable + UsageLogsTable.Annotation = &entsql.Annotation{ + Table: "usage_logs", + } + UsersTable.Annotation = &entsql.Annotation{ + Table: "users", + } + UserAllowedGroupsTable.ForeignKeys[0].RefTable = UsersTable + UserAllowedGroupsTable.ForeignKeys[1].RefTable = GroupsTable + UserAllowedGroupsTable.Annotation = &entsql.Annotation{ + Table: "user_allowed_groups", + } + UserAttributeDefinitionsTable.Annotation = &entsql.Annotation{ + Table: "user_attribute_definitions", + } + UserAttributeValuesTable.ForeignKeys[0].RefTable = UsersTable + UserAttributeValuesTable.ForeignKeys[1].RefTable = UserAttributeDefinitionsTable + UserAttributeValuesTable.Annotation = &entsql.Annotation{ + Table: "user_attribute_values", + } + UserSubscriptionsTable.ForeignKeys[0].RefTable = GroupsTable + UserSubscriptionsTable.ForeignKeys[1].RefTable = UsersTable + UserSubscriptionsTable.ForeignKeys[2].RefTable = UsersTable + UserSubscriptionsTable.Annotation = &entsql.Annotation{ + Table: "user_subscriptions", + } +} diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go new file mode 100644 index 00000000..3509efed --- /dev/null +++ b/backend/ent/mutation.go @@ -0,0 +1,18629 @@ +// Code generated by ent, DO NOT EDIT. 
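Everything above sits in one generated var block: each *schema.Table pairs its column list with index and foreign-key wiring, and the Tables slice plus the init() RefTable assignments are the single value ent's migration engine consumes. A minimal sketch of invoking that migration through the generated client; the DSN and the lib/pq driver registration are assumptions for illustration, not taken from this patch.

package main

import (
	"context"
	"log"

	"github.com/Wei-Shaw/sub2api/ent"

	_ "github.com/lib/pq" // assumed Postgres driver; the project may register another
)

func main() {
	// ent.Open builds the generated client on top of database/sql.
	client, err := ent.Open("postgres", "postgres://user:pass@localhost:5432/sub2api?sslmode=disable")
	if err != nil {
		log.Fatalf("open: %v", err)
	}
	defer client.Close()

	// Schema.Create diffs the Tables slice from migrate/schema.go against the
	// live database and applies the missing CREATE TABLE / CREATE INDEX DDL.
	if err := client.Schema.Create(context.Background()); err != nil {
		log.Fatalf("migrate: %v", err)
	}
}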
+ +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeAPIKey = "APIKey" + TypeAccount = "Account" + TypeAccountGroup = "AccountGroup" + TypeGroup = "Group" + TypePromoCode = "PromoCode" + TypePromoCodeUsage = "PromoCodeUsage" + TypeProxy = "Proxy" + TypeRedeemCode = "RedeemCode" + TypeSetting = "Setting" + TypeUsageLog = "UsageLog" + TypeUser = "User" + TypeUserAllowedGroup = "UserAllowedGroup" + TypeUserAttributeDefinition = "UserAttributeDefinition" + TypeUserAttributeValue = "UserAttributeValue" + TypeUserSubscription = "UserSubscription" +) + +// APIKeyMutation represents an operation that mutates the APIKey nodes in the graph. +type APIKeyMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + key *string + name *string + status *string + ip_whitelist *[]string + appendip_whitelist []string + ip_blacklist *[]string + appendip_blacklist []string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + done bool + oldValue func(context.Context) (*APIKey, error) + predicates []predicate.APIKey +} + +var _ ent.Mutation = (*APIKeyMutation)(nil) + +// apikeyOption allows management of the mutation configuration using functional options. +type apikeyOption func(*APIKeyMutation) + +// newAPIKeyMutation creates new mutation for the APIKey entity. +func newAPIKeyMutation(c config, op Op, opts ...apikeyOption) *APIKeyMutation { + m := &APIKeyMutation{ + config: c, + op: op, + typ: TypeAPIKey, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAPIKeyID sets the ID field of the mutation. +func withAPIKeyID(id int64) apikeyOption { + return func(m *APIKeyMutation) { + var ( + err error + once sync.Once + value *APIKey + ) + m.oldValue = func(ctx context.Context) (*APIKey, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().APIKey.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAPIKey sets the old APIKey of the mutation. +func withAPIKey(node *APIKey) apikeyOption { + return func(m *APIKeyMutation) { + m.oldValue = func(context.Context) (*APIKey, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. 
If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m APIKeyMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m APIKeyMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *APIKeyMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *APIKeyMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().APIKey.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *APIKeyMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *APIKeyMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *APIKeyMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *APIKeyMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *APIKeyMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
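Client and Tx above are what make hooks transaction-aware: a mutation executed inside an ent.Tx exposes that same transaction, so side-effect writes can join it and roll back with it. A hedged sketch follows; the "api_keys_dirty" settings key is invented purely for illustration, and the hook wiring may differ in this codebase.

import (
	"context"

	"entgo.io/ent"

	gen "github.com/Wei-Shaw/sub2api/ent"
	"github.com/Wei-Shaw/sub2api/ent/hook"
	"github.com/Wei-Shaw/sub2api/ent/setting"
)

// markAccountsDirty joins the surrounding transaction, if any, after an
// APIKey mutation succeeds. Tx() returns an error when no transaction is
// active, in which case the side effect is simply skipped.
func markAccountsDirty(next ent.Mutator) ent.Mutator {
	return hook.APIKeyFunc(func(ctx context.Context, m *gen.APIKeyMutation) (ent.Value, error) {
		v, err := next.Mutate(ctx, m)
		if err != nil {
			return nil, err
		}
		if tx, txErr := m.Tx(); txErr == nil {
			// Hypothetical bookkeeping write; it shares the mutation's
			// transaction and rolls back together with it.
			_, _ = tx.Setting.Update().
				Where(setting.KeyEQ("api_keys_dirty")).
				SetValue("1").
				Save(ctx)
		}
		return v, nil
	})
}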
+func (m *APIKeyMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *APIKeyMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *APIKeyMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *APIKeyMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *APIKeyMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[apikey.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *APIKeyMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[apikey.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *APIKeyMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, apikey.FieldDeletedAt) +} + +// SetUserID sets the "user_id" field. +func (m *APIKeyMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *APIKeyMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *APIKeyMutation) ResetUserID() { + m.user = nil +} + +// SetKey sets the "key" field. 
+func (m *APIKeyMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the value of the "key" field in the mutation. +func (m *APIKeyMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old "key" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey resets all changes to the "key" field. +func (m *APIKeyMutation) ResetKey() { + m.key = nil +} + +// SetName sets the "name" field. +func (m *APIKeyMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *APIKeyMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *APIKeyMutation) ResetName() { + m.name = nil +} + +// SetGroupID sets the "group_id" field. +func (m *APIKeyMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *APIKeyMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ClearGroupID clears the value of the "group_id" field. +func (m *APIKeyMutation) ClearGroupID() { + m.group = nil + m.clearedFields[apikey.FieldGroupID] = struct{}{} +} + +// GroupIDCleared returns if the "group_id" field was cleared in this mutation. 
+func (m *APIKeyMutation) GroupIDCleared() bool { + _, ok := m.clearedFields[apikey.FieldGroupID] + return ok +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *APIKeyMutation) ResetGroupID() { + m.group = nil + delete(m.clearedFields, apikey.FieldGroupID) +} + +// SetStatus sets the "status" field. +func (m *APIKeyMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *APIKeyMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *APIKeyMutation) ResetStatus() { + m.status = nil +} + +// SetIPWhitelist sets the "ip_whitelist" field. +func (m *APIKeyMutation) SetIPWhitelist(s []string) { + m.ip_whitelist = &s + m.appendip_whitelist = nil +} + +// IPWhitelist returns the value of the "ip_whitelist" field in the mutation. +func (m *APIKeyMutation) IPWhitelist() (r []string, exists bool) { + v := m.ip_whitelist + if v == nil { + return + } + return *v, true +} + +// OldIPWhitelist returns the old "ip_whitelist" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldIPWhitelist(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPWhitelist is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPWhitelist requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPWhitelist: %w", err) + } + return oldValue.IPWhitelist, nil +} + +// AppendIPWhitelist adds s to the "ip_whitelist" field. +func (m *APIKeyMutation) AppendIPWhitelist(s []string) { + m.appendip_whitelist = append(m.appendip_whitelist, s...) +} + +// AppendedIPWhitelist returns the list of values that were appended to the "ip_whitelist" field in this mutation. +func (m *APIKeyMutation) AppendedIPWhitelist() ([]string, bool) { + if len(m.appendip_whitelist) == 0 { + return nil, false + } + return m.appendip_whitelist, true +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (m *APIKeyMutation) ClearIPWhitelist() { + m.ip_whitelist = nil + m.appendip_whitelist = nil + m.clearedFields[apikey.FieldIPWhitelist] = struct{}{} +} + +// IPWhitelistCleared returns if the "ip_whitelist" field was cleared in this mutation. 
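The Old* accessors above all share one contract: they are valid only on UpdateOne, and the pre-mutation row is fetched lazily the first time one of them runs. That is what makes transition-style hooks cheap to write. A sketch of a status-transition audit hook; the registration point shown in the trailing comment is illustrative.

import (
	"context"
	"log"

	"entgo.io/ent"

	gen "github.com/Wei-Shaw/sub2api/ent"
	"github.com/Wei-Shaw/sub2api/ent/hook"
)

// auditStatusChange logs api key status transitions. hook.On restricts it to
// UpdateOne, which is the only operation where OldStatus is permitted.
func auditStatusChange(next ent.Mutator) ent.Mutator {
	return hook.APIKeyFunc(func(ctx context.Context, m *gen.APIKeyMutation) (ent.Value, error) {
		if newStatus, ok := m.Status(); ok {
			if oldStatus, err := m.OldStatus(ctx); err == nil && oldStatus != newStatus {
				log.Printf("api key status: %q -> %q", oldStatus, newStatus)
			}
		}
		return next.Mutate(ctx, m)
	})
}

// Registration (illustrative), typically at client construction:
//   client.APIKey.Use(hook.On(auditStatusChange, ent.OpUpdateOne))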
+func (m *APIKeyMutation) IPWhitelistCleared() bool { + _, ok := m.clearedFields[apikey.FieldIPWhitelist] + return ok +} + +// ResetIPWhitelist resets all changes to the "ip_whitelist" field. +func (m *APIKeyMutation) ResetIPWhitelist() { + m.ip_whitelist = nil + m.appendip_whitelist = nil + delete(m.clearedFields, apikey.FieldIPWhitelist) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (m *APIKeyMutation) SetIPBlacklist(s []string) { + m.ip_blacklist = &s + m.appendip_blacklist = nil +} + +// IPBlacklist returns the value of the "ip_blacklist" field in the mutation. +func (m *APIKeyMutation) IPBlacklist() (r []string, exists bool) { + v := m.ip_blacklist + if v == nil { + return + } + return *v, true +} + +// OldIPBlacklist returns the old "ip_blacklist" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldIPBlacklist(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPBlacklist is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPBlacklist requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPBlacklist: %w", err) + } + return oldValue.IPBlacklist, nil +} + +// AppendIPBlacklist adds s to the "ip_blacklist" field. +func (m *APIKeyMutation) AppendIPBlacklist(s []string) { + m.appendip_blacklist = append(m.appendip_blacklist, s...) +} + +// AppendedIPBlacklist returns the list of values that were appended to the "ip_blacklist" field in this mutation. +func (m *APIKeyMutation) AppendedIPBlacklist() ([]string, bool) { + if len(m.appendip_blacklist) == 0 { + return nil, false + } + return m.appendip_blacklist, true +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (m *APIKeyMutation) ClearIPBlacklist() { + m.ip_blacklist = nil + m.appendip_blacklist = nil + m.clearedFields[apikey.FieldIPBlacklist] = struct{}{} +} + +// IPBlacklistCleared returns if the "ip_blacklist" field was cleared in this mutation. +func (m *APIKeyMutation) IPBlacklistCleared() bool { + _, ok := m.clearedFields[apikey.FieldIPBlacklist] + return ok +} + +// ResetIPBlacklist resets all changes to the "ip_blacklist" field. +func (m *APIKeyMutation) ResetIPBlacklist() { + m.ip_blacklist = nil + m.appendip_blacklist = nil + delete(m.clearedFields, apikey.FieldIPBlacklist) +} + +// ClearUser clears the "user" edge to the User entity. +func (m *APIKeyMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[apikey.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *APIKeyMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *APIKeyMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *APIKeyMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. 
+func (m *APIKeyMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[apikey.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *APIKeyMutation) GroupCleared() bool { + return m.GroupIDCleared() || m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *APIKeyMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *APIKeyMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by ids. +func (m *APIKeyMutation) AddUsageLogIDs(ids ...int64) { + if m.usage_logs == nil { + m.usage_logs = make(map[int64]struct{}) + } + for i := range ids { + m.usage_logs[ids[i]] = struct{}{} + } +} + +// ClearUsageLogs clears the "usage_logs" edge to the UsageLog entity. +func (m *APIKeyMutation) ClearUsageLogs() { + m.clearedusage_logs = true +} + +// UsageLogsCleared reports if the "usage_logs" edge to the UsageLog entity was cleared. +func (m *APIKeyMutation) UsageLogsCleared() bool { + return m.clearedusage_logs +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to the UsageLog entity by IDs. +func (m *APIKeyMutation) RemoveUsageLogIDs(ids ...int64) { + if m.removedusage_logs == nil { + m.removedusage_logs = make(map[int64]struct{}) + } + for i := range ids { + delete(m.usage_logs, ids[i]) + m.removedusage_logs[ids[i]] = struct{}{} + } +} + +// RemovedUsageLogs returns the removed IDs of the "usage_logs" edge to the UsageLog entity. +func (m *APIKeyMutation) RemovedUsageLogsIDs() (ids []int64) { + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return +} + +// UsageLogsIDs returns the "usage_logs" edge IDs in the mutation. +func (m *APIKeyMutation) UsageLogsIDs() (ids []int64) { + for id := range m.usage_logs { + ids = append(ids, id) + } + return +} + +// ResetUsageLogs resets all changes to the "usage_logs" edge. +func (m *APIKeyMutation) ResetUsageLogs() { + m.usage_logs = nil + m.clearedusage_logs = false + m.removedusage_logs = nil +} + +// Where appends a list predicates to the APIKeyMutation builder. +func (m *APIKeyMutation) Where(ps ...predicate.APIKey) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the APIKeyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *APIKeyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.APIKey, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *APIKeyMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *APIKeyMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (APIKey). +func (m *APIKeyMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
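Application code does not call these edge and predicate methods directly; the generated fluent builders record each call onto the mutation before Save compiles it to SQL. A sketch of the builder-level view of the same operations; the builder method names follow ent's generation conventions for this schema (they live in apikey_update.go rather than in this hunk), and the CIDR is illustrative.

import (
	"context"

	gen "github.com/Wei-Shaw/sub2api/ent"
)

// allowInternalRange detaches an API key from its group and appends a CIDR to
// its IP whitelist. ClearGroup and AppendIPWhitelist are recorded on the
// underlying APIKeyMutation shown above, then executed by Save.
func allowInternalRange(ctx context.Context, client *gen.Client, id int64) error {
	_, err := client.APIKey.
		UpdateOneID(id).
		ClearGroup().
		AppendIPWhitelist([]string{"10.0.0.0/8"}).
		Save(ctx)
	return err
}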
+func (m *APIKeyMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.created_at != nil { + fields = append(fields, apikey.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, apikey.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, apikey.FieldDeletedAt) + } + if m.user != nil { + fields = append(fields, apikey.FieldUserID) + } + if m.key != nil { + fields = append(fields, apikey.FieldKey) + } + if m.name != nil { + fields = append(fields, apikey.FieldName) + } + if m.group != nil { + fields = append(fields, apikey.FieldGroupID) + } + if m.status != nil { + fields = append(fields, apikey.FieldStatus) + } + if m.ip_whitelist != nil { + fields = append(fields, apikey.FieldIPWhitelist) + } + if m.ip_blacklist != nil { + fields = append(fields, apikey.FieldIPBlacklist) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *APIKeyMutation) Field(name string) (ent.Value, bool) { + switch name { + case apikey.FieldCreatedAt: + return m.CreatedAt() + case apikey.FieldUpdatedAt: + return m.UpdatedAt() + case apikey.FieldDeletedAt: + return m.DeletedAt() + case apikey.FieldUserID: + return m.UserID() + case apikey.FieldKey: + return m.Key() + case apikey.FieldName: + return m.Name() + case apikey.FieldGroupID: + return m.GroupID() + case apikey.FieldStatus: + return m.Status() + case apikey.FieldIPWhitelist: + return m.IPWhitelist() + case apikey.FieldIPBlacklist: + return m.IPBlacklist() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *APIKeyMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case apikey.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case apikey.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case apikey.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case apikey.FieldUserID: + return m.OldUserID(ctx) + case apikey.FieldKey: + return m.OldKey(ctx) + case apikey.FieldName: + return m.OldName(ctx) + case apikey.FieldGroupID: + return m.OldGroupID(ctx) + case apikey.FieldStatus: + return m.OldStatus(ctx) + case apikey.FieldIPWhitelist: + return m.OldIPWhitelist(ctx) + case apikey.FieldIPBlacklist: + return m.OldIPBlacklist(ctx) + } + return nil, fmt.Errorf("unknown APIKey field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
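Fields, Field, and OldField are the dynamic side of the generated API: together with Op and Type they satisfy the ent.Mutation interface, so schema-agnostic middleware can inspect any of the fifteen node types without a type switch. A minimal sketch of such a mutator, patterned on ent's documented logging-hook example:

import (
	"context"
	"log"

	"entgo.io/ent"
)

// logChangedFields works for every node type because it relies only on the
// ent.Mutation interface methods generated above (Op, Type, Fields, Field).
func logChangedFields(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		for _, name := range m.Fields() {
			if v, ok := m.Field(name); ok {
				log.Printf("%s %s: set %s = %v", m.Op(), m.Type(), name, v)
			}
		}
		return next.Mutate(ctx, m)
	})
}

// client.Use(logChangedFields) would attach it to every node type at once.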
+func (m *APIKeyMutation) SetField(name string, value ent.Value) error { + switch name { + case apikey.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case apikey.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case apikey.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case apikey.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case apikey.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case apikey.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case apikey.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case apikey.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case apikey.FieldIPWhitelist: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPWhitelist(v) + return nil + case apikey.FieldIPBlacklist: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPBlacklist(v) + return nil + } + return fmt.Errorf("unknown APIKey field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *APIKeyMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *APIKeyMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *APIKeyMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown APIKey numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *APIKeyMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(apikey.FieldDeletedAt) { + fields = append(fields, apikey.FieldDeletedAt) + } + if m.FieldCleared(apikey.FieldGroupID) { + fields = append(fields, apikey.FieldGroupID) + } + if m.FieldCleared(apikey.FieldIPWhitelist) { + fields = append(fields, apikey.FieldIPWhitelist) + } + if m.FieldCleared(apikey.FieldIPBlacklist) { + fields = append(fields, apikey.FieldIPBlacklist) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. 
+func (m *APIKeyMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *APIKeyMutation) ClearField(name string) error { + switch name { + case apikey.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case apikey.FieldGroupID: + m.ClearGroupID() + return nil + case apikey.FieldIPWhitelist: + m.ClearIPWhitelist() + return nil + case apikey.FieldIPBlacklist: + m.ClearIPBlacklist() + return nil + } + return fmt.Errorf("unknown APIKey nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *APIKeyMutation) ResetField(name string) error { + switch name { + case apikey.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case apikey.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case apikey.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case apikey.FieldUserID: + m.ResetUserID() + return nil + case apikey.FieldKey: + m.ResetKey() + return nil + case apikey.FieldName: + m.ResetName() + return nil + case apikey.FieldGroupID: + m.ResetGroupID() + return nil + case apikey.FieldStatus: + m.ResetStatus() + return nil + case apikey.FieldIPWhitelist: + m.ResetIPWhitelist() + return nil + case apikey.FieldIPBlacklist: + m.ResetIPBlacklist() + return nil + } + return fmt.Errorf("unknown APIKey field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *APIKeyMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.user != nil { + edges = append(edges, apikey.EdgeUser) + } + if m.group != nil { + edges = append(edges, apikey.EdgeGroup) + } + if m.usage_logs != nil { + edges = append(edges, apikey.EdgeUsageLogs) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *APIKeyMutation) AddedIDs(name string) []ent.Value { + switch name { + case apikey.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case apikey.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + case apikey.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.usage_logs)) + for id := range m.usage_logs { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *APIKeyMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedusage_logs != nil { + edges = append(edges, apikey.EdgeUsageLogs) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *APIKeyMutation) RemovedIDs(name string) []ent.Value { + switch name { + case apikey.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.removedusage_logs)) + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *APIKeyMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.cleareduser { + edges = append(edges, apikey.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, apikey.EdgeGroup) + } + if m.clearedusage_logs { + edges = append(edges, apikey.EdgeUsageLogs) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *APIKeyMutation) EdgeCleared(name string) bool { + switch name { + case apikey.EdgeUser: + return m.cleareduser + case apikey.EdgeGroup: + return m.clearedgroup + case apikey.EdgeUsageLogs: + return m.clearedusage_logs + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *APIKeyMutation) ClearEdge(name string) error { + switch name { + case apikey.EdgeUser: + m.ClearUser() + return nil + case apikey.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown APIKey unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *APIKeyMutation) ResetEdge(name string) error { + switch name { + case apikey.EdgeUser: + m.ResetUser() + return nil + case apikey.EdgeGroup: + m.ResetGroup() + return nil + case apikey.EdgeUsageLogs: + m.ResetUsageLogs() + return nil + } + return fmt.Errorf("unknown APIKey edge %s", name) +} + +// AccountMutation represents an operation that mutates the Account nodes in the graph. +type AccountMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + notes *string + platform *string + _type *string + credentials *map[string]interface{} + extra *map[string]interface{} + concurrency *int + addconcurrency *int + priority *int + addpriority *int + rate_multiplier *float64 + addrate_multiplier *float64 + status *string + error_message *string + last_used_at *time.Time + expires_at *time.Time + auto_pause_on_expired *bool + schedulable *bool + rate_limited_at *time.Time + rate_limit_reset_at *time.Time + overload_until *time.Time + session_window_start *time.Time + session_window_end *time.Time + session_window_status *string + clearedFields map[string]struct{} + groups map[int64]struct{} + removedgroups map[int64]struct{} + clearedgroups bool + proxy *int64 + clearedproxy bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + done bool + oldValue func(context.Context) (*Account, error) + predicates []predicate.Account +} + +var _ ent.Mutation = (*AccountMutation)(nil) + +// accountOption allows management of the mutation configuration using functional options. +type accountOption func(*AccountMutation) + +// newAccountMutation creates new mutation for the Account entity. +func newAccountMutation(c config, op Op, opts ...accountOption) *AccountMutation { + m := &AccountMutation{ + config: c, + op: op, + typ: TypeAccount, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAccountID sets the ID field of the mutation. 
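AccountMutation, declared above, shows the paired-pointer pattern for numeric fields: concurrency/addconcurrency, priority/addpriority, and rate_multiplier/addrate_multiplier let the mutation distinguish "set to N" from "add N". At the builder level that difference is a single method name; a brief hedged sketch (AddPriority lives in account_update.go, not in this hunk):

import (
	"context"

	gen "github.com/Wei-Shaw/sub2api/ent"
)

// bumpPriority records an increment in AccountMutation.addpriority, which is
// compiled to "SET priority = priority + 1" rather than overwriting a value
// read earlier, so concurrent bumps do not lose updates.
func bumpPriority(ctx context.Context, client *gen.Client, id int64) error {
	_, err := client.Account.UpdateOneID(id).AddPriority(1).Save(ctx)
	return err
}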
+func withAccountID(id int64) accountOption { + return func(m *AccountMutation) { + var ( + err error + once sync.Once + value *Account + ) + m.oldValue = func(ctx context.Context) (*Account, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Account.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAccount sets the old Account of the mutation. +func withAccount(node *Account) accountOption { + return func(m *AccountMutation) { + m.oldValue = func(context.Context) (*Account, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AccountMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AccountMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AccountMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AccountMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Account.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *AccountMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AccountMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. 
+func (m *AccountMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AccountMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AccountMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AccountMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *AccountMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *AccountMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *AccountMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[account.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *AccountMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[account.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *AccountMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, account.FieldDeletedAt) +} + +// SetName sets the "name" field. +func (m *AccountMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *AccountMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *AccountMutation) ResetName() { + m.name = nil +} + +// SetNotes sets the "notes" field. +func (m *AccountMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *AccountMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *AccountMutation) ClearNotes() { + m.notes = nil + m.clearedFields[account.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *AccountMutation) NotesCleared() bool { + _, ok := m.clearedFields[account.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *AccountMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, account.FieldNotes) +} + +// SetPlatform sets the "platform" field. +func (m *AccountMutation) SetPlatform(s string) { + m.platform = &s +} + +// Platform returns the value of the "platform" field in the mutation. +func (m *AccountMutation) Platform() (r string, exists bool) { + v := m.platform + if v == nil { + return + } + return *v, true +} + +// OldPlatform returns the old "platform" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldPlatform(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlatform is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlatform requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlatform: %w", err) + } + return oldValue.Platform, nil +} + +// ResetPlatform resets all changes to the "platform" field. +func (m *AccountMutation) ResetPlatform() { + m.platform = nil +} + +// SetType sets the "type" field. 
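+//
+// (The backing struct field is named _type and the value getter is GetType:
+// "type" is a reserved word in Go, and a method named Type would collide with
+// the mutation's Type() method.)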
+func (m *AccountMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *AccountMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *AccountMutation) ResetType() { + m._type = nil +} + +// SetCredentials sets the "credentials" field. +func (m *AccountMutation) SetCredentials(value map[string]interface{}) { + m.credentials = &value +} + +// Credentials returns the value of the "credentials" field in the mutation. +func (m *AccountMutation) Credentials() (r map[string]interface{}, exists bool) { + v := m.credentials + if v == nil { + return + } + return *v, true +} + +// OldCredentials returns the old "credentials" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldCredentials(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCredentials is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCredentials requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCredentials: %w", err) + } + return oldValue.Credentials, nil +} + +// ResetCredentials resets all changes to the "credentials" field. +func (m *AccountMutation) ResetCredentials() { + m.credentials = nil +} + +// SetExtra sets the "extra" field. +func (m *AccountMutation) SetExtra(value map[string]interface{}) { + m.extra = &value +} + +// Extra returns the value of the "extra" field in the mutation. +func (m *AccountMutation) Extra() (r map[string]interface{}, exists bool) { + v := m.extra + if v == nil { + return + } + return *v, true +} + +// OldExtra returns the old "extra" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
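+//
+// A minimal hook sketch reading the old value (client is an assumed *Client;
+// the auditing policy is illustrative, not part of this schema):
+//
+//	client.Account.Use(func(next Mutator) Mutator {
+//		return MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+//			if am, ok := m.(*AccountMutation); ok && am.Op().Is(OpUpdateOne) {
+//				if old, err := am.OldExtra(ctx); err == nil {
+//					_ = old // e.g. diff against the pending Extra() before saving
+//				}
+//			}
+//			return next.Mutate(ctx, m)
+//		})
+//	})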
+func (m *AccountMutation) OldExtra(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtra is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtra requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtra: %w", err) + } + return oldValue.Extra, nil +} + +// ResetExtra resets all changes to the "extra" field. +func (m *AccountMutation) ResetExtra() { + m.extra = nil +} + +// SetProxyID sets the "proxy_id" field. +func (m *AccountMutation) SetProxyID(i int64) { + m.proxy = &i +} + +// ProxyID returns the value of the "proxy_id" field in the mutation. +func (m *AccountMutation) ProxyID() (r int64, exists bool) { + v := m.proxy + if v == nil { + return + } + return *v, true +} + +// OldProxyID returns the old "proxy_id" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldProxyID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProxyID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProxyID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProxyID: %w", err) + } + return oldValue.ProxyID, nil +} + +// ClearProxyID clears the value of the "proxy_id" field. +func (m *AccountMutation) ClearProxyID() { + m.proxy = nil + m.clearedFields[account.FieldProxyID] = struct{}{} +} + +// ProxyIDCleared returns if the "proxy_id" field was cleared in this mutation. +func (m *AccountMutation) ProxyIDCleared() bool { + _, ok := m.clearedFields[account.FieldProxyID] + return ok +} + +// ResetProxyID resets all changes to the "proxy_id" field. +func (m *AccountMutation) ResetProxyID() { + m.proxy = nil + delete(m.clearedFields, account.FieldProxyID) +} + +// SetConcurrency sets the "concurrency" field. +func (m *AccountMutation) SetConcurrency(i int) { + m.concurrency = &i + m.addconcurrency = nil +} + +// Concurrency returns the value of the "concurrency" field in the mutation. +func (m *AccountMutation) Concurrency() (r int, exists bool) { + v := m.concurrency + if v == nil { + return + } + return *v, true +} + +// OldConcurrency returns the old "concurrency" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldConcurrency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConcurrency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConcurrency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConcurrency: %w", err) + } + return oldValue.Concurrency, nil +} + +// AddConcurrency adds i to the "concurrency" field. 
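+//
+// Increments accumulate into one pending delta instead of overwriting each
+// other (the values below are hypothetical):
+//
+//	m.AddConcurrency(2)
+//	m.AddConcurrency(3) // AddedConcurrency() now reports (5, true)
+//	m.SetConcurrency(8) // an explicit Set discards the pending delta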
+func (m *AccountMutation) AddConcurrency(i int) { + if m.addconcurrency != nil { + *m.addconcurrency += i + } else { + m.addconcurrency = &i + } +} + +// AddedConcurrency returns the value that was added to the "concurrency" field in this mutation. +func (m *AccountMutation) AddedConcurrency() (r int, exists bool) { + v := m.addconcurrency + if v == nil { + return + } + return *v, true +} + +// ResetConcurrency resets all changes to the "concurrency" field. +func (m *AccountMutation) ResetConcurrency() { + m.concurrency = nil + m.addconcurrency = nil +} + +// SetPriority sets the "priority" field. +func (m *AccountMutation) SetPriority(i int) { + m.priority = &i + m.addpriority = nil +} + +// Priority returns the value of the "priority" field in the mutation. +func (m *AccountMutation) Priority() (r int, exists bool) { + v := m.priority + if v == nil { + return + } + return *v, true +} + +// OldPriority returns the old "priority" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldPriority(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPriority is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPriority requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPriority: %w", err) + } + return oldValue.Priority, nil +} + +// AddPriority adds i to the "priority" field. +func (m *AccountMutation) AddPriority(i int) { + if m.addpriority != nil { + *m.addpriority += i + } else { + m.addpriority = &i + } +} + +// AddedPriority returns the value that was added to the "priority" field in this mutation. +func (m *AccountMutation) AddedPriority() (r int, exists bool) { + v := m.addpriority + if v == nil { + return + } + return *v, true +} + +// ResetPriority resets all changes to the "priority" field. +func (m *AccountMutation) ResetPriority() { + m.priority = nil + m.addpriority = nil +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (m *AccountMutation) SetRateMultiplier(f float64) { + m.rate_multiplier = &f + m.addrate_multiplier = nil +} + +// RateMultiplier returns the value of the "rate_multiplier" field in the mutation. +func (m *AccountMutation) RateMultiplier() (r float64, exists bool) { + v := m.rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldRateMultiplier returns the old "rate_multiplier" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err) + } + return oldValue.RateMultiplier, nil +} + +// AddRateMultiplier adds f to the "rate_multiplier" field. 
+func (m *AccountMutation) AddRateMultiplier(f float64) { + if m.addrate_multiplier != nil { + *m.addrate_multiplier += f + } else { + m.addrate_multiplier = &f + } +} + +// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation. +func (m *AccountMutation) AddedRateMultiplier() (r float64, exists bool) { + v := m.addrate_multiplier + if v == nil { + return + } + return *v, true +} + +// ResetRateMultiplier resets all changes to the "rate_multiplier" field. +func (m *AccountMutation) ResetRateMultiplier() { + m.rate_multiplier = nil + m.addrate_multiplier = nil +} + +// SetStatus sets the "status" field. +func (m *AccountMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *AccountMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *AccountMutation) ResetStatus() { + m.status = nil +} + +// SetErrorMessage sets the "error_message" field. +func (m *AccountMutation) SetErrorMessage(s string) { + m.error_message = &s +} + +// ErrorMessage returns the value of the "error_message" field in the mutation. +func (m *AccountMutation) ErrorMessage() (r string, exists bool) { + v := m.error_message + if v == nil { + return + } + return *v, true +} + +// OldErrorMessage returns the old "error_message" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldErrorMessage(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) + } + return oldValue.ErrorMessage, nil +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (m *AccountMutation) ClearErrorMessage() { + m.error_message = nil + m.clearedFields[account.FieldErrorMessage] = struct{}{} +} + +// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. +func (m *AccountMutation) ErrorMessageCleared() bool { + _, ok := m.clearedFields[account.FieldErrorMessage] + return ok +} + +// ResetErrorMessage resets all changes to the "error_message" field. 
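+//
+// Reset is not Clear: ClearErrorMessage stages a NULL write, while Reset only
+// drops whatever change is pending, leaving the stored value untouched:
+//
+//	m.ClearErrorMessage() // ErrorMessageCleared() == true
+//	m.ResetErrorMessage() // staged clear dropped; ErrorMessageCleared() == false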
+func (m *AccountMutation) ResetErrorMessage() { + m.error_message = nil + delete(m.clearedFields, account.FieldErrorMessage) +} + +// SetLastUsedAt sets the "last_used_at" field. +func (m *AccountMutation) SetLastUsedAt(t time.Time) { + m.last_used_at = &t +} + +// LastUsedAt returns the value of the "last_used_at" field in the mutation. +func (m *AccountMutation) LastUsedAt() (r time.Time, exists bool) { + v := m.last_used_at + if v == nil { + return + } + return *v, true +} + +// OldLastUsedAt returns the old "last_used_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldLastUsedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLastUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLastUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLastUsedAt: %w", err) + } + return oldValue.LastUsedAt, nil +} + +// ClearLastUsedAt clears the value of the "last_used_at" field. +func (m *AccountMutation) ClearLastUsedAt() { + m.last_used_at = nil + m.clearedFields[account.FieldLastUsedAt] = struct{}{} +} + +// LastUsedAtCleared returns if the "last_used_at" field was cleared in this mutation. +func (m *AccountMutation) LastUsedAtCleared() bool { + _, ok := m.clearedFields[account.FieldLastUsedAt] + return ok +} + +// ResetLastUsedAt resets all changes to the "last_used_at" field. +func (m *AccountMutation) ResetLastUsedAt() { + m.last_used_at = nil + delete(m.clearedFields, account.FieldLastUsedAt) +} + +// SetExpiresAt sets the "expires_at" field. +func (m *AccountMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *AccountMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldExpiresAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (m *AccountMutation) ClearExpiresAt() { + m.expires_at = nil + m.clearedFields[account.FieldExpiresAt] = struct{}{} +} + +// ExpiresAtCleared returns if the "expires_at" field was cleared in this mutation. +func (m *AccountMutation) ExpiresAtCleared() bool { + _, ok := m.clearedFields[account.FieldExpiresAt] + return ok +} + +// ResetExpiresAt resets all changes to the "expires_at" field. 
+func (m *AccountMutation) ResetExpiresAt() { + m.expires_at = nil + delete(m.clearedFields, account.FieldExpiresAt) +} + +// SetAutoPauseOnExpired sets the "auto_pause_on_expired" field. +func (m *AccountMutation) SetAutoPauseOnExpired(b bool) { + m.auto_pause_on_expired = &b +} + +// AutoPauseOnExpired returns the value of the "auto_pause_on_expired" field in the mutation. +func (m *AccountMutation) AutoPauseOnExpired() (r bool, exists bool) { + v := m.auto_pause_on_expired + if v == nil { + return + } + return *v, true +} + +// OldAutoPauseOnExpired returns the old "auto_pause_on_expired" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldAutoPauseOnExpired(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAutoPauseOnExpired is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAutoPauseOnExpired requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAutoPauseOnExpired: %w", err) + } + return oldValue.AutoPauseOnExpired, nil +} + +// ResetAutoPauseOnExpired resets all changes to the "auto_pause_on_expired" field. +func (m *AccountMutation) ResetAutoPauseOnExpired() { + m.auto_pause_on_expired = nil +} + +// SetSchedulable sets the "schedulable" field. +func (m *AccountMutation) SetSchedulable(b bool) { + m.schedulable = &b +} + +// Schedulable returns the value of the "schedulable" field in the mutation. +func (m *AccountMutation) Schedulable() (r bool, exists bool) { + v := m.schedulable + if v == nil { + return + } + return *v, true +} + +// OldSchedulable returns the old "schedulable" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSchedulable(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSchedulable is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSchedulable requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSchedulable: %w", err) + } + return oldValue.Schedulable, nil +} + +// ResetSchedulable resets all changes to the "schedulable" field. +func (m *AccountMutation) ResetSchedulable() { + m.schedulable = nil +} + +// SetRateLimitedAt sets the "rate_limited_at" field. +func (m *AccountMutation) SetRateLimitedAt(t time.Time) { + m.rate_limited_at = &t +} + +// RateLimitedAt returns the value of the "rate_limited_at" field in the mutation. +func (m *AccountMutation) RateLimitedAt() (r time.Time, exists bool) { + v := m.rate_limited_at + if v == nil { + return + } + return *v, true +} + +// OldRateLimitedAt returns the old "rate_limited_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
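+//
+// Nullable columns surface through their Old* getters as pointers (here
+// *time.Time); a nil result with a nil error means the stored value was NULL.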
+func (m *AccountMutation) OldRateLimitedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateLimitedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateLimitedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateLimitedAt: %w", err) + } + return oldValue.RateLimitedAt, nil +} + +// ClearRateLimitedAt clears the value of the "rate_limited_at" field. +func (m *AccountMutation) ClearRateLimitedAt() { + m.rate_limited_at = nil + m.clearedFields[account.FieldRateLimitedAt] = struct{}{} +} + +// RateLimitedAtCleared returns if the "rate_limited_at" field was cleared in this mutation. +func (m *AccountMutation) RateLimitedAtCleared() bool { + _, ok := m.clearedFields[account.FieldRateLimitedAt] + return ok +} + +// ResetRateLimitedAt resets all changes to the "rate_limited_at" field. +func (m *AccountMutation) ResetRateLimitedAt() { + m.rate_limited_at = nil + delete(m.clearedFields, account.FieldRateLimitedAt) +} + +// SetRateLimitResetAt sets the "rate_limit_reset_at" field. +func (m *AccountMutation) SetRateLimitResetAt(t time.Time) { + m.rate_limit_reset_at = &t +} + +// RateLimitResetAt returns the value of the "rate_limit_reset_at" field in the mutation. +func (m *AccountMutation) RateLimitResetAt() (r time.Time, exists bool) { + v := m.rate_limit_reset_at + if v == nil { + return + } + return *v, true +} + +// OldRateLimitResetAt returns the old "rate_limit_reset_at" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldRateLimitResetAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateLimitResetAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateLimitResetAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateLimitResetAt: %w", err) + } + return oldValue.RateLimitResetAt, nil +} + +// ClearRateLimitResetAt clears the value of the "rate_limit_reset_at" field. +func (m *AccountMutation) ClearRateLimitResetAt() { + m.rate_limit_reset_at = nil + m.clearedFields[account.FieldRateLimitResetAt] = struct{}{} +} + +// RateLimitResetAtCleared returns if the "rate_limit_reset_at" field was cleared in this mutation. +func (m *AccountMutation) RateLimitResetAtCleared() bool { + _, ok := m.clearedFields[account.FieldRateLimitResetAt] + return ok +} + +// ResetRateLimitResetAt resets all changes to the "rate_limit_reset_at" field. +func (m *AccountMutation) ResetRateLimitResetAt() { + m.rate_limit_reset_at = nil + delete(m.clearedFields, account.FieldRateLimitResetAt) +} + +// SetOverloadUntil sets the "overload_until" field. +func (m *AccountMutation) SetOverloadUntil(t time.Time) { + m.overload_until = &t +} + +// OverloadUntil returns the value of the "overload_until" field in the mutation. +func (m *AccountMutation) OverloadUntil() (r time.Time, exists bool) { + v := m.overload_until + if v == nil { + return + } + return *v, true +} + +// OldOverloadUntil returns the old "overload_until" field's value of the Account entity. 
+// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldOverloadUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOverloadUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOverloadUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOverloadUntil: %w", err) + } + return oldValue.OverloadUntil, nil +} + +// ClearOverloadUntil clears the value of the "overload_until" field. +func (m *AccountMutation) ClearOverloadUntil() { + m.overload_until = nil + m.clearedFields[account.FieldOverloadUntil] = struct{}{} +} + +// OverloadUntilCleared returns if the "overload_until" field was cleared in this mutation. +func (m *AccountMutation) OverloadUntilCleared() bool { + _, ok := m.clearedFields[account.FieldOverloadUntil] + return ok +} + +// ResetOverloadUntil resets all changes to the "overload_until" field. +func (m *AccountMutation) ResetOverloadUntil() { + m.overload_until = nil + delete(m.clearedFields, account.FieldOverloadUntil) +} + +// SetSessionWindowStart sets the "session_window_start" field. +func (m *AccountMutation) SetSessionWindowStart(t time.Time) { + m.session_window_start = &t +} + +// SessionWindowStart returns the value of the "session_window_start" field in the mutation. +func (m *AccountMutation) SessionWindowStart() (r time.Time, exists bool) { + v := m.session_window_start + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowStart returns the old "session_window_start" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowStart: %w", err) + } + return oldValue.SessionWindowStart, nil +} + +// ClearSessionWindowStart clears the value of the "session_window_start" field. +func (m *AccountMutation) ClearSessionWindowStart() { + m.session_window_start = nil + m.clearedFields[account.FieldSessionWindowStart] = struct{}{} +} + +// SessionWindowStartCleared returns if the "session_window_start" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowStartCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowStart] + return ok +} + +// ResetSessionWindowStart resets all changes to the "session_window_start" field. +func (m *AccountMutation) ResetSessionWindowStart() { + m.session_window_start = nil + delete(m.clearedFields, account.FieldSessionWindowStart) +} + +// SetSessionWindowEnd sets the "session_window_end" field. +func (m *AccountMutation) SetSessionWindowEnd(t time.Time) { + m.session_window_end = &t +} + +// SessionWindowEnd returns the value of the "session_window_end" field in the mutation. 
+func (m *AccountMutation) SessionWindowEnd() (r time.Time, exists bool) { + v := m.session_window_end + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowEnd returns the old "session_window_end" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowEnd(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowEnd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowEnd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowEnd: %w", err) + } + return oldValue.SessionWindowEnd, nil +} + +// ClearSessionWindowEnd clears the value of the "session_window_end" field. +func (m *AccountMutation) ClearSessionWindowEnd() { + m.session_window_end = nil + m.clearedFields[account.FieldSessionWindowEnd] = struct{}{} +} + +// SessionWindowEndCleared returns if the "session_window_end" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowEndCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowEnd] + return ok +} + +// ResetSessionWindowEnd resets all changes to the "session_window_end" field. +func (m *AccountMutation) ResetSessionWindowEnd() { + m.session_window_end = nil + delete(m.clearedFields, account.FieldSessionWindowEnd) +} + +// SetSessionWindowStatus sets the "session_window_status" field. +func (m *AccountMutation) SetSessionWindowStatus(s string) { + m.session_window_status = &s +} + +// SessionWindowStatus returns the value of the "session_window_status" field in the mutation. +func (m *AccountMutation) SessionWindowStatus() (r string, exists bool) { + v := m.session_window_status + if v == nil { + return + } + return *v, true +} + +// OldSessionWindowStatus returns the old "session_window_status" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldSessionWindowStatus(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSessionWindowStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSessionWindowStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSessionWindowStatus: %w", err) + } + return oldValue.SessionWindowStatus, nil +} + +// ClearSessionWindowStatus clears the value of the "session_window_status" field. +func (m *AccountMutation) ClearSessionWindowStatus() { + m.session_window_status = nil + m.clearedFields[account.FieldSessionWindowStatus] = struct{}{} +} + +// SessionWindowStatusCleared returns if the "session_window_status" field was cleared in this mutation. +func (m *AccountMutation) SessionWindowStatusCleared() bool { + _, ok := m.clearedFields[account.FieldSessionWindowStatus] + return ok +} + +// ResetSessionWindowStatus resets all changes to the "session_window_status" field. 
+func (m *AccountMutation) ResetSessionWindowStatus() {
+	m.session_window_status = nil
+	delete(m.clearedFields, account.FieldSessionWindowStatus)
+}
+
+// AddGroupIDs adds the "groups" edge to the Group entity by IDs.
+func (m *AccountMutation) AddGroupIDs(ids ...int64) {
+	if m.groups == nil {
+		m.groups = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.groups[ids[i]] = struct{}{}
+	}
+}
+
+// ClearGroups clears the "groups" edge to the Group entity.
+func (m *AccountMutation) ClearGroups() {
+	m.clearedgroups = true
+}
+
+// GroupsCleared reports if the "groups" edge to the Group entity was cleared.
+func (m *AccountMutation) GroupsCleared() bool {
+	return m.clearedgroups
+}
+
+// RemoveGroupIDs removes the "groups" edge to the Group entity by IDs.
+func (m *AccountMutation) RemoveGroupIDs(ids ...int64) {
+	if m.removedgroups == nil {
+		m.removedgroups = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.groups, ids[i])
+		m.removedgroups[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedGroupsIDs returns the removed IDs of the "groups" edge to the Group entity.
+func (m *AccountMutation) RemovedGroupsIDs() (ids []int64) {
+	for id := range m.removedgroups {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// GroupsIDs returns the "groups" edge IDs in the mutation.
+func (m *AccountMutation) GroupsIDs() (ids []int64) {
+	for id := range m.groups {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetGroups resets all changes to the "groups" edge.
+func (m *AccountMutation) ResetGroups() {
+	m.groups = nil
+	m.clearedgroups = false
+	m.removedgroups = nil
+}
+
+// ClearProxy clears the "proxy" edge to the Proxy entity.
+func (m *AccountMutation) ClearProxy() {
+	m.clearedproxy = true
+	m.clearedFields[account.FieldProxyID] = struct{}{}
+}
+
+// ProxyCleared reports if the "proxy" edge to the Proxy entity was cleared.
+func (m *AccountMutation) ProxyCleared() bool {
+	return m.ProxyIDCleared() || m.clearedproxy
+}
+
+// ProxyIDs returns the "proxy" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// ProxyID instead. It exists only for internal usage by the builders.
+func (m *AccountMutation) ProxyIDs() (ids []int64) {
+	if id := m.proxy; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetProxy resets all changes to the "proxy" edge.
+func (m *AccountMutation) ResetProxy() {
+	m.proxy = nil
+	m.clearedproxy = false
+}
+
+// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs.
+func (m *AccountMutation) AddUsageLogIDs(ids ...int64) {
+	if m.usage_logs == nil {
+		m.usage_logs = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.usage_logs[ids[i]] = struct{}{}
+	}
+}
+
+// ClearUsageLogs clears the "usage_logs" edge to the UsageLog entity.
+func (m *AccountMutation) ClearUsageLogs() {
+	m.clearedusage_logs = true
+}
+
+// UsageLogsCleared reports if the "usage_logs" edge to the UsageLog entity was cleared.
+func (m *AccountMutation) UsageLogsCleared() bool {
+	return m.clearedusage_logs
+}
+
+// RemoveUsageLogIDs removes the "usage_logs" edge to the UsageLog entity by IDs.
+func (m *AccountMutation) RemoveUsageLogIDs(ids ...int64) {
+	if m.removedusage_logs == nil {
+		m.removedusage_logs = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.usage_logs, ids[i])
+		m.removedusage_logs[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedUsageLogsIDs returns the removed IDs of the "usage_logs" edge to the UsageLog entity.
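+//
+// Removals are tracked separately from additions, so both stay observable in
+// hooks (the ID below is hypothetical):
+//
+//	m.RemoveUsageLogIDs(7)
+//	_ = m.RemovedUsageLogsIDs() // reports [7]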
+func (m *AccountMutation) RemovedUsageLogsIDs() (ids []int64) {
+	for id := range m.removedusage_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// UsageLogsIDs returns the "usage_logs" edge IDs in the mutation.
+func (m *AccountMutation) UsageLogsIDs() (ids []int64) {
+	for id := range m.usage_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetUsageLogs resets all changes to the "usage_logs" edge.
+func (m *AccountMutation) ResetUsageLogs() {
+	m.usage_logs = nil
+	m.clearedusage_logs = false
+	m.removedusage_logs = nil
+}
+
+// Where appends a list of predicates to the AccountMutation builder.
+func (m *AccountMutation) Where(ps ...predicate.Account) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the AccountMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *AccountMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Account, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *AccountMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *AccountMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Account).
+func (m *AccountMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *AccountMutation) Fields() []string {
+	fields := make([]string, 0, 25)
+	if m.created_at != nil {
+		fields = append(fields, account.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, account.FieldUpdatedAt)
+	}
+	if m.deleted_at != nil {
+		fields = append(fields, account.FieldDeletedAt)
+	}
+	if m.name != nil {
+		fields = append(fields, account.FieldName)
+	}
+	if m.notes != nil {
+		fields = append(fields, account.FieldNotes)
+	}
+	if m.platform != nil {
+		fields = append(fields, account.FieldPlatform)
+	}
+	if m._type != nil {
+		fields = append(fields, account.FieldType)
+	}
+	if m.credentials != nil {
+		fields = append(fields, account.FieldCredentials)
+	}
+	if m.extra != nil {
+		fields = append(fields, account.FieldExtra)
+	}
+	if m.proxy != nil {
+		fields = append(fields, account.FieldProxyID)
+	}
+	if m.concurrency != nil {
+		fields = append(fields, account.FieldConcurrency)
+	}
+	if m.priority != nil {
+		fields = append(fields, account.FieldPriority)
+	}
+	if m.rate_multiplier != nil {
+		fields = append(fields, account.FieldRateMultiplier)
+	}
+	if m.status != nil {
+		fields = append(fields, account.FieldStatus)
+	}
+	if m.error_message != nil {
+		fields = append(fields, account.FieldErrorMessage)
+	}
+	if m.last_used_at != nil {
+		fields = append(fields, account.FieldLastUsedAt)
+	}
+	if m.expires_at != nil {
+		fields = append(fields, account.FieldExpiresAt)
+	}
+	if m.auto_pause_on_expired != nil {
+		fields = append(fields, account.FieldAutoPauseOnExpired)
+	}
+	if m.schedulable != nil {
+		fields = append(fields, account.FieldSchedulable)
+	}
+	if m.rate_limited_at != nil {
+		fields = append(fields, account.FieldRateLimitedAt)
+	}
+	if m.rate_limit_reset_at != nil {
+		fields = append(fields, account.FieldRateLimitResetAt)
+	}
+	if m.overload_until != nil {
+		fields = append(fields, account.FieldOverloadUntil)
+	}
+	if m.session_window_start != nil {
+		fields = append(fields,
account.FieldSessionWindowStart) + } + if m.session_window_end != nil { + fields = append(fields, account.FieldSessionWindowEnd) + } + if m.session_window_status != nil { + fields = append(fields, account.FieldSessionWindowStatus) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AccountMutation) Field(name string) (ent.Value, bool) { + switch name { + case account.FieldCreatedAt: + return m.CreatedAt() + case account.FieldUpdatedAt: + return m.UpdatedAt() + case account.FieldDeletedAt: + return m.DeletedAt() + case account.FieldName: + return m.Name() + case account.FieldNotes: + return m.Notes() + case account.FieldPlatform: + return m.Platform() + case account.FieldType: + return m.GetType() + case account.FieldCredentials: + return m.Credentials() + case account.FieldExtra: + return m.Extra() + case account.FieldProxyID: + return m.ProxyID() + case account.FieldConcurrency: + return m.Concurrency() + case account.FieldPriority: + return m.Priority() + case account.FieldRateMultiplier: + return m.RateMultiplier() + case account.FieldStatus: + return m.Status() + case account.FieldErrorMessage: + return m.ErrorMessage() + case account.FieldLastUsedAt: + return m.LastUsedAt() + case account.FieldExpiresAt: + return m.ExpiresAt() + case account.FieldAutoPauseOnExpired: + return m.AutoPauseOnExpired() + case account.FieldSchedulable: + return m.Schedulable() + case account.FieldRateLimitedAt: + return m.RateLimitedAt() + case account.FieldRateLimitResetAt: + return m.RateLimitResetAt() + case account.FieldOverloadUntil: + return m.OverloadUntil() + case account.FieldSessionWindowStart: + return m.SessionWindowStart() + case account.FieldSessionWindowEnd: + return m.SessionWindowEnd() + case account.FieldSessionWindowStatus: + return m.SessionWindowStatus() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *AccountMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case account.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case account.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case account.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case account.FieldName: + return m.OldName(ctx) + case account.FieldNotes: + return m.OldNotes(ctx) + case account.FieldPlatform: + return m.OldPlatform(ctx) + case account.FieldType: + return m.OldType(ctx) + case account.FieldCredentials: + return m.OldCredentials(ctx) + case account.FieldExtra: + return m.OldExtra(ctx) + case account.FieldProxyID: + return m.OldProxyID(ctx) + case account.FieldConcurrency: + return m.OldConcurrency(ctx) + case account.FieldPriority: + return m.OldPriority(ctx) + case account.FieldRateMultiplier: + return m.OldRateMultiplier(ctx) + case account.FieldStatus: + return m.OldStatus(ctx) + case account.FieldErrorMessage: + return m.OldErrorMessage(ctx) + case account.FieldLastUsedAt: + return m.OldLastUsedAt(ctx) + case account.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case account.FieldAutoPauseOnExpired: + return m.OldAutoPauseOnExpired(ctx) + case account.FieldSchedulable: + return m.OldSchedulable(ctx) + case account.FieldRateLimitedAt: + return m.OldRateLimitedAt(ctx) + case account.FieldRateLimitResetAt: + return m.OldRateLimitResetAt(ctx) + case account.FieldOverloadUntil: + return m.OldOverloadUntil(ctx) + case account.FieldSessionWindowStart: + return m.OldSessionWindowStart(ctx) + case account.FieldSessionWindowEnd: + return m.OldSessionWindowEnd(ctx) + case account.FieldSessionWindowStatus: + return m.OldSessionWindowStatus(ctx) + } + return nil, fmt.Errorf("unknown Account field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
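+//
+// A generic-access sketch (the name must be an account.Field* constant; the
+// value is illustrative):
+//
+//	if err := m.SetField(account.FieldStatus, "active"); err != nil {
+//		// a non-string value here fails with an "unexpected type" error
+//	}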
+func (m *AccountMutation) SetField(name string, value ent.Value) error { + switch name { + case account.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case account.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case account.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case account.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case account.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + case account.FieldPlatform: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlatform(v) + return nil + case account.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case account.FieldCredentials: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCredentials(v) + return nil + case account.FieldExtra: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtra(v) + return nil + case account.FieldProxyID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProxyID(v) + return nil + case account.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConcurrency(v) + return nil + case account.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPriority(v) + return nil + case account.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateMultiplier(v) + return nil + case account.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case account.FieldErrorMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorMessage(v) + return nil + case account.FieldLastUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLastUsedAt(v) + return nil + case account.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + case account.FieldAutoPauseOnExpired: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAutoPauseOnExpired(v) + return nil + case account.FieldSchedulable: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSchedulable(v) + return nil + case account.FieldRateLimitedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) 
+ } + m.SetRateLimitedAt(v) + return nil + case account.FieldRateLimitResetAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateLimitResetAt(v) + return nil + case account.FieldOverloadUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOverloadUntil(v) + return nil + case account.FieldSessionWindowStart: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowStart(v) + return nil + case account.FieldSessionWindowEnd: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowEnd(v) + return nil + case account.FieldSessionWindowStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSessionWindowStatus(v) + return nil + } + return fmt.Errorf("unknown Account field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AccountMutation) AddedFields() []string { + var fields []string + if m.addconcurrency != nil { + fields = append(fields, account.FieldConcurrency) + } + if m.addpriority != nil { + fields = append(fields, account.FieldPriority) + } + if m.addrate_multiplier != nil { + fields = append(fields, account.FieldRateMultiplier) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AccountMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case account.FieldConcurrency: + return m.AddedConcurrency() + case account.FieldPriority: + return m.AddedPriority() + case account.FieldRateMultiplier: + return m.AddedRateMultiplier() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccountMutation) AddField(name string, value ent.Value) error { + switch name { + case account.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddConcurrency(v) + return nil + case account.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPriority(v) + return nil + case account.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRateMultiplier(v) + return nil + } + return fmt.Errorf("unknown Account numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *AccountMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(account.FieldDeletedAt) { + fields = append(fields, account.FieldDeletedAt) + } + if m.FieldCleared(account.FieldNotes) { + fields = append(fields, account.FieldNotes) + } + if m.FieldCleared(account.FieldProxyID) { + fields = append(fields, account.FieldProxyID) + } + if m.FieldCleared(account.FieldErrorMessage) { + fields = append(fields, account.FieldErrorMessage) + } + if m.FieldCleared(account.FieldLastUsedAt) { + fields = append(fields, account.FieldLastUsedAt) + } + if m.FieldCleared(account.FieldExpiresAt) { + fields = append(fields, account.FieldExpiresAt) + } + if m.FieldCleared(account.FieldRateLimitedAt) { + fields = append(fields, account.FieldRateLimitedAt) + } + if m.FieldCleared(account.FieldRateLimitResetAt) { + fields = append(fields, account.FieldRateLimitResetAt) + } + if m.FieldCleared(account.FieldOverloadUntil) { + fields = append(fields, account.FieldOverloadUntil) + } + if m.FieldCleared(account.FieldSessionWindowStart) { + fields = append(fields, account.FieldSessionWindowStart) + } + if m.FieldCleared(account.FieldSessionWindowEnd) { + fields = append(fields, account.FieldSessionWindowEnd) + } + if m.FieldCleared(account.FieldSessionWindowStatus) { + fields = append(fields, account.FieldSessionWindowStatus) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AccountMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AccountMutation) ClearField(name string) error { + switch name { + case account.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case account.FieldNotes: + m.ClearNotes() + return nil + case account.FieldProxyID: + m.ClearProxyID() + return nil + case account.FieldErrorMessage: + m.ClearErrorMessage() + return nil + case account.FieldLastUsedAt: + m.ClearLastUsedAt() + return nil + case account.FieldExpiresAt: + m.ClearExpiresAt() + return nil + case account.FieldRateLimitedAt: + m.ClearRateLimitedAt() + return nil + case account.FieldRateLimitResetAt: + m.ClearRateLimitResetAt() + return nil + case account.FieldOverloadUntil: + m.ClearOverloadUntil() + return nil + case account.FieldSessionWindowStart: + m.ClearSessionWindowStart() + return nil + case account.FieldSessionWindowEnd: + m.ClearSessionWindowEnd() + return nil + case account.FieldSessionWindowStatus: + m.ClearSessionWindowStatus() + return nil + } + return fmt.Errorf("unknown Account nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
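+//
+// A defensive-hook sketch (the revert policy is illustrative):
+//
+//	if m.FieldCleared(account.FieldNotes) {
+//		_ = m.ResetField(account.FieldNotes) // drop the staged clear, keep stored notes
+//	}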
+func (m *AccountMutation) ResetField(name string) error { + switch name { + case account.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case account.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case account.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case account.FieldName: + m.ResetName() + return nil + case account.FieldNotes: + m.ResetNotes() + return nil + case account.FieldPlatform: + m.ResetPlatform() + return nil + case account.FieldType: + m.ResetType() + return nil + case account.FieldCredentials: + m.ResetCredentials() + return nil + case account.FieldExtra: + m.ResetExtra() + return nil + case account.FieldProxyID: + m.ResetProxyID() + return nil + case account.FieldConcurrency: + m.ResetConcurrency() + return nil + case account.FieldPriority: + m.ResetPriority() + return nil + case account.FieldRateMultiplier: + m.ResetRateMultiplier() + return nil + case account.FieldStatus: + m.ResetStatus() + return nil + case account.FieldErrorMessage: + m.ResetErrorMessage() + return nil + case account.FieldLastUsedAt: + m.ResetLastUsedAt() + return nil + case account.FieldExpiresAt: + m.ResetExpiresAt() + return nil + case account.FieldAutoPauseOnExpired: + m.ResetAutoPauseOnExpired() + return nil + case account.FieldSchedulable: + m.ResetSchedulable() + return nil + case account.FieldRateLimitedAt: + m.ResetRateLimitedAt() + return nil + case account.FieldRateLimitResetAt: + m.ResetRateLimitResetAt() + return nil + case account.FieldOverloadUntil: + m.ResetOverloadUntil() + return nil + case account.FieldSessionWindowStart: + m.ResetSessionWindowStart() + return nil + case account.FieldSessionWindowEnd: + m.ResetSessionWindowEnd() + return nil + case account.FieldSessionWindowStatus: + m.ResetSessionWindowStatus() + return nil + } + return fmt.Errorf("unknown Account field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AccountMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.groups != nil { + edges = append(edges, account.EdgeGroups) + } + if m.proxy != nil { + edges = append(edges, account.EdgeProxy) + } + if m.usage_logs != nil { + edges = append(edges, account.EdgeUsageLogs) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AccountMutation) AddedIDs(name string) []ent.Value { + switch name { + case account.EdgeGroups: + ids := make([]ent.Value, 0, len(m.groups)) + for id := range m.groups { + ids = append(ids, id) + } + return ids + case account.EdgeProxy: + if id := m.proxy; id != nil { + return []ent.Value{*id} + } + case account.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.usage_logs)) + for id := range m.usage_logs { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AccountMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedgroups != nil { + edges = append(edges, account.EdgeGroups) + } + if m.removedusage_logs != nil { + edges = append(edges, account.EdgeUsageLogs) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
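The edge bookkeeping that follows (AddedEdges/AddedIDs/RemovedEdges/RemovedIDs) has the same generic shape as the field API, so edge changes can be inspected without the typed accessors. A sketch, again assuming the imports from the first example:

	// DumpEdgeChanges prints edge additions and removals recorded on any
	// mutation before it is applied.
	func DumpEdgeChanges(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			for _, e := range m.AddedEdges() {
				fmt.Printf("%s: edge %q adds %v\n", m.Type(), e, m.AddedIDs(e))
			}
			for _, e := range m.RemovedEdges() {
				fmt.Printf("%s: edge %q removes %v\n", m.Type(), e, m.RemovedIDs(e))
			}
			return next.Mutate(ctx, m)
		})
	}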
+func (m *AccountMutation) RemovedIDs(name string) []ent.Value { + switch name { + case account.EdgeGroups: + ids := make([]ent.Value, 0, len(m.removedgroups)) + for id := range m.removedgroups { + ids = append(ids, id) + } + return ids + case account.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.removedusage_logs)) + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AccountMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedgroups { + edges = append(edges, account.EdgeGroups) + } + if m.clearedproxy { + edges = append(edges, account.EdgeProxy) + } + if m.clearedusage_logs { + edges = append(edges, account.EdgeUsageLogs) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AccountMutation) EdgeCleared(name string) bool { + switch name { + case account.EdgeGroups: + return m.clearedgroups + case account.EdgeProxy: + return m.clearedproxy + case account.EdgeUsageLogs: + return m.clearedusage_logs + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AccountMutation) ClearEdge(name string) error { + switch name { + case account.EdgeProxy: + m.ClearProxy() + return nil + } + return fmt.Errorf("unknown Account unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AccountMutation) ResetEdge(name string) error { + switch name { + case account.EdgeGroups: + m.ResetGroups() + return nil + case account.EdgeProxy: + m.ResetProxy() + return nil + case account.EdgeUsageLogs: + m.ResetUsageLogs() + return nil + } + return fmt.Errorf("unknown Account edge %s", name) +} + +// AccountGroupMutation represents an operation that mutates the AccountGroup nodes in the graph. +type AccountGroupMutation struct { + config + op Op + typ string + priority *int + addpriority *int + created_at *time.Time + clearedFields map[string]struct{} + account *int64 + clearedaccount bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*AccountGroup, error) + predicates []predicate.AccountGroup +} + +var _ ent.Mutation = (*AccountGroupMutation)(nil) + +// accountgroupOption allows management of the mutation configuration using functional options. +type accountgroupOption func(*AccountGroupMutation) + +// newAccountGroupMutation creates new mutation for the AccountGroup entity. +func newAccountGroupMutation(c config, op Op, opts ...accountgroupOption) *AccountGroupMutation { + m := &AccountGroupMutation{ + config: c, + op: op, + typ: TypeAccountGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AccountGroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
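The Client/Tx pair defined here is what lets a hook query the graph while a mutation is in flight: the returned client reuses the mutation's config, so inside a transaction the query runs on the same tx driver. A sketch of a create-time guard (the hook, its limit, and the module path are illustrative, not taken from this patch):

	package hooks

	import (
		"context"
		"fmt"

		"yourapp/backend/ent"              // hypothetical module path
		"yourapp/backend/ent/accountgroup" // generated predicates
	)

	// CapGroupLinks rejects a new account-group link once a group already has
	// maxLinks rows, counting through the mutation's own client.
	func CapGroupLinks(maxLinks int) ent.Hook {
		return func(next ent.Mutator) ent.Mutator {
			return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
				agm, ok := m.(*ent.AccountGroupMutation)
				if !ok || !agm.Op().Is(ent.OpCreate) {
					return next.Mutate(ctx, m)
				}
				gid, ok := agm.GroupID()
				if !ok {
					return next.Mutate(ctx, m)
				}
				n, err := agm.Client().AccountGroup.Query().
					Where(accountgroup.GroupID(gid)).
					Count(ctx)
				if err != nil {
					return nil, err
				}
				if n >= maxLinks {
					return nil, fmt.Errorf("group %d already has %d accounts", gid, n)
				}
				return next.Mutate(ctx, m)
			})
		}
	}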
+func (m AccountGroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetAccountID sets the "account_id" field. +func (m *AccountGroupMutation) SetAccountID(i int64) { + m.account = &i +} + +// AccountID returns the value of the "account_id" field in the mutation. +func (m *AccountGroupMutation) AccountID() (r int64, exists bool) { + v := m.account + if v == nil { + return + } + return *v, true +} + +// ResetAccountID resets all changes to the "account_id" field. +func (m *AccountGroupMutation) ResetAccountID() { + m.account = nil +} + +// SetGroupID sets the "group_id" field. +func (m *AccountGroupMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *AccountGroupMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *AccountGroupMutation) ResetGroupID() { + m.group = nil +} + +// SetPriority sets the "priority" field. +func (m *AccountGroupMutation) SetPriority(i int) { + m.priority = &i + m.addpriority = nil +} + +// Priority returns the value of the "priority" field in the mutation. +func (m *AccountGroupMutation) Priority() (r int, exists bool) { + v := m.priority + if v == nil { + return + } + return *v, true +} + +// AddPriority adds i to the "priority" field. +func (m *AccountGroupMutation) AddPriority(i int) { + if m.addpriority != nil { + *m.addpriority += i + } else { + m.addpriority = &i + } +} + +// AddedPriority returns the value that was added to the "priority" field in this mutation. +func (m *AccountGroupMutation) AddedPriority() (r int, exists bool) { + v := m.addpriority + if v == nil { + return + } + return *v, true +} + +// ResetPriority resets all changes to the "priority" field. +func (m *AccountGroupMutation) ResetPriority() { + m.priority = nil + m.addpriority = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *AccountGroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AccountGroupMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AccountGroupMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearAccount clears the "account" edge to the Account entity. +func (m *AccountGroupMutation) ClearAccount() { + m.clearedaccount = true + m.clearedFields[accountgroup.FieldAccountID] = struct{}{} +} + +// AccountCleared reports if the "account" edge to the Account entity was cleared. +func (m *AccountGroupMutation) AccountCleared() bool { + return m.clearedaccount +} + +// AccountIDs returns the "account" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AccountID instead. It exists only for internal usage by the builders. +func (m *AccountGroupMutation) AccountIDs() (ids []int64) { + if id := m.account; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAccount resets all changes to the "account" edge. +func (m *AccountGroupMutation) ResetAccount() { + m.account = nil + m.clearedaccount = false +} + +// ClearGroup clears the "group" edge to the Group entity. 
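Every getter above returns a (value, exists) pair rather than a bare value, so callers can tell "never set in this mutation" apart from a staged zero. An illustrative helper showing the comma-ok idiom:

	// PriorityOrDefault reads the staged priority, falling back to def when
	// the mutation never set the field; a staged 0 still wins over def.
	func PriorityOrDefault(agm *ent.AccountGroupMutation, def int) int {
		if p, ok := agm.Priority(); ok {
			return p
		}
		return def
	}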
+func (m *AccountGroupMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[accountgroup.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *AccountGroupMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *AccountGroupMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *AccountGroupMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// Where appends a list predicates to the AccountGroupMutation builder. +func (m *AccountGroupMutation) Where(ps ...predicate.AccountGroup) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AccountGroupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AccountGroupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.AccountGroup, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AccountGroupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AccountGroupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (AccountGroup). +func (m *AccountGroupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *AccountGroupMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.account != nil { + fields = append(fields, accountgroup.FieldAccountID) + } + if m.group != nil { + fields = append(fields, accountgroup.FieldGroupID) + } + if m.priority != nil { + fields = append(fields, accountgroup.FieldPriority) + } + if m.created_at != nil { + fields = append(fields, accountgroup.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *AccountGroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case accountgroup.FieldAccountID: + return m.AccountID() + case accountgroup.FieldGroupID: + return m.GroupID() + case accountgroup.FieldPriority: + return m.Priority() + case accountgroup.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AccountGroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + return nil, errors.New("edge schema AccountGroup does not support getting old values") +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
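WhereP above is the untyped counterpart of Where: middleware that cannot import the generated predicate package can still scope a mutation with raw selectors. A sketch, with entgo.io/ent/dialect/sql aliased as entsql and the usual hypothetical module path:

	package hooks

	import (
		entsql "entgo.io/ent/dialect/sql"

		"yourapp/backend/ent" // hypothetical module path
	)

	// RestrictHighPriority narrows an AccountGroup mutation to rows whose
	// priority column exceeds 10, without the generated predicate package.
	func RestrictHighPriority(agm *ent.AccountGroupMutation) {
		agm.WhereP(func(s *entsql.Selector) {
			s.Where(entsql.GT(s.C("priority"), 10))
		})
	}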
+func (m *AccountGroupMutation) SetField(name string, value ent.Value) error { + switch name { + case accountgroup.FieldAccountID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccountID(v) + return nil + case accountgroup.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case accountgroup.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPriority(v) + return nil + case accountgroup.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown AccountGroup field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AccountGroupMutation) AddedFields() []string { + var fields []string + if m.addpriority != nil { + fields = append(fields, accountgroup.FieldPriority) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AccountGroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case accountgroup.FieldPriority: + return m.AddedPriority() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AccountGroupMutation) AddField(name string, value ent.Value) error { + switch name { + case accountgroup.FieldPriority: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPriority(v) + return nil + } + return fmt.Errorf("unknown AccountGroup numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AccountGroupMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AccountGroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *AccountGroupMutation) ClearField(name string) error { + return fmt.Errorf("unknown AccountGroup nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AccountGroupMutation) ResetField(name string) error { + switch name { + case accountgroup.FieldAccountID: + m.ResetAccountID() + return nil + case accountgroup.FieldGroupID: + m.ResetGroupID() + return nil + case accountgroup.FieldPriority: + m.ResetPriority() + return nil + case accountgroup.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown AccountGroup field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
+func (m *AccountGroupMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.account != nil { + edges = append(edges, accountgroup.EdgeAccount) + } + if m.group != nil { + edges = append(edges, accountgroup.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *AccountGroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case accountgroup.EdgeAccount: + if id := m.account; id != nil { + return []ent.Value{*id} + } + case accountgroup.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AccountGroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AccountGroupMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AccountGroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedaccount { + edges = append(edges, accountgroup.EdgeAccount) + } + if m.clearedgroup { + edges = append(edges, accountgroup.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AccountGroupMutation) EdgeCleared(name string) bool { + switch name { + case accountgroup.EdgeAccount: + return m.clearedaccount + case accountgroup.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AccountGroupMutation) ClearEdge(name string) error { + switch name { + case accountgroup.EdgeAccount: + m.ClearAccount() + return nil + case accountgroup.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown AccountGroup unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AccountGroupMutation) ResetEdge(name string) error { + switch name { + case accountgroup.EdgeAccount: + m.ResetAccount() + return nil + case accountgroup.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown AccountGroup edge %s", name) +} + +// GroupMutation represents an operation that mutates the Group nodes in the graph. 
+type GroupMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + description *string + rate_multiplier *float64 + addrate_multiplier *float64 + is_exclusive *bool + status *string + platform *string + subscription_type *string + daily_limit_usd *float64 + adddaily_limit_usd *float64 + weekly_limit_usd *float64 + addweekly_limit_usd *float64 + monthly_limit_usd *float64 + addmonthly_limit_usd *float64 + default_validity_days *int + adddefault_validity_days *int + image_price_1k *float64 + addimage_price_1k *float64 + image_price_2k *float64 + addimage_price_2k *float64 + image_price_4k *float64 + addimage_price_4k *float64 + claude_code_only *bool + fallback_group_id *int64 + addfallback_group_id *int64 + clearedFields map[string]struct{} + api_keys map[int64]struct{} + removedapi_keys map[int64]struct{} + clearedapi_keys bool + redeem_codes map[int64]struct{} + removedredeem_codes map[int64]struct{} + clearedredeem_codes bool + subscriptions map[int64]struct{} + removedsubscriptions map[int64]struct{} + clearedsubscriptions bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + accounts map[int64]struct{} + removedaccounts map[int64]struct{} + clearedaccounts bool + allowed_users map[int64]struct{} + removedallowed_users map[int64]struct{} + clearedallowed_users bool + done bool + oldValue func(context.Context) (*Group, error) + predicates []predicate.Group +} + +var _ ent.Mutation = (*GroupMutation)(nil) + +// groupOption allows management of the mutation configuration using functional options. +type groupOption func(*GroupMutation) + +// newGroupMutation creates new mutation for the Group entity. +func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation { + m := &GroupMutation{ + config: c, + op: op, + typ: TypeGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withGroupID sets the ID field of the mutation. +func withGroupID(id int64) groupOption { + return func(m *GroupMutation) { + var ( + err error + once sync.Once + value *Group + ) + m.oldValue = func(ctx context.Context) (*Group, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Group.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withGroup sets the old Group of the mutation. +func withGroup(node *Group) groupOption { + return func(m *GroupMutation) { + m.oldValue = func(context.Context) (*Group, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m GroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m GroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *GroupMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *GroupMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Group.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *GroupMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *GroupMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the Group entity.
+// If the Group object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GroupMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *GroupMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *GroupMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *GroupMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Group entity.
+// If the Group object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *GroupMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *GroupMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (m *GroupMutation) SetDeletedAt(t time.Time) {
+	m.deleted_at = &t
+}
+
+// DeletedAt returns the value of the "deleted_at" field in the mutation.
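The IDs helper above is useful in hooks that need to know, before execution, which rows a bulk update will touch: on OpUpdateOne it short-circuits to the staged ID, otherwise it runs the mutation's own predicates. An illustrative sketch, assuming the imports from the first example:

	// LogAffectedGroups resolves and logs the ids a bulk group update will
	// touch before the update executes.
	func LogAffectedGroups(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if gm, ok := m.(*ent.GroupMutation); ok && gm.Op().Is(ent.OpUpdate) {
				ids, err := gm.IDs(ctx)
				if err != nil {
					return nil, err
				}
				fmt.Printf("bulk group update touches ids %v\n", ids)
			}
			return next.Mutate(ctx, m)
		})
	}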
+func (m *GroupMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *GroupMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[group.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *GroupMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[group.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *GroupMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, group.FieldDeletedAt) +} + +// SetName sets the "name" field. +func (m *GroupMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *GroupMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *GroupMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *GroupMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *GroupMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
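The Old* getters defined here lazily fetch the pre-mutation row (memoized through the sync.Once set up in withGroupID), which makes before/after comparisons cheap in UpdateOne hooks. A sketch under the same hypothetical imports:

	// TrackRenames compares the staged name against the database value; Old*
	// getters are only legal on UpdateOne, where the mutation has a single ID.
	func TrackRenames(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			if gm, ok := m.(*ent.GroupMutation); ok && gm.Op().Is(ent.OpUpdateOne) {
				if newName, set := gm.Name(); set {
					oldName, err := gm.OldName(ctx) // fetches the row once, then memoized
					if err == nil && oldName != newName {
						fmt.Printf("group renamed %q -> %q\n", oldName, newName)
					}
				}
			}
			return next.Mutate(ctx, m)
		})
	}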
+func (m *GroupMutation) OldDescription(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *GroupMutation) ClearDescription() { + m.description = nil + m.clearedFields[group.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *GroupMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[group.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *GroupMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, group.FieldDescription) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (m *GroupMutation) SetRateMultiplier(f float64) { + m.rate_multiplier = &f + m.addrate_multiplier = nil +} + +// RateMultiplier returns the value of the "rate_multiplier" field in the mutation. +func (m *GroupMutation) RateMultiplier() (r float64, exists bool) { + v := m.rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldRateMultiplier returns the old "rate_multiplier" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err) + } + return oldValue.RateMultiplier, nil +} + +// AddRateMultiplier adds f to the "rate_multiplier" field. +func (m *GroupMutation) AddRateMultiplier(f float64) { + if m.addrate_multiplier != nil { + *m.addrate_multiplier += f + } else { + m.addrate_multiplier = &f + } +} + +// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation. +func (m *GroupMutation) AddedRateMultiplier() (r float64, exists bool) { + v := m.addrate_multiplier + if v == nil { + return + } + return *v, true +} + +// ResetRateMultiplier resets all changes to the "rate_multiplier" field. +func (m *GroupMutation) ResetRateMultiplier() { + m.rate_multiplier = nil + m.addrate_multiplier = nil +} + +// SetIsExclusive sets the "is_exclusive" field. +func (m *GroupMutation) SetIsExclusive(b bool) { + m.is_exclusive = &b +} + +// IsExclusive returns the value of the "is_exclusive" field in the mutation. +func (m *GroupMutation) IsExclusive() (r bool, exists bool) { + v := m.is_exclusive + if v == nil { + return + } + return *v, true +} + +// OldIsExclusive returns the old "is_exclusive" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldIsExclusive(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsExclusive is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsExclusive requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsExclusive: %w", err) + } + return oldValue.IsExclusive, nil +} + +// ResetIsExclusive resets all changes to the "is_exclusive" field. +func (m *GroupMutation) ResetIsExclusive() { + m.is_exclusive = nil +} + +// SetStatus sets the "status" field. +func (m *GroupMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *GroupMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *GroupMutation) ResetStatus() { + m.status = nil +} + +// SetPlatform sets the "platform" field. +func (m *GroupMutation) SetPlatform(s string) { + m.platform = &s +} + +// Platform returns the value of the "platform" field in the mutation. +func (m *GroupMutation) Platform() (r string, exists bool) { + v := m.platform + if v == nil { + return + } + return *v, true +} + +// OldPlatform returns the old "platform" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldPlatform(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlatform is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlatform requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlatform: %w", err) + } + return oldValue.Platform, nil +} + +// ResetPlatform resets all changes to the "platform" field. +func (m *GroupMutation) ResetPlatform() { + m.platform = nil +} + +// SetSubscriptionType sets the "subscription_type" field. +func (m *GroupMutation) SetSubscriptionType(s string) { + m.subscription_type = &s +} + +// SubscriptionType returns the value of the "subscription_type" field in the mutation. 
+func (m *GroupMutation) SubscriptionType() (r string, exists bool) { + v := m.subscription_type + if v == nil { + return + } + return *v, true +} + +// OldSubscriptionType returns the old "subscription_type" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldSubscriptionType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSubscriptionType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSubscriptionType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSubscriptionType: %w", err) + } + return oldValue.SubscriptionType, nil +} + +// ResetSubscriptionType resets all changes to the "subscription_type" field. +func (m *GroupMutation) ResetSubscriptionType() { + m.subscription_type = nil +} + +// SetDailyLimitUsd sets the "daily_limit_usd" field. +func (m *GroupMutation) SetDailyLimitUsd(f float64) { + m.daily_limit_usd = &f + m.adddaily_limit_usd = nil +} + +// DailyLimitUsd returns the value of the "daily_limit_usd" field in the mutation. +func (m *GroupMutation) DailyLimitUsd() (r float64, exists bool) { + v := m.daily_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldDailyLimitUsd returns the old "daily_limit_usd" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDailyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyLimitUsd: %w", err) + } + return oldValue.DailyLimitUsd, nil +} + +// AddDailyLimitUsd adds f to the "daily_limit_usd" field. +func (m *GroupMutation) AddDailyLimitUsd(f float64) { + if m.adddaily_limit_usd != nil { + *m.adddaily_limit_usd += f + } else { + m.adddaily_limit_usd = &f + } +} + +// AddedDailyLimitUsd returns the value that was added to the "daily_limit_usd" field in this mutation. +func (m *GroupMutation) AddedDailyLimitUsd() (r float64, exists bool) { + v := m.adddaily_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearDailyLimitUsd clears the value of the "daily_limit_usd" field. +func (m *GroupMutation) ClearDailyLimitUsd() { + m.daily_limit_usd = nil + m.adddaily_limit_usd = nil + m.clearedFields[group.FieldDailyLimitUsd] = struct{}{} +} + +// DailyLimitUsdCleared returns if the "daily_limit_usd" field was cleared in this mutation. +func (m *GroupMutation) DailyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldDailyLimitUsd] + return ok +} + +// ResetDailyLimitUsd resets all changes to the "daily_limit_usd" field. +func (m *GroupMutation) ResetDailyLimitUsd() { + m.daily_limit_usd = nil + m.adddaily_limit_usd = nil + delete(m.clearedFields, group.FieldDailyLimitUsd) +} + +// SetWeeklyLimitUsd sets the "weekly_limit_usd" field. 
+func (m *GroupMutation) SetWeeklyLimitUsd(f float64) { + m.weekly_limit_usd = &f + m.addweekly_limit_usd = nil +} + +// WeeklyLimitUsd returns the value of the "weekly_limit_usd" field in the mutation. +func (m *GroupMutation) WeeklyLimitUsd() (r float64, exists bool) { + v := m.weekly_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldWeeklyLimitUsd returns the old "weekly_limit_usd" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldWeeklyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyLimitUsd: %w", err) + } + return oldValue.WeeklyLimitUsd, nil +} + +// AddWeeklyLimitUsd adds f to the "weekly_limit_usd" field. +func (m *GroupMutation) AddWeeklyLimitUsd(f float64) { + if m.addweekly_limit_usd != nil { + *m.addweekly_limit_usd += f + } else { + m.addweekly_limit_usd = &f + } +} + +// AddedWeeklyLimitUsd returns the value that was added to the "weekly_limit_usd" field in this mutation. +func (m *GroupMutation) AddedWeeklyLimitUsd() (r float64, exists bool) { + v := m.addweekly_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearWeeklyLimitUsd clears the value of the "weekly_limit_usd" field. +func (m *GroupMutation) ClearWeeklyLimitUsd() { + m.weekly_limit_usd = nil + m.addweekly_limit_usd = nil + m.clearedFields[group.FieldWeeklyLimitUsd] = struct{}{} +} + +// WeeklyLimitUsdCleared returns if the "weekly_limit_usd" field was cleared in this mutation. +func (m *GroupMutation) WeeklyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldWeeklyLimitUsd] + return ok +} + +// ResetWeeklyLimitUsd resets all changes to the "weekly_limit_usd" field. +func (m *GroupMutation) ResetWeeklyLimitUsd() { + m.weekly_limit_usd = nil + m.addweekly_limit_usd = nil + delete(m.clearedFields, group.FieldWeeklyLimitUsd) +} + +// SetMonthlyLimitUsd sets the "monthly_limit_usd" field. +func (m *GroupMutation) SetMonthlyLimitUsd(f float64) { + m.monthly_limit_usd = &f + m.addmonthly_limit_usd = nil +} + +// MonthlyLimitUsd returns the value of the "monthly_limit_usd" field in the mutation. +func (m *GroupMutation) MonthlyLimitUsd() (r float64, exists bool) { + v := m.monthly_limit_usd + if v == nil { + return + } + return *v, true +} + +// OldMonthlyLimitUsd returns the old "monthly_limit_usd" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
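Each numeric field keeps a value pointer and a separate add pointer (daily_limit_usd versus adddaily_limit_usd): Set replaces the staged value, while Add accumulates a delta that ent applies arithmetically in SQL (roughly SET daily_limit_usd = COALESCE(daily_limit_usd, 0) + ?), avoiding a read-modify-write race. A minimal sketch on the corresponding update builder, with the client wiring assumed:

	// RaiseDailyLimit bumps a group's daily USD limit by 5 in a single,
	// concurrency-safe statement via the builder's Add* method.
	func RaiseDailyLimit(ctx context.Context, client *ent.Client, groupID int64) error {
		return client.Group.
			UpdateOneID(groupID).
			AddDailyLimitUsd(5).
			Exec(ctx)
	}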
+func (m *GroupMutation) OldMonthlyLimitUsd(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyLimitUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyLimitUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyLimitUsd: %w", err) + } + return oldValue.MonthlyLimitUsd, nil +} + +// AddMonthlyLimitUsd adds f to the "monthly_limit_usd" field. +func (m *GroupMutation) AddMonthlyLimitUsd(f float64) { + if m.addmonthly_limit_usd != nil { + *m.addmonthly_limit_usd += f + } else { + m.addmonthly_limit_usd = &f + } +} + +// AddedMonthlyLimitUsd returns the value that was added to the "monthly_limit_usd" field in this mutation. +func (m *GroupMutation) AddedMonthlyLimitUsd() (r float64, exists bool) { + v := m.addmonthly_limit_usd + if v == nil { + return + } + return *v, true +} + +// ClearMonthlyLimitUsd clears the value of the "monthly_limit_usd" field. +func (m *GroupMutation) ClearMonthlyLimitUsd() { + m.monthly_limit_usd = nil + m.addmonthly_limit_usd = nil + m.clearedFields[group.FieldMonthlyLimitUsd] = struct{}{} +} + +// MonthlyLimitUsdCleared returns if the "monthly_limit_usd" field was cleared in this mutation. +func (m *GroupMutation) MonthlyLimitUsdCleared() bool { + _, ok := m.clearedFields[group.FieldMonthlyLimitUsd] + return ok +} + +// ResetMonthlyLimitUsd resets all changes to the "monthly_limit_usd" field. +func (m *GroupMutation) ResetMonthlyLimitUsd() { + m.monthly_limit_usd = nil + m.addmonthly_limit_usd = nil + delete(m.clearedFields, group.FieldMonthlyLimitUsd) +} + +// SetDefaultValidityDays sets the "default_validity_days" field. +func (m *GroupMutation) SetDefaultValidityDays(i int) { + m.default_validity_days = &i + m.adddefault_validity_days = nil +} + +// DefaultValidityDays returns the value of the "default_validity_days" field in the mutation. +func (m *GroupMutation) DefaultValidityDays() (r int, exists bool) { + v := m.default_validity_days + if v == nil { + return + } + return *v, true +} + +// OldDefaultValidityDays returns the old "default_validity_days" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldDefaultValidityDays(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDefaultValidityDays is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDefaultValidityDays requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDefaultValidityDays: %w", err) + } + return oldValue.DefaultValidityDays, nil +} + +// AddDefaultValidityDays adds i to the "default_validity_days" field. +func (m *GroupMutation) AddDefaultValidityDays(i int) { + if m.adddefault_validity_days != nil { + *m.adddefault_validity_days += i + } else { + m.adddefault_validity_days = &i + } +} + +// AddedDefaultValidityDays returns the value that was added to the "default_validity_days" field in this mutation. 
+func (m *GroupMutation) AddedDefaultValidityDays() (r int, exists bool) { + v := m.adddefault_validity_days + if v == nil { + return + } + return *v, true +} + +// ResetDefaultValidityDays resets all changes to the "default_validity_days" field. +func (m *GroupMutation) ResetDefaultValidityDays() { + m.default_validity_days = nil + m.adddefault_validity_days = nil +} + +// SetImagePrice1k sets the "image_price_1k" field. +func (m *GroupMutation) SetImagePrice1k(f float64) { + m.image_price_1k = &f + m.addimage_price_1k = nil +} + +// ImagePrice1k returns the value of the "image_price_1k" field in the mutation. +func (m *GroupMutation) ImagePrice1k() (r float64, exists bool) { + v := m.image_price_1k + if v == nil { + return + } + return *v, true +} + +// OldImagePrice1k returns the old "image_price_1k" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldImagePrice1k(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImagePrice1k is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImagePrice1k requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImagePrice1k: %w", err) + } + return oldValue.ImagePrice1k, nil +} + +// AddImagePrice1k adds f to the "image_price_1k" field. +func (m *GroupMutation) AddImagePrice1k(f float64) { + if m.addimage_price_1k != nil { + *m.addimage_price_1k += f + } else { + m.addimage_price_1k = &f + } +} + +// AddedImagePrice1k returns the value that was added to the "image_price_1k" field in this mutation. +func (m *GroupMutation) AddedImagePrice1k() (r float64, exists bool) { + v := m.addimage_price_1k + if v == nil { + return + } + return *v, true +} + +// ClearImagePrice1k clears the value of the "image_price_1k" field. +func (m *GroupMutation) ClearImagePrice1k() { + m.image_price_1k = nil + m.addimage_price_1k = nil + m.clearedFields[group.FieldImagePrice1k] = struct{}{} +} + +// ImagePrice1kCleared returns if the "image_price_1k" field was cleared in this mutation. +func (m *GroupMutation) ImagePrice1kCleared() bool { + _, ok := m.clearedFields[group.FieldImagePrice1k] + return ok +} + +// ResetImagePrice1k resets all changes to the "image_price_1k" field. +func (m *GroupMutation) ResetImagePrice1k() { + m.image_price_1k = nil + m.addimage_price_1k = nil + delete(m.clearedFields, group.FieldImagePrice1k) +} + +// SetImagePrice2k sets the "image_price_2k" field. +func (m *GroupMutation) SetImagePrice2k(f float64) { + m.image_price_2k = &f + m.addimage_price_2k = nil +} + +// ImagePrice2k returns the value of the "image_price_2k" field in the mutation. +func (m *GroupMutation) ImagePrice2k() (r float64, exists bool) { + v := m.image_price_2k + if v == nil { + return + } + return *v, true +} + +// OldImagePrice2k returns the old "image_price_2k" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldImagePrice2k(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImagePrice2k is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImagePrice2k requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImagePrice2k: %w", err) + } + return oldValue.ImagePrice2k, nil +} + +// AddImagePrice2k adds f to the "image_price_2k" field. +func (m *GroupMutation) AddImagePrice2k(f float64) { + if m.addimage_price_2k != nil { + *m.addimage_price_2k += f + } else { + m.addimage_price_2k = &f + } +} + +// AddedImagePrice2k returns the value that was added to the "image_price_2k" field in this mutation. +func (m *GroupMutation) AddedImagePrice2k() (r float64, exists bool) { + v := m.addimage_price_2k + if v == nil { + return + } + return *v, true +} + +// ClearImagePrice2k clears the value of the "image_price_2k" field. +func (m *GroupMutation) ClearImagePrice2k() { + m.image_price_2k = nil + m.addimage_price_2k = nil + m.clearedFields[group.FieldImagePrice2k] = struct{}{} +} + +// ImagePrice2kCleared returns if the "image_price_2k" field was cleared in this mutation. +func (m *GroupMutation) ImagePrice2kCleared() bool { + _, ok := m.clearedFields[group.FieldImagePrice2k] + return ok +} + +// ResetImagePrice2k resets all changes to the "image_price_2k" field. +func (m *GroupMutation) ResetImagePrice2k() { + m.image_price_2k = nil + m.addimage_price_2k = nil + delete(m.clearedFields, group.FieldImagePrice2k) +} + +// SetImagePrice4k sets the "image_price_4k" field. +func (m *GroupMutation) SetImagePrice4k(f float64) { + m.image_price_4k = &f + m.addimage_price_4k = nil +} + +// ImagePrice4k returns the value of the "image_price_4k" field in the mutation. +func (m *GroupMutation) ImagePrice4k() (r float64, exists bool) { + v := m.image_price_4k + if v == nil { + return + } + return *v, true +} + +// OldImagePrice4k returns the old "image_price_4k" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldImagePrice4k(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImagePrice4k is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImagePrice4k requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImagePrice4k: %w", err) + } + return oldValue.ImagePrice4k, nil +} + +// AddImagePrice4k adds f to the "image_price_4k" field. +func (m *GroupMutation) AddImagePrice4k(f float64) { + if m.addimage_price_4k != nil { + *m.addimage_price_4k += f + } else { + m.addimage_price_4k = &f + } +} + +// AddedImagePrice4k returns the value that was added to the "image_price_4k" field in this mutation. +func (m *GroupMutation) AddedImagePrice4k() (r float64, exists bool) { + v := m.addimage_price_4k + if v == nil { + return + } + return *v, true +} + +// ClearImagePrice4k clears the value of the "image_price_4k" field. 
+func (m *GroupMutation) ClearImagePrice4k() { + m.image_price_4k = nil + m.addimage_price_4k = nil + m.clearedFields[group.FieldImagePrice4k] = struct{}{} +} + +// ImagePrice4kCleared returns if the "image_price_4k" field was cleared in this mutation. +func (m *GroupMutation) ImagePrice4kCleared() bool { + _, ok := m.clearedFields[group.FieldImagePrice4k] + return ok +} + +// ResetImagePrice4k resets all changes to the "image_price_4k" field. +func (m *GroupMutation) ResetImagePrice4k() { + m.image_price_4k = nil + m.addimage_price_4k = nil + delete(m.clearedFields, group.FieldImagePrice4k) +} + +// SetClaudeCodeOnly sets the "claude_code_only" field. +func (m *GroupMutation) SetClaudeCodeOnly(b bool) { + m.claude_code_only = &b +} + +// ClaudeCodeOnly returns the value of the "claude_code_only" field in the mutation. +func (m *GroupMutation) ClaudeCodeOnly() (r bool, exists bool) { + v := m.claude_code_only + if v == nil { + return + } + return *v, true +} + +// OldClaudeCodeOnly returns the old "claude_code_only" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldClaudeCodeOnly(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldClaudeCodeOnly is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldClaudeCodeOnly requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldClaudeCodeOnly: %w", err) + } + return oldValue.ClaudeCodeOnly, nil +} + +// ResetClaudeCodeOnly resets all changes to the "claude_code_only" field. +func (m *GroupMutation) ResetClaudeCodeOnly() { + m.claude_code_only = nil +} + +// SetFallbackGroupID sets the "fallback_group_id" field. +func (m *GroupMutation) SetFallbackGroupID(i int64) { + m.fallback_group_id = &i + m.addfallback_group_id = nil +} + +// FallbackGroupID returns the value of the "fallback_group_id" field in the mutation. +func (m *GroupMutation) FallbackGroupID() (r int64, exists bool) { + v := m.fallback_group_id + if v == nil { + return + } + return *v, true +} + +// OldFallbackGroupID returns the old "fallback_group_id" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *GroupMutation) OldFallbackGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFallbackGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFallbackGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFallbackGroupID: %w", err) + } + return oldValue.FallbackGroupID, nil +} + +// AddFallbackGroupID adds i to the "fallback_group_id" field. +func (m *GroupMutation) AddFallbackGroupID(i int64) { + if m.addfallback_group_id != nil { + *m.addfallback_group_id += i + } else { + m.addfallback_group_id = &i + } +} + +// AddedFallbackGroupID returns the value that was added to the "fallback_group_id" field in this mutation. 
+func (m *GroupMutation) AddedFallbackGroupID() (r int64, exists bool) { + v := m.addfallback_group_id + if v == nil { + return + } + return *v, true +} + +// ClearFallbackGroupID clears the value of the "fallback_group_id" field. +func (m *GroupMutation) ClearFallbackGroupID() { + m.fallback_group_id = nil + m.addfallback_group_id = nil + m.clearedFields[group.FieldFallbackGroupID] = struct{}{} +} + +// FallbackGroupIDCleared returns if the "fallback_group_id" field was cleared in this mutation. +func (m *GroupMutation) FallbackGroupIDCleared() bool { + _, ok := m.clearedFields[group.FieldFallbackGroupID] + return ok +} + +// ResetFallbackGroupID resets all changes to the "fallback_group_id" field. +func (m *GroupMutation) ResetFallbackGroupID() { + m.fallback_group_id = nil + m.addfallback_group_id = nil + delete(m.clearedFields, group.FieldFallbackGroupID) +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. +func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { + if m.api_keys == nil { + m.api_keys = make(map[int64]struct{}) + } + for i := range ids { + m.api_keys[ids[i]] = struct{}{} + } +} + +// ClearAPIKeys clears the "api_keys" edge to the APIKey entity. +func (m *GroupMutation) ClearAPIKeys() { + m.clearedapi_keys = true +} + +// APIKeysCleared reports if the "api_keys" edge to the APIKey entity was cleared. +func (m *GroupMutation) APIKeysCleared() bool { + return m.clearedapi_keys +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to the APIKey entity by IDs. +func (m *GroupMutation) RemoveAPIKeyIDs(ids ...int64) { + if m.removedapi_keys == nil { + m.removedapi_keys = make(map[int64]struct{}) + } + for i := range ids { + delete(m.api_keys, ids[i]) + m.removedapi_keys[ids[i]] = struct{}{} + } +} + +// RemovedAPIKeys returns the removed IDs of the "api_keys" edge to the APIKey entity. +func (m *GroupMutation) RemovedAPIKeysIDs() (ids []int64) { + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return +} + +// APIKeysIDs returns the "api_keys" edge IDs in the mutation. +func (m *GroupMutation) APIKeysIDs() (ids []int64) { + for id := range m.api_keys { + ids = append(ids, id) + } + return +} + +// ResetAPIKeys resets all changes to the "api_keys" edge. +func (m *GroupMutation) ResetAPIKeys() { + m.api_keys = nil + m.clearedapi_keys = false + m.removedapi_keys = nil +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by ids. +func (m *GroupMutation) AddRedeemCodeIDs(ids ...int64) { + if m.redeem_codes == nil { + m.redeem_codes = make(map[int64]struct{}) + } + for i := range ids { + m.redeem_codes[ids[i]] = struct{}{} + } +} + +// ClearRedeemCodes clears the "redeem_codes" edge to the RedeemCode entity. +func (m *GroupMutation) ClearRedeemCodes() { + m.clearedredeem_codes = true +} + +// RedeemCodesCleared reports if the "redeem_codes" edge to the RedeemCode entity was cleared. +func (m *GroupMutation) RedeemCodesCleared() bool { + return m.clearedredeem_codes +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to the RedeemCode entity by IDs. +func (m *GroupMutation) RemoveRedeemCodeIDs(ids ...int64) { + if m.removedredeem_codes == nil { + m.removedredeem_codes = make(map[int64]struct{}) + } + for i := range ids { + delete(m.redeem_codes, ids[i]) + m.removedredeem_codes[ids[i]] = struct{}{} + } +} + +// RemovedRedeemCodes returns the removed IDs of the "redeem_codes" edge to the RedeemCode entity. 
+func (m *GroupMutation) RemovedRedeemCodesIDs() (ids []int64) { + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return +} + +// RedeemCodesIDs returns the "redeem_codes" edge IDs in the mutation. +func (m *GroupMutation) RedeemCodesIDs() (ids []int64) { + for id := range m.redeem_codes { + ids = append(ids, id) + } + return +} + +// ResetRedeemCodes resets all changes to the "redeem_codes" edge. +func (m *GroupMutation) ResetRedeemCodes() { + m.redeem_codes = nil + m.clearedredeem_codes = false + m.removedredeem_codes = nil +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by ids. +func (m *GroupMutation) AddSubscriptionIDs(ids ...int64) { + if m.subscriptions == nil { + m.subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.subscriptions[ids[i]] = struct{}{} + } +} + +// ClearSubscriptions clears the "subscriptions" edge to the UserSubscription entity. +func (m *GroupMutation) ClearSubscriptions() { + m.clearedsubscriptions = true +} + +// SubscriptionsCleared reports if the "subscriptions" edge to the UserSubscription entity was cleared. +func (m *GroupMutation) SubscriptionsCleared() bool { + return m.clearedsubscriptions +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to the UserSubscription entity by IDs. +func (m *GroupMutation) RemoveSubscriptionIDs(ids ...int64) { + if m.removedsubscriptions == nil { + m.removedsubscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.subscriptions, ids[i]) + m.removedsubscriptions[ids[i]] = struct{}{} + } +} + +// RemovedSubscriptions returns the removed IDs of the "subscriptions" edge to the UserSubscription entity. +func (m *GroupMutation) RemovedSubscriptionsIDs() (ids []int64) { + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return +} + +// SubscriptionsIDs returns the "subscriptions" edge IDs in the mutation. +func (m *GroupMutation) SubscriptionsIDs() (ids []int64) { + for id := range m.subscriptions { + ids = append(ids, id) + } + return +} + +// ResetSubscriptions resets all changes to the "subscriptions" edge. +func (m *GroupMutation) ResetSubscriptions() { + m.subscriptions = nil + m.clearedsubscriptions = false + m.removedsubscriptions = nil +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by ids. +func (m *GroupMutation) AddUsageLogIDs(ids ...int64) { + if m.usage_logs == nil { + m.usage_logs = make(map[int64]struct{}) + } + for i := range ids { + m.usage_logs[ids[i]] = struct{}{} + } +} + +// ClearUsageLogs clears the "usage_logs" edge to the UsageLog entity. +func (m *GroupMutation) ClearUsageLogs() { + m.clearedusage_logs = true +} + +// UsageLogsCleared reports if the "usage_logs" edge to the UsageLog entity was cleared. +func (m *GroupMutation) UsageLogsCleared() bool { + return m.clearedusage_logs +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to the UsageLog entity by IDs. +func (m *GroupMutation) RemoveUsageLogIDs(ids ...int64) { + if m.removedusage_logs == nil { + m.removedusage_logs = make(map[int64]struct{}) + } + for i := range ids { + delete(m.usage_logs, ids[i]) + m.removedusage_logs[ids[i]] = struct{}{} + } +} + +// RemovedUsageLogs returns the removed IDs of the "usage_logs" edge to the UsageLog entity. +func (m *GroupMutation) RemovedUsageLogsIDs() (ids []int64) { + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return +} + +// UsageLogsIDs returns the "usage_logs" edge IDs in the mutation. 
+func (m *GroupMutation) UsageLogsIDs() (ids []int64) {
+	for id := range m.usage_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetUsageLogs resets all changes to the "usage_logs" edge.
+func (m *GroupMutation) ResetUsageLogs() {
+	m.usage_logs = nil
+	m.clearedusage_logs = false
+	m.removedusage_logs = nil
+}
+
+// AddAccountIDs adds the "accounts" edge to the Account entity by ids.
+func (m *GroupMutation) AddAccountIDs(ids ...int64) {
+	if m.accounts == nil {
+		m.accounts = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.accounts[ids[i]] = struct{}{}
+	}
+}
+
+// ClearAccounts clears the "accounts" edge to the Account entity.
+func (m *GroupMutation) ClearAccounts() {
+	m.clearedaccounts = true
+}
+
+// AccountsCleared reports if the "accounts" edge to the Account entity was cleared.
+func (m *GroupMutation) AccountsCleared() bool {
+	return m.clearedaccounts
+}
+
+// RemoveAccountIDs removes the "accounts" edge to the Account entity by IDs.
+func (m *GroupMutation) RemoveAccountIDs(ids ...int64) {
+	if m.removedaccounts == nil {
+		m.removedaccounts = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.accounts, ids[i])
+		m.removedaccounts[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedAccounts returns the removed IDs of the "accounts" edge to the Account entity.
+func (m *GroupMutation) RemovedAccountsIDs() (ids []int64) {
+	for id := range m.removedaccounts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// AccountsIDs returns the "accounts" edge IDs in the mutation.
+func (m *GroupMutation) AccountsIDs() (ids []int64) {
+	for id := range m.accounts {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetAccounts resets all changes to the "accounts" edge.
+func (m *GroupMutation) ResetAccounts() {
+	m.accounts = nil
+	m.clearedaccounts = false
+	m.removedaccounts = nil
+}
+
+// AddAllowedUserIDs adds the "allowed_users" edge to the User entity by ids.
+func (m *GroupMutation) AddAllowedUserIDs(ids ...int64) {
+	if m.allowed_users == nil {
+		m.allowed_users = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.allowed_users[ids[i]] = struct{}{}
+	}
+}
+
+// ClearAllowedUsers clears the "allowed_users" edge to the User entity.
+func (m *GroupMutation) ClearAllowedUsers() {
+	m.clearedallowed_users = true
+}
+
+// AllowedUsersCleared reports if the "allowed_users" edge to the User entity was cleared.
+func (m *GroupMutation) AllowedUsersCleared() bool {
+	return m.clearedallowed_users
+}
+
+// RemoveAllowedUserIDs removes the "allowed_users" edge to the User entity by IDs.
+func (m *GroupMutation) RemoveAllowedUserIDs(ids ...int64) {
+	if m.removedallowed_users == nil {
+		m.removedallowed_users = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.allowed_users, ids[i])
+		m.removedallowed_users[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedAllowedUsers returns the removed IDs of the "allowed_users" edge to the User entity.
+func (m *GroupMutation) RemovedAllowedUsersIDs() (ids []int64) {
+	for id := range m.removedallowed_users {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// AllowedUsersIDs returns the "allowed_users" edge IDs in the mutation.
+func (m *GroupMutation) AllowedUsersIDs() (ids []int64) {
+	for id := range m.allowed_users {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetAllowedUsers resets all changes to the "allowed_users" edge.
+func (m *GroupMutation) ResetAllowedUsers() {
+	m.allowed_users = nil
+	m.clearedallowed_users = false
+	m.removedallowed_users = nil
+}
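+
+// NOTE (editorial, hand-written; not emitted by entc): every edge on this
+// mutation uses the same bookkeeping seen above: added IDs are tracked in one
+// map, removed IDs in another, and ClearXxx flips a flag. A hook can read
+// that pending state before it is persisted. A minimal consumer-side sketch
+// (assumes imports of context, log, and this generated ent package; the
+// helper name auditGroupEdges and the log wording are hypothetical):
+//
+//	func auditGroupEdges(next ent.Mutator) ent.Mutator {
+//		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+//			if gm, ok := m.(*ent.GroupMutation); ok {
+//				log.Printf("group accounts edge: +%d -%d cleared=%v",
+//					len(gm.AccountsIDs()), len(gm.RemovedAccountsIDs()), gm.AccountsCleared())
+//			}
+//			return next.Mutate(ctx, m)
+//		})
+//	}
+
+// Where appends a list of predicates to the GroupMutation builder.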
+func (m *GroupMutation) Where(ps ...predicate.Group) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the GroupMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *GroupMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Group, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *GroupMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *GroupMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Group). +func (m *GroupMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *GroupMutation) Fields() []string { + fields := make([]string, 0, 19) + if m.created_at != nil { + fields = append(fields, group.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, group.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, group.FieldDeletedAt) + } + if m.name != nil { + fields = append(fields, group.FieldName) + } + if m.description != nil { + fields = append(fields, group.FieldDescription) + } + if m.rate_multiplier != nil { + fields = append(fields, group.FieldRateMultiplier) + } + if m.is_exclusive != nil { + fields = append(fields, group.FieldIsExclusive) + } + if m.status != nil { + fields = append(fields, group.FieldStatus) + } + if m.platform != nil { + fields = append(fields, group.FieldPlatform) + } + if m.subscription_type != nil { + fields = append(fields, group.FieldSubscriptionType) + } + if m.daily_limit_usd != nil { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.weekly_limit_usd != nil { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.monthly_limit_usd != nil { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + if m.default_validity_days != nil { + fields = append(fields, group.FieldDefaultValidityDays) + } + if m.image_price_1k != nil { + fields = append(fields, group.FieldImagePrice1k) + } + if m.image_price_2k != nil { + fields = append(fields, group.FieldImagePrice2k) + } + if m.image_price_4k != nil { + fields = append(fields, group.FieldImagePrice4k) + } + if m.claude_code_only != nil { + fields = append(fields, group.FieldClaudeCodeOnly) + } + if m.fallback_group_id != nil { + fields = append(fields, group.FieldFallbackGroupID) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *GroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case group.FieldCreatedAt: + return m.CreatedAt() + case group.FieldUpdatedAt: + return m.UpdatedAt() + case group.FieldDeletedAt: + return m.DeletedAt() + case group.FieldName: + return m.Name() + case group.FieldDescription: + return m.Description() + case group.FieldRateMultiplier: + return m.RateMultiplier() + case group.FieldIsExclusive: + return m.IsExclusive() + case group.FieldStatus: + return m.Status() + case group.FieldPlatform: + return m.Platform() + case group.FieldSubscriptionType: + return m.SubscriptionType() + case group.FieldDailyLimitUsd: + return m.DailyLimitUsd() + case group.FieldWeeklyLimitUsd: + return m.WeeklyLimitUsd() + case group.FieldMonthlyLimitUsd: + return m.MonthlyLimitUsd() + case group.FieldDefaultValidityDays: + return m.DefaultValidityDays() + case group.FieldImagePrice1k: + return m.ImagePrice1k() + case group.FieldImagePrice2k: + return m.ImagePrice2k() + case group.FieldImagePrice4k: + return m.ImagePrice4k() + case group.FieldClaudeCodeOnly: + return m.ClaudeCodeOnly() + case group.FieldFallbackGroupID: + return m.FallbackGroupID() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case group.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case group.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case group.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case group.FieldName: + return m.OldName(ctx) + case group.FieldDescription: + return m.OldDescription(ctx) + case group.FieldRateMultiplier: + return m.OldRateMultiplier(ctx) + case group.FieldIsExclusive: + return m.OldIsExclusive(ctx) + case group.FieldStatus: + return m.OldStatus(ctx) + case group.FieldPlatform: + return m.OldPlatform(ctx) + case group.FieldSubscriptionType: + return m.OldSubscriptionType(ctx) + case group.FieldDailyLimitUsd: + return m.OldDailyLimitUsd(ctx) + case group.FieldWeeklyLimitUsd: + return m.OldWeeklyLimitUsd(ctx) + case group.FieldMonthlyLimitUsd: + return m.OldMonthlyLimitUsd(ctx) + case group.FieldDefaultValidityDays: + return m.OldDefaultValidityDays(ctx) + case group.FieldImagePrice1k: + return m.OldImagePrice1k(ctx) + case group.FieldImagePrice2k: + return m.OldImagePrice2k(ctx) + case group.FieldImagePrice4k: + return m.OldImagePrice4k(ctx) + case group.FieldClaudeCodeOnly: + return m.OldClaudeCodeOnly(ctx) + case group.FieldFallbackGroupID: + return m.OldFallbackGroupID(ctx) + } + return nil, fmt.Errorf("unknown Group field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *GroupMutation) SetField(name string, value ent.Value) error { + switch name { + case group.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case group.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case group.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case group.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case group.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case group.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateMultiplier(v) + return nil + case group.FieldIsExclusive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsExclusive(v) + return nil + case group.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case group.FieldPlatform: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlatform(v) + return nil + case group.FieldSubscriptionType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSubscriptionType(v) + return nil + case group.FieldDailyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDailyLimitUsd(v) + return nil + case group.FieldWeeklyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWeeklyLimitUsd(v) + return nil + case group.FieldMonthlyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonthlyLimitUsd(v) + return nil + case group.FieldDefaultValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDefaultValidityDays(v) + return nil + case group.FieldImagePrice1k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImagePrice1k(v) + return nil + case group.FieldImagePrice2k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImagePrice2k(v) + return nil + case group.FieldImagePrice4k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImagePrice4k(v) + return nil + case group.FieldClaudeCodeOnly: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetClaudeCodeOnly(v) + return nil + case group.FieldFallbackGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFallbackGroupID(v) + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedFields returns all numeric fields 
that were incremented/decremented during +// this mutation. +func (m *GroupMutation) AddedFields() []string { + var fields []string + if m.addrate_multiplier != nil { + fields = append(fields, group.FieldRateMultiplier) + } + if m.adddaily_limit_usd != nil { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.addweekly_limit_usd != nil { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.addmonthly_limit_usd != nil { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + if m.adddefault_validity_days != nil { + fields = append(fields, group.FieldDefaultValidityDays) + } + if m.addimage_price_1k != nil { + fields = append(fields, group.FieldImagePrice1k) + } + if m.addimage_price_2k != nil { + fields = append(fields, group.FieldImagePrice2k) + } + if m.addimage_price_4k != nil { + fields = append(fields, group.FieldImagePrice4k) + } + if m.addfallback_group_id != nil { + fields = append(fields, group.FieldFallbackGroupID) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *GroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case group.FieldRateMultiplier: + return m.AddedRateMultiplier() + case group.FieldDailyLimitUsd: + return m.AddedDailyLimitUsd() + case group.FieldWeeklyLimitUsd: + return m.AddedWeeklyLimitUsd() + case group.FieldMonthlyLimitUsd: + return m.AddedMonthlyLimitUsd() + case group.FieldDefaultValidityDays: + return m.AddedDefaultValidityDays() + case group.FieldImagePrice1k: + return m.AddedImagePrice1k() + case group.FieldImagePrice2k: + return m.AddedImagePrice2k() + case group.FieldImagePrice4k: + return m.AddedImagePrice4k() + case group.FieldFallbackGroupID: + return m.AddedFallbackGroupID() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *GroupMutation) AddField(name string, value ent.Value) error { + switch name { + case group.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRateMultiplier(v) + return nil + case group.FieldDailyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDailyLimitUsd(v) + return nil + case group.FieldWeeklyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddWeeklyLimitUsd(v) + return nil + case group.FieldMonthlyLimitUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddMonthlyLimitUsd(v) + return nil + case group.FieldDefaultValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDefaultValidityDays(v) + return nil + case group.FieldImagePrice1k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddImagePrice1k(v) + return nil + case group.FieldImagePrice2k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddImagePrice2k(v) + return nil + case group.FieldImagePrice4k: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddImagePrice4k(v) + return nil + case group.FieldFallbackGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddFallbackGroupID(v) + return nil + } + return fmt.Errorf("unknown Group numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *GroupMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(group.FieldDeletedAt) { + fields = append(fields, group.FieldDeletedAt) + } + if m.FieldCleared(group.FieldDescription) { + fields = append(fields, group.FieldDescription) + } + if m.FieldCleared(group.FieldDailyLimitUsd) { + fields = append(fields, group.FieldDailyLimitUsd) + } + if m.FieldCleared(group.FieldWeeklyLimitUsd) { + fields = append(fields, group.FieldWeeklyLimitUsd) + } + if m.FieldCleared(group.FieldMonthlyLimitUsd) { + fields = append(fields, group.FieldMonthlyLimitUsd) + } + if m.FieldCleared(group.FieldImagePrice1k) { + fields = append(fields, group.FieldImagePrice1k) + } + if m.FieldCleared(group.FieldImagePrice2k) { + fields = append(fields, group.FieldImagePrice2k) + } + if m.FieldCleared(group.FieldImagePrice4k) { + fields = append(fields, group.FieldImagePrice4k) + } + if m.FieldCleared(group.FieldFallbackGroupID) { + fields = append(fields, group.FieldFallbackGroupID) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *GroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *GroupMutation) ClearField(name string) error { + switch name { + case group.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case group.FieldDescription: + m.ClearDescription() + return nil + case group.FieldDailyLimitUsd: + m.ClearDailyLimitUsd() + return nil + case group.FieldWeeklyLimitUsd: + m.ClearWeeklyLimitUsd() + return nil + case group.FieldMonthlyLimitUsd: + m.ClearMonthlyLimitUsd() + return nil + case group.FieldImagePrice1k: + m.ClearImagePrice1k() + return nil + case group.FieldImagePrice2k: + m.ClearImagePrice2k() + return nil + case group.FieldImagePrice4k: + m.ClearImagePrice4k() + return nil + case group.FieldFallbackGroupID: + m.ClearFallbackGroupID() + return nil + } + return fmt.Errorf("unknown Group nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *GroupMutation) ResetField(name string) error { + switch name { + case group.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case group.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case group.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case group.FieldName: + m.ResetName() + return nil + case group.FieldDescription: + m.ResetDescription() + return nil + case group.FieldRateMultiplier: + m.ResetRateMultiplier() + return nil + case group.FieldIsExclusive: + m.ResetIsExclusive() + return nil + case group.FieldStatus: + m.ResetStatus() + return nil + case group.FieldPlatform: + m.ResetPlatform() + return nil + case group.FieldSubscriptionType: + m.ResetSubscriptionType() + return nil + case group.FieldDailyLimitUsd: + m.ResetDailyLimitUsd() + return nil + case group.FieldWeeklyLimitUsd: + m.ResetWeeklyLimitUsd() + return nil + case group.FieldMonthlyLimitUsd: + m.ResetMonthlyLimitUsd() + return nil + case group.FieldDefaultValidityDays: + m.ResetDefaultValidityDays() + return nil + case group.FieldImagePrice1k: + m.ResetImagePrice1k() + return nil + case group.FieldImagePrice2k: + m.ResetImagePrice2k() + return nil + case group.FieldImagePrice4k: + m.ResetImagePrice4k() + return nil + case group.FieldClaudeCodeOnly: + m.ResetClaudeCodeOnly() + return nil + case group.FieldFallbackGroupID: + m.ResetFallbackGroupID() + return nil + } + return fmt.Errorf("unknown Group field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *GroupMutation) AddedEdges() []string { + edges := make([]string, 0, 6) + if m.api_keys != nil { + edges = append(edges, group.EdgeAPIKeys) + } + if m.redeem_codes != nil { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.subscriptions != nil { + edges = append(edges, group.EdgeSubscriptions) + } + if m.usage_logs != nil { + edges = append(edges, group.EdgeUsageLogs) + } + if m.accounts != nil { + edges = append(edges, group.EdgeAccounts) + } + if m.allowed_users != nil { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *GroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case group.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.api_keys)) + for id := range m.api_keys { + ids = append(ids, id) + } + return ids + case group.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.redeem_codes)) + for id := range m.redeem_codes { + ids = append(ids, id) + } + return ids + case group.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.subscriptions)) + for id := range m.subscriptions { + ids = append(ids, id) + } + return ids + case group.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.usage_logs)) + for id := range m.usage_logs { + ids = append(ids, id) + } + return ids + case group.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.accounts)) + for id := range m.accounts { + ids = append(ids, id) + } + return ids + case group.EdgeAllowedUsers: + ids := make([]ent.Value, 0, len(m.allowed_users)) + for id := range m.allowed_users { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *GroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 6) + if m.removedapi_keys != nil { + edges = append(edges, group.EdgeAPIKeys) + } + if m.removedredeem_codes != nil { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.removedsubscriptions != nil { + edges = append(edges, group.EdgeSubscriptions) + } + if m.removedusage_logs != nil { + edges = append(edges, group.EdgeUsageLogs) + } + if m.removedaccounts != nil { + edges = append(edges, group.EdgeAccounts) + } + if m.removedallowed_users != nil { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *GroupMutation) RemovedIDs(name string) []ent.Value { + switch name { + case group.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.removedapi_keys)) + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return ids + case group.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.removedredeem_codes)) + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return ids + case group.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.removedsubscriptions)) + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return ids + case group.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.removedusage_logs)) + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return ids + case group.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.removedaccounts)) + for id := range m.removedaccounts { + ids = append(ids, id) + } + return ids + case group.EdgeAllowedUsers: + ids := make([]ent.Value, 0, len(m.removedallowed_users)) + for id := range m.removedallowed_users { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *GroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 6) + if m.clearedapi_keys { + edges = append(edges, group.EdgeAPIKeys) + } + if m.clearedredeem_codes { + edges = append(edges, group.EdgeRedeemCodes) + } + if m.clearedsubscriptions { + edges = append(edges, group.EdgeSubscriptions) + } + if m.clearedusage_logs { + edges = append(edges, group.EdgeUsageLogs) + } + if m.clearedaccounts { + edges = append(edges, group.EdgeAccounts) + } + if m.clearedallowed_users { + edges = append(edges, group.EdgeAllowedUsers) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *GroupMutation) EdgeCleared(name string) bool { + switch name { + case group.EdgeAPIKeys: + return m.clearedapi_keys + case group.EdgeRedeemCodes: + return m.clearedredeem_codes + case group.EdgeSubscriptions: + return m.clearedsubscriptions + case group.EdgeUsageLogs: + return m.clearedusage_logs + case group.EdgeAccounts: + return m.clearedaccounts + case group.EdgeAllowedUsers: + return m.clearedallowed_users + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *GroupMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Group unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *GroupMutation) ResetEdge(name string) error { + switch name { + case group.EdgeAPIKeys: + m.ResetAPIKeys() + return nil + case group.EdgeRedeemCodes: + m.ResetRedeemCodes() + return nil + case group.EdgeSubscriptions: + m.ResetSubscriptions() + return nil + case group.EdgeUsageLogs: + m.ResetUsageLogs() + return nil + case group.EdgeAccounts: + m.ResetAccounts() + return nil + case group.EdgeAllowedUsers: + m.ResetAllowedUsers() + return nil + } + return fmt.Errorf("unknown Group edge %s", name) +} + +// PromoCodeMutation represents an operation that mutates the PromoCode nodes in the graph. +type PromoCodeMutation struct { + config + op Op + typ string + id *int64 + code *string + bonus_amount *float64 + addbonus_amount *float64 + max_uses *int + addmax_uses *int + used_count *int + addused_count *int + status *string + expires_at *time.Time + notes *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + usage_records map[int64]struct{} + removedusage_records map[int64]struct{} + clearedusage_records bool + done bool + oldValue func(context.Context) (*PromoCode, error) + predicates []predicate.PromoCode +} + +var _ ent.Mutation = (*PromoCodeMutation)(nil) + +// promocodeOption allows management of the mutation configuration using functional options. +type promocodeOption func(*PromoCodeMutation) + +// newPromoCodeMutation creates new mutation for the PromoCode entity. +func newPromoCodeMutation(c config, op Op, opts ...promocodeOption) *PromoCodeMutation { + m := &PromoCodeMutation{ + config: c, + op: op, + typ: TypePromoCode, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPromoCodeID sets the ID field of the mutation. 
+func withPromoCodeID(id int64) promocodeOption {
+	return func(m *PromoCodeMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *PromoCode
+		)
+		m.oldValue = func(ctx context.Context) (*PromoCode, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().PromoCode.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withPromoCode sets the old PromoCode of the mutation.
+func withPromoCode(node *PromoCode) promocodeOption {
+	return func(m *PromoCodeMutation) {
+		m.oldValue = func(context.Context) (*PromoCode, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m PromoCodeMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m PromoCodeMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *PromoCodeMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *PromoCodeMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().PromoCode.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCode sets the "code" field.
+func (m *PromoCodeMutation) SetCode(s string) {
+	m.code = &s
+}
+
+// Code returns the value of the "code" field in the mutation.
+func (m *PromoCodeMutation) Code() (r string, exists bool) {
+	v := m.code
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCode returns the old "code" field's value of the PromoCode entity.
+// If the PromoCode object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *PromoCodeMutation) OldCode(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCode is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCode requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCode: %w", err)
+	}
+	return oldValue.Code, nil
+}
+
+// ResetCode resets all changes to the "code" field.
+func (m *PromoCodeMutation) ResetCode() {
+	m.code = nil
+}
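+
+// NOTE (editorial, hand-written; not emitted by entc): the Old* getters are
+// backed by the lazy oldValue loader installed in withPromoCodeID above, so
+// the previous row is fetched at most once per mutation, and only on
+// UpdateOne. A minimal consumer-side sketch of comparing old and new values
+// in a hook (assumes imports of context, log, and this generated ent package;
+// logCodeChange and the log wording are hypothetical):
+//
+//	func logCodeChange(next ent.Mutator) ent.Mutator {
+//		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
+//			if pm, ok := m.(*ent.PromoCodeMutation); ok && pm.Op().Is(ent.OpUpdateOne) {
+//				if newCode, ok := pm.Code(); ok {
+//					if oldCode, err := pm.OldCode(ctx); err == nil && oldCode != newCode {
+//						log.Printf("promo code renamed: %q -> %q", oldCode, newCode)
+//					}
+//				}
+//			}
+//			return next.Mutate(ctx, m)
+//		})
+//	}
+
+// SetBonusAmount sets the "bonus_amount" field.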
+func (m *PromoCodeMutation) SetBonusAmount(f float64) { + m.bonus_amount = &f + m.addbonus_amount = nil +} + +// BonusAmount returns the value of the "bonus_amount" field in the mutation. +func (m *PromoCodeMutation) BonusAmount() (r float64, exists bool) { + v := m.bonus_amount + if v == nil { + return + } + return *v, true +} + +// OldBonusAmount returns the old "bonus_amount" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldBonusAmount(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBonusAmount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBonusAmount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBonusAmount: %w", err) + } + return oldValue.BonusAmount, nil +} + +// AddBonusAmount adds f to the "bonus_amount" field. +func (m *PromoCodeMutation) AddBonusAmount(f float64) { + if m.addbonus_amount != nil { + *m.addbonus_amount += f + } else { + m.addbonus_amount = &f + } +} + +// AddedBonusAmount returns the value that was added to the "bonus_amount" field in this mutation. +func (m *PromoCodeMutation) AddedBonusAmount() (r float64, exists bool) { + v := m.addbonus_amount + if v == nil { + return + } + return *v, true +} + +// ResetBonusAmount resets all changes to the "bonus_amount" field. +func (m *PromoCodeMutation) ResetBonusAmount() { + m.bonus_amount = nil + m.addbonus_amount = nil +} + +// SetMaxUses sets the "max_uses" field. +func (m *PromoCodeMutation) SetMaxUses(i int) { + m.max_uses = &i + m.addmax_uses = nil +} + +// MaxUses returns the value of the "max_uses" field in the mutation. +func (m *PromoCodeMutation) MaxUses() (r int, exists bool) { + v := m.max_uses + if v == nil { + return + } + return *v, true +} + +// OldMaxUses returns the old "max_uses" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldMaxUses(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMaxUses is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMaxUses requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMaxUses: %w", err) + } + return oldValue.MaxUses, nil +} + +// AddMaxUses adds i to the "max_uses" field. +func (m *PromoCodeMutation) AddMaxUses(i int) { + if m.addmax_uses != nil { + *m.addmax_uses += i + } else { + m.addmax_uses = &i + } +} + +// AddedMaxUses returns the value that was added to the "max_uses" field in this mutation. +func (m *PromoCodeMutation) AddedMaxUses() (r int, exists bool) { + v := m.addmax_uses + if v == nil { + return + } + return *v, true +} + +// ResetMaxUses resets all changes to the "max_uses" field. +func (m *PromoCodeMutation) ResetMaxUses() { + m.max_uses = nil + m.addmax_uses = nil +} + +// SetUsedCount sets the "used_count" field. 
+func (m *PromoCodeMutation) SetUsedCount(i int) { + m.used_count = &i + m.addused_count = nil +} + +// UsedCount returns the value of the "used_count" field in the mutation. +func (m *PromoCodeMutation) UsedCount() (r int, exists bool) { + v := m.used_count + if v == nil { + return + } + return *v, true +} + +// OldUsedCount returns the old "used_count" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldUsedCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedCount: %w", err) + } + return oldValue.UsedCount, nil +} + +// AddUsedCount adds i to the "used_count" field. +func (m *PromoCodeMutation) AddUsedCount(i int) { + if m.addused_count != nil { + *m.addused_count += i + } else { + m.addused_count = &i + } +} + +// AddedUsedCount returns the value that was added to the "used_count" field in this mutation. +func (m *PromoCodeMutation) AddedUsedCount() (r int, exists bool) { + v := m.addused_count + if v == nil { + return + } + return *v, true +} + +// ResetUsedCount resets all changes to the "used_count" field. +func (m *PromoCodeMutation) ResetUsedCount() { + m.used_count = nil + m.addused_count = nil +} + +// SetStatus sets the "status" field. +func (m *PromoCodeMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *PromoCodeMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *PromoCodeMutation) ResetStatus() { + m.status = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *PromoCodeMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *PromoCodeMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PromoCodeMutation) OldExpiresAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (m *PromoCodeMutation) ClearExpiresAt() { + m.expires_at = nil + m.clearedFields[promocode.FieldExpiresAt] = struct{}{} +} + +// ExpiresAtCleared returns if the "expires_at" field was cleared in this mutation. +func (m *PromoCodeMutation) ExpiresAtCleared() bool { + _, ok := m.clearedFields[promocode.FieldExpiresAt] + return ok +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *PromoCodeMutation) ResetExpiresAt() { + m.expires_at = nil + delete(m.clearedFields, promocode.FieldExpiresAt) +} + +// SetNotes sets the "notes" field. +func (m *PromoCodeMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *PromoCodeMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *PromoCodeMutation) ClearNotes() { + m.notes = nil + m.clearedFields[promocode.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *PromoCodeMutation) NotesCleared() bool { + _, ok := m.clearedFields[promocode.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *PromoCodeMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, promocode.FieldNotes) +} + +// SetCreatedAt sets the "created_at" field. +func (m *PromoCodeMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *PromoCodeMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PromoCodeMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *PromoCodeMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *PromoCodeMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *PromoCodeMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *PromoCodeMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by ids. +func (m *PromoCodeMutation) AddUsageRecordIDs(ids ...int64) { + if m.usage_records == nil { + m.usage_records = make(map[int64]struct{}) + } + for i := range ids { + m.usage_records[ids[i]] = struct{}{} + } +} + +// ClearUsageRecords clears the "usage_records" edge to the PromoCodeUsage entity. +func (m *PromoCodeMutation) ClearUsageRecords() { + m.clearedusage_records = true +} + +// UsageRecordsCleared reports if the "usage_records" edge to the PromoCodeUsage entity was cleared. +func (m *PromoCodeMutation) UsageRecordsCleared() bool { + return m.clearedusage_records +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (m *PromoCodeMutation) RemoveUsageRecordIDs(ids ...int64) { + if m.removedusage_records == nil { + m.removedusage_records = make(map[int64]struct{}) + } + for i := range ids { + delete(m.usage_records, ids[i]) + m.removedusage_records[ids[i]] = struct{}{} + } +} + +// RemovedUsageRecords returns the removed IDs of the "usage_records" edge to the PromoCodeUsage entity. +func (m *PromoCodeMutation) RemovedUsageRecordsIDs() (ids []int64) { + for id := range m.removedusage_records { + ids = append(ids, id) + } + return +} + +// UsageRecordsIDs returns the "usage_records" edge IDs in the mutation. +func (m *PromoCodeMutation) UsageRecordsIDs() (ids []int64) { + for id := range m.usage_records { + ids = append(ids, id) + } + return +} + +// ResetUsageRecords resets all changes to the "usage_records" edge. 
+func (m *PromoCodeMutation) ResetUsageRecords() {
+	m.usage_records = nil
+	m.clearedusage_records = false
+	m.removedusage_records = nil
+}
+
+// Where appends a list of predicates to the PromoCodeMutation builder.
+func (m *PromoCodeMutation) Where(ps ...predicate.PromoCode) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the PromoCodeMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *PromoCodeMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.PromoCode, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *PromoCodeMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *PromoCodeMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (PromoCode).
+func (m *PromoCodeMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *PromoCodeMutation) Fields() []string {
+	fields := make([]string, 0, 9)
+	if m.code != nil {
+		fields = append(fields, promocode.FieldCode)
+	}
+	if m.bonus_amount != nil {
+		fields = append(fields, promocode.FieldBonusAmount)
+	}
+	if m.max_uses != nil {
+		fields = append(fields, promocode.FieldMaxUses)
+	}
+	if m.used_count != nil {
+		fields = append(fields, promocode.FieldUsedCount)
+	}
+	if m.status != nil {
+		fields = append(fields, promocode.FieldStatus)
+	}
+	if m.expires_at != nil {
+		fields = append(fields, promocode.FieldExpiresAt)
+	}
+	if m.notes != nil {
+		fields = append(fields, promocode.FieldNotes)
+	}
+	if m.created_at != nil {
+		fields = append(fields, promocode.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, promocode.FieldUpdatedAt)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *PromoCodeMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case promocode.FieldCode:
+		return m.Code()
+	case promocode.FieldBonusAmount:
+		return m.BonusAmount()
+	case promocode.FieldMaxUses:
+		return m.MaxUses()
+	case promocode.FieldUsedCount:
+		return m.UsedCount()
+	case promocode.FieldStatus:
+		return m.Status()
+	case promocode.FieldExpiresAt:
+		return m.ExpiresAt()
+	case promocode.FieldNotes:
+		return m.Notes()
+	case promocode.FieldCreatedAt:
+		return m.CreatedAt()
+	case promocode.FieldUpdatedAt:
+		return m.UpdatedAt()
+	}
+	return nil, false
+}
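+
+// NOTE (editorial, hand-written; not emitted by entc): Fields and Field make
+// the mutation introspectable through the generic ent.Mutation interface, so
+// shared middleware can walk changed fields without knowing the concrete
+// mutation type. A minimal sketch (assumes an import of log;
+// dumpChangedFields is a hypothetical helper):
+//
+//	func dumpChangedFields(m ent.Mutation) {
+//		for _, name := range m.Fields() {
+//			if v, ok := m.Field(name); ok {
+//				log.Printf("%s.%s = %v", m.Type(), name, v)
+//			}
+//		}
+//	}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.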
+func (m *PromoCodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case promocode.FieldCode: + return m.OldCode(ctx) + case promocode.FieldBonusAmount: + return m.OldBonusAmount(ctx) + case promocode.FieldMaxUses: + return m.OldMaxUses(ctx) + case promocode.FieldUsedCount: + return m.OldUsedCount(ctx) + case promocode.FieldStatus: + return m.OldStatus(ctx) + case promocode.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case promocode.FieldNotes: + return m.OldNotes(ctx) + case promocode.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case promocode.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown PromoCode field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeMutation) SetField(name string, value ent.Value) error { + switch name { + case promocode.FieldCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCode(v) + return nil + case promocode.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBonusAmount(v) + return nil + case promocode.FieldMaxUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMaxUses(v) + return nil + case promocode.FieldUsedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedCount(v) + return nil + case promocode.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case promocode.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + case promocode.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + case promocode.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case promocode.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown PromoCode field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PromoCodeMutation) AddedFields() []string { + var fields []string + if m.addbonus_amount != nil { + fields = append(fields, promocode.FieldBonusAmount) + } + if m.addmax_uses != nil { + fields = append(fields, promocode.FieldMaxUses) + } + if m.addused_count != nil { + fields = append(fields, promocode.FieldUsedCount) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. 
+func (m *PromoCodeMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case promocode.FieldBonusAmount: + return m.AddedBonusAmount() + case promocode.FieldMaxUses: + return m.AddedMaxUses() + case promocode.FieldUsedCount: + return m.AddedUsedCount() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeMutation) AddField(name string, value ent.Value) error { + switch name { + case promocode.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBonusAmount(v) + return nil + case promocode.FieldMaxUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddMaxUses(v) + return nil + case promocode.FieldUsedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddUsedCount(v) + return nil + } + return fmt.Errorf("unknown PromoCode numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PromoCodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(promocode.FieldExpiresAt) { + fields = append(fields, promocode.FieldExpiresAt) + } + if m.FieldCleared(promocode.FieldNotes) { + fields = append(fields, promocode.FieldNotes) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PromoCodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PromoCodeMutation) ClearField(name string) error { + switch name { + case promocode.FieldExpiresAt: + m.ClearExpiresAt() + return nil + case promocode.FieldNotes: + m.ClearNotes() + return nil + } + return fmt.Errorf("unknown PromoCode nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PromoCodeMutation) ResetField(name string) error { + switch name { + case promocode.FieldCode: + m.ResetCode() + return nil + case promocode.FieldBonusAmount: + m.ResetBonusAmount() + return nil + case promocode.FieldMaxUses: + m.ResetMaxUses() + return nil + case promocode.FieldUsedCount: + m.ResetUsedCount() + return nil + case promocode.FieldStatus: + m.ResetStatus() + return nil + case promocode.FieldExpiresAt: + m.ResetExpiresAt() + return nil + case promocode.FieldNotes: + m.ResetNotes() + return nil + case promocode.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case promocode.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown PromoCode field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PromoCodeMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.usage_records != nil { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *PromoCodeMutation) AddedIDs(name string) []ent.Value { + switch name { + case promocode.EdgeUsageRecords: + ids := make([]ent.Value, 0, len(m.usage_records)) + for id := range m.usage_records { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PromoCodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedusage_records != nil { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PromoCodeMutation) RemovedIDs(name string) []ent.Value { + switch name { + case promocode.EdgeUsageRecords: + ids := make([]ent.Value, 0, len(m.removedusage_records)) + for id := range m.removedusage_records { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PromoCodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedusage_records { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PromoCodeMutation) EdgeCleared(name string) bool { + switch name { + case promocode.EdgeUsageRecords: + return m.clearedusage_records + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PromoCodeMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown PromoCode unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PromoCodeMutation) ResetEdge(name string) error { + switch name { + case promocode.EdgeUsageRecords: + m.ResetUsageRecords() + return nil + } + return fmt.Errorf("unknown PromoCode edge %s", name) +} + +// PromoCodeUsageMutation represents an operation that mutates the PromoCodeUsage nodes in the graph. +type PromoCodeUsageMutation struct { + config + op Op + typ string + id *int64 + bonus_amount *float64 + addbonus_amount *float64 + used_at *time.Time + clearedFields map[string]struct{} + promo_code *int64 + clearedpromo_code bool + user *int64 + cleareduser bool + done bool + oldValue func(context.Context) (*PromoCodeUsage, error) + predicates []predicate.PromoCodeUsage +} + +var _ ent.Mutation = (*PromoCodeUsageMutation)(nil) + +// promocodeusageOption allows management of the mutation configuration using functional options. +type promocodeusageOption func(*PromoCodeUsageMutation) + +// newPromoCodeUsageMutation creates new mutation for the PromoCodeUsage entity. +func newPromoCodeUsageMutation(c config, op Op, opts ...promocodeusageOption) *PromoCodeUsageMutation { + m := &PromoCodeUsageMutation{ + config: c, + op: op, + typ: TypePromoCodeUsage, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPromoCodeUsageID sets the ID field of the mutation. 
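+// The old value is loaded lazily and memoized: the first call to m.oldValue
+// issues a single PromoCodeUsage.Get, later calls reuse the cached row, and
+// calls made after the mutation is done return an error. Sketch:
+//
+//	old, err := m.oldValue(ctx) // first call: one database Get
+//	old, err = m.oldValue(ctx)  // cached via sync.Once; no extra query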
+func withPromoCodeUsageID(id int64) promocodeusageOption { + return func(m *PromoCodeUsageMutation) { + var ( + err error + once sync.Once + value *PromoCodeUsage + ) + m.oldValue = func(ctx context.Context) (*PromoCodeUsage, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().PromoCodeUsage.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPromoCodeUsage sets the old PromoCodeUsage of the mutation. +func withPromoCodeUsage(node *PromoCodeUsage) promocodeusageOption { + return func(m *PromoCodeUsageMutation) { + m.oldValue = func(context.Context) (*PromoCodeUsage, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PromoCodeUsageMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PromoCodeUsageMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PromoCodeUsageMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PromoCodeUsageMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().PromoCodeUsage.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (m *PromoCodeUsageMutation) SetPromoCodeID(i int64) { + m.promo_code = &i +} + +// PromoCodeID returns the value of the "promo_code_id" field in the mutation. +func (m *PromoCodeUsageMutation) PromoCodeID() (r int64, exists bool) { + v := m.promo_code + if v == nil { + return + } + return *v, true +} + +// OldPromoCodeID returns the old "promo_code_id" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
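+//
+// Typical use is inside an update hook, comparing the pre-image against the
+// staged value (sketch; error handling elided):
+//
+//	prev, _ := m.OldPromoCodeID(ctx) // value currently stored in the database
+//	next, _ := m.PromoCodeID()       // value staged by this mutation, if any
+//	_ = prev != next                 // detect re-pointing to another promo code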
+func (m *PromoCodeUsageMutation) OldPromoCodeID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPromoCodeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPromoCodeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPromoCodeID: %w", err) + } + return oldValue.PromoCodeID, nil +} + +// ResetPromoCodeID resets all changes to the "promo_code_id" field. +func (m *PromoCodeUsageMutation) ResetPromoCodeID() { + m.promo_code = nil +} + +// SetUserID sets the "user_id" field. +func (m *PromoCodeUsageMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *PromoCodeUsageMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *PromoCodeUsageMutation) ResetUserID() { + m.user = nil +} + +// SetBonusAmount sets the "bonus_amount" field. +func (m *PromoCodeUsageMutation) SetBonusAmount(f float64) { + m.bonus_amount = &f + m.addbonus_amount = nil +} + +// BonusAmount returns the value of the "bonus_amount" field in the mutation. +func (m *PromoCodeUsageMutation) BonusAmount() (r float64, exists bool) { + v := m.bonus_amount + if v == nil { + return + } + return *v, true +} + +// OldBonusAmount returns the old "bonus_amount" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldBonusAmount(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBonusAmount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBonusAmount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBonusAmount: %w", err) + } + return oldValue.BonusAmount, nil +} + +// AddBonusAmount adds f to the "bonus_amount" field. +func (m *PromoCodeUsageMutation) AddBonusAmount(f float64) { + if m.addbonus_amount != nil { + *m.addbonus_amount += f + } else { + m.addbonus_amount = &f + } +} + +// AddedBonusAmount returns the value that was added to the "bonus_amount" field in this mutation. 
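+//
+// Consecutive AddBonusAmount calls accumulate into a single delta
+// (illustrative values):
+//
+//	m.AddBonusAmount(2.5)
+//	m.AddBonusAmount(1.0)
+//	d, ok := m.AddedBonusAmount() // d == 3.5, ok == true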
+func (m *PromoCodeUsageMutation) AddedBonusAmount() (r float64, exists bool) { + v := m.addbonus_amount + if v == nil { + return + } + return *v, true +} + +// ResetBonusAmount resets all changes to the "bonus_amount" field. +func (m *PromoCodeUsageMutation) ResetBonusAmount() { + m.bonus_amount = nil + m.addbonus_amount = nil +} + +// SetUsedAt sets the "used_at" field. +func (m *PromoCodeUsageMutation) SetUsedAt(t time.Time) { + m.used_at = &t +} + +// UsedAt returns the value of the "used_at" field in the mutation. +func (m *PromoCodeUsageMutation) UsedAt() (r time.Time, exists bool) { + v := m.used_at + if v == nil { + return + } + return *v, true +} + +// OldUsedAt returns the old "used_at" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldUsedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedAt: %w", err) + } + return oldValue.UsedAt, nil +} + +// ResetUsedAt resets all changes to the "used_at" field. +func (m *PromoCodeUsageMutation) ResetUsedAt() { + m.used_at = nil +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (m *PromoCodeUsageMutation) ClearPromoCode() { + m.clearedpromo_code = true + m.clearedFields[promocodeusage.FieldPromoCodeID] = struct{}{} +} + +// PromoCodeCleared reports if the "promo_code" edge to the PromoCode entity was cleared. +func (m *PromoCodeUsageMutation) PromoCodeCleared() bool { + return m.clearedpromo_code +} + +// PromoCodeIDs returns the "promo_code" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PromoCodeID instead. It exists only for internal usage by the builders. +func (m *PromoCodeUsageMutation) PromoCodeIDs() (ids []int64) { + if id := m.promo_code; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetPromoCode resets all changes to the "promo_code" edge. +func (m *PromoCodeUsageMutation) ResetPromoCode() { + m.promo_code = nil + m.clearedpromo_code = false +} + +// ClearUser clears the "user" edge to the User entity. +func (m *PromoCodeUsageMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[promocodeusage.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *PromoCodeUsageMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *PromoCodeUsageMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *PromoCodeUsageMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the PromoCodeUsageMutation builder. +func (m *PromoCodeUsageMutation) Where(ps ...predicate.PromoCodeUsage) { + m.predicates = append(m.predicates, ps...) 
+} + +// WhereP appends storage-level predicates to the PromoCodeUsageMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PromoCodeUsageMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PromoCodeUsage, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PromoCodeUsageMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PromoCodeUsageMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PromoCodeUsage). +func (m *PromoCodeUsageMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PromoCodeUsageMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.promo_code != nil { + fields = append(fields, promocodeusage.FieldPromoCodeID) + } + if m.user != nil { + fields = append(fields, promocodeusage.FieldUserID) + } + if m.bonus_amount != nil { + fields = append(fields, promocodeusage.FieldBonusAmount) + } + if m.used_at != nil { + fields = append(fields, promocodeusage.FieldUsedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PromoCodeUsageMutation) Field(name string) (ent.Value, bool) { + switch name { + case promocodeusage.FieldPromoCodeID: + return m.PromoCodeID() + case promocodeusage.FieldUserID: + return m.UserID() + case promocodeusage.FieldBonusAmount: + return m.BonusAmount() + case promocodeusage.FieldUsedAt: + return m.UsedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *PromoCodeUsageMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case promocodeusage.FieldPromoCodeID: + return m.OldPromoCodeID(ctx) + case promocodeusage.FieldUserID: + return m.OldUserID(ctx) + case promocodeusage.FieldBonusAmount: + return m.OldBonusAmount(ctx) + case promocodeusage.FieldUsedAt: + return m.OldUsedAt(ctx) + } + return nil, fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
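+//
+// The value must already have the field's exact Go type; no coercion is
+// attempted (sketch; 9.9 and 9 are arbitrary values):
+//
+//	_ = m.SetField(promocodeusage.FieldBonusAmount, 9.9)  // ok: float64
+//	err := m.SetField(promocodeusage.FieldBonusAmount, 9) // error: int is not float64
+//	_ = err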
+func (m *PromoCodeUsageMutation) SetField(name string, value ent.Value) error { + switch name { + case promocodeusage.FieldPromoCodeID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPromoCodeID(v) + return nil + case promocodeusage.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case promocodeusage.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBonusAmount(v) + return nil + case promocodeusage.FieldUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedAt(v) + return nil + } + return fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PromoCodeUsageMutation) AddedFields() []string { + var fields []string + if m.addbonus_amount != nil { + fields = append(fields, promocodeusage.FieldBonusAmount) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PromoCodeUsageMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case promocodeusage.FieldBonusAmount: + return m.AddedBonusAmount() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeUsageMutation) AddField(name string, value ent.Value) error { + switch name { + case promocodeusage.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBonusAmount(v) + return nil + } + return fmt.Errorf("unknown PromoCodeUsage numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PromoCodeUsageMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PromoCodeUsageMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PromoCodeUsageMutation) ClearField(name string) error { + return fmt.Errorf("unknown PromoCodeUsage nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *PromoCodeUsageMutation) ResetField(name string) error { + switch name { + case promocodeusage.FieldPromoCodeID: + m.ResetPromoCodeID() + return nil + case promocodeusage.FieldUserID: + m.ResetUserID() + return nil + case promocodeusage.FieldBonusAmount: + m.ResetBonusAmount() + return nil + case promocodeusage.FieldUsedAt: + m.ResetUsedAt() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. 
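+//
+// Because "promo_code_id" and "user_id" are the foreign keys of the two
+// unique edges, staging either field also reports the edge as added
+// (sketch; id 1 is hypothetical):
+//
+//	m.SetPromoCodeID(1)
+//	m.AddedEdges() // -> []string{"promo_code"}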
+func (m *PromoCodeUsageMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.promo_code != nil { + edges = append(edges, promocodeusage.EdgePromoCode) + } + if m.user != nil { + edges = append(edges, promocodeusage.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PromoCodeUsageMutation) AddedIDs(name string) []ent.Value { + switch name { + case promocodeusage.EdgePromoCode: + if id := m.promo_code; id != nil { + return []ent.Value{*id} + } + case promocodeusage.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PromoCodeUsageMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PromoCodeUsageMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PromoCodeUsageMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedpromo_code { + edges = append(edges, promocodeusage.EdgePromoCode) + } + if m.cleareduser { + edges = append(edges, promocodeusage.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PromoCodeUsageMutation) EdgeCleared(name string) bool { + switch name { + case promocodeusage.EdgePromoCode: + return m.clearedpromo_code + case promocodeusage.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PromoCodeUsageMutation) ClearEdge(name string) error { + switch name { + case promocodeusage.EdgePromoCode: + m.ClearPromoCode() + return nil + case promocodeusage.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PromoCodeUsageMutation) ResetEdge(name string) error { + switch name { + case promocodeusage.EdgePromoCode: + m.ResetPromoCode() + return nil + case promocodeusage.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage edge %s", name) +} + +// ProxyMutation represents an operation that mutates the Proxy nodes in the graph. +type ProxyMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + protocol *string + host *string + port *int + addport *int + username *string + password *string + status *string + clearedFields map[string]struct{} + accounts map[int64]struct{} + removedaccounts map[int64]struct{} + clearedaccounts bool + done bool + oldValue func(context.Context) (*Proxy, error) + predicates []predicate.Proxy +} + +var _ ent.Mutation = (*ProxyMutation)(nil) + +// proxyOption allows management of the mutation configuration using functional options. +type proxyOption func(*ProxyMutation) + +// newProxyMutation creates new mutation for the Proxy entity. 
+func newProxyMutation(c config, op Op, opts ...proxyOption) *ProxyMutation { + m := &ProxyMutation{ + config: c, + op: op, + typ: TypeProxy, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withProxyID sets the ID field of the mutation. +func withProxyID(id int64) proxyOption { + return func(m *ProxyMutation) { + var ( + err error + once sync.Once + value *Proxy + ) + m.oldValue = func(ctx context.Context) (*Proxy, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Proxy.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withProxy sets the old Proxy of the mutation. +func withProxy(node *Proxy) proxyOption { + return func(m *ProxyMutation) { + m.oldValue = func(context.Context) (*Proxy, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ProxyMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ProxyMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ProxyMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ProxyMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Proxy.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. +func (m *ProxyMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *ProxyMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ProxyMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ProxyMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ProxyMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ProxyMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ProxyMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *ProxyMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *ProxyMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *ProxyMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[proxy.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *ProxyMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[proxy.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *ProxyMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, proxy.FieldDeletedAt) +} + +// SetName sets the "name" field. 
+func (m *ProxyMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ProxyMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ProxyMutation) ResetName() { + m.name = nil +} + +// SetProtocol sets the "protocol" field. +func (m *ProxyMutation) SetProtocol(s string) { + m.protocol = &s +} + +// Protocol returns the value of the "protocol" field in the mutation. +func (m *ProxyMutation) Protocol() (r string, exists bool) { + v := m.protocol + if v == nil { + return + } + return *v, true +} + +// OldProtocol returns the old "protocol" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldProtocol(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProtocol is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProtocol requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProtocol: %w", err) + } + return oldValue.Protocol, nil +} + +// ResetProtocol resets all changes to the "protocol" field. +func (m *ProxyMutation) ResetProtocol() { + m.protocol = nil +} + +// SetHost sets the "host" field. +func (m *ProxyMutation) SetHost(s string) { + m.host = &s +} + +// Host returns the value of the "host" field in the mutation. +func (m *ProxyMutation) Host() (r string, exists bool) { + v := m.host + if v == nil { + return + } + return *v, true +} + +// OldHost returns the old "host" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldHost(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHost: %w", err) + } + return oldValue.Host, nil +} + +// ResetHost resets all changes to the "host" field. +func (m *ProxyMutation) ResetHost() { + m.host = nil +} + +// SetPort sets the "port" field. 
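+//
+// SetPort replaces any value staged so far and discards a pending AddPort
+// delta (illustrative):
+//
+//	m.AddPort(1)
+//	m.SetPort(8080) // the pending +1 is dropped
+//	m.AddedPort()   // -> 0, false
+//	m.Port()        // -> 8080, true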
+func (m *ProxyMutation) SetPort(i int) { + m.port = &i + m.addport = nil +} + +// Port returns the value of the "port" field in the mutation. +func (m *ProxyMutation) Port() (r int, exists bool) { + v := m.port + if v == nil { + return + } + return *v, true +} + +// OldPort returns the old "port" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldPort(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPort is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPort requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPort: %w", err) + } + return oldValue.Port, nil +} + +// AddPort adds i to the "port" field. +func (m *ProxyMutation) AddPort(i int) { + if m.addport != nil { + *m.addport += i + } else { + m.addport = &i + } +} + +// AddedPort returns the value that was added to the "port" field in this mutation. +func (m *ProxyMutation) AddedPort() (r int, exists bool) { + v := m.addport + if v == nil { + return + } + return *v, true +} + +// ResetPort resets all changes to the "port" field. +func (m *ProxyMutation) ResetPort() { + m.port = nil + m.addport = nil +} + +// SetUsername sets the "username" field. +func (m *ProxyMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *ProxyMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldUsername(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ClearUsername clears the value of the "username" field. +func (m *ProxyMutation) ClearUsername() { + m.username = nil + m.clearedFields[proxy.FieldUsername] = struct{}{} +} + +// UsernameCleared returns if the "username" field was cleared in this mutation. +func (m *ProxyMutation) UsernameCleared() bool { + _, ok := m.clearedFields[proxy.FieldUsername] + return ok +} + +// ResetUsername resets all changes to the "username" field. +func (m *ProxyMutation) ResetUsername() { + m.username = nil + delete(m.clearedFields, proxy.FieldUsername) +} + +// SetPassword sets the "password" field. +func (m *ProxyMutation) SetPassword(s string) { + m.password = &s +} + +// Password returns the value of the "password" field in the mutation. +func (m *ProxyMutation) Password() (r string, exists bool) { + v := m.password + if v == nil { + return + } + return *v, true +} + +// OldPassword returns the old "password" field's value of the Proxy entity. 
+// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldPassword(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPassword is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPassword requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPassword: %w", err) + } + return oldValue.Password, nil +} + +// ClearPassword clears the value of the "password" field. +func (m *ProxyMutation) ClearPassword() { + m.password = nil + m.clearedFields[proxy.FieldPassword] = struct{}{} +} + +// PasswordCleared returns if the "password" field was cleared in this mutation. +func (m *ProxyMutation) PasswordCleared() bool { + _, ok := m.clearedFields[proxy.FieldPassword] + return ok +} + +// ResetPassword resets all changes to the "password" field. +func (m *ProxyMutation) ResetPassword() { + m.password = nil + delete(m.clearedFields, proxy.FieldPassword) +} + +// SetStatus sets the "status" field. +func (m *ProxyMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *ProxyMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Proxy entity. +// If the Proxy object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ProxyMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *ProxyMutation) ResetStatus() { + m.status = nil +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by ids. +func (m *ProxyMutation) AddAccountIDs(ids ...int64) { + if m.accounts == nil { + m.accounts = make(map[int64]struct{}) + } + for i := range ids { + m.accounts[ids[i]] = struct{}{} + } +} + +// ClearAccounts clears the "accounts" edge to the Account entity. +func (m *ProxyMutation) ClearAccounts() { + m.clearedaccounts = true +} + +// AccountsCleared reports if the "accounts" edge to the Account entity was cleared. +func (m *ProxyMutation) AccountsCleared() bool { + return m.clearedaccounts +} + +// RemoveAccountIDs removes the "accounts" edge to the Account entity by IDs. +func (m *ProxyMutation) RemoveAccountIDs(ids ...int64) { + if m.removedaccounts == nil { + m.removedaccounts = make(map[int64]struct{}) + } + for i := range ids { + delete(m.accounts, ids[i]) + m.removedaccounts[ids[i]] = struct{}{} + } +} + +// RemovedAccounts returns the removed IDs of the "accounts" edge to the Account entity. 
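+//
+// Sketch (account ids 10 and 11 are hypothetical):
+//
+//	m.RemoveAccountIDs(10, 11)
+//	m.RemovedAccountsIDs() // -> [10 11], in unspecified (map) order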
+func (m *ProxyMutation) RemovedAccountsIDs() (ids []int64) { + for id := range m.removedaccounts { + ids = append(ids, id) + } + return +} + +// AccountsIDs returns the "accounts" edge IDs in the mutation. +func (m *ProxyMutation) AccountsIDs() (ids []int64) { + for id := range m.accounts { + ids = append(ids, id) + } + return +} + +// ResetAccounts resets all changes to the "accounts" edge. +func (m *ProxyMutation) ResetAccounts() { + m.accounts = nil + m.clearedaccounts = false + m.removedaccounts = nil +} + +// Where appends a list predicates to the ProxyMutation builder. +func (m *ProxyMutation) Where(ps ...predicate.Proxy) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the ProxyMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *ProxyMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Proxy, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *ProxyMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *ProxyMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Proxy). +func (m *ProxyMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *ProxyMutation) Fields() []string { + fields := make([]string, 0, 10) + if m.created_at != nil { + fields = append(fields, proxy.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, proxy.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, proxy.FieldDeletedAt) + } + if m.name != nil { + fields = append(fields, proxy.FieldName) + } + if m.protocol != nil { + fields = append(fields, proxy.FieldProtocol) + } + if m.host != nil { + fields = append(fields, proxy.FieldHost) + } + if m.port != nil { + fields = append(fields, proxy.FieldPort) + } + if m.username != nil { + fields = append(fields, proxy.FieldUsername) + } + if m.password != nil { + fields = append(fields, proxy.FieldPassword) + } + if m.status != nil { + fields = append(fields, proxy.FieldStatus) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *ProxyMutation) Field(name string) (ent.Value, bool) { + switch name { + case proxy.FieldCreatedAt: + return m.CreatedAt() + case proxy.FieldUpdatedAt: + return m.UpdatedAt() + case proxy.FieldDeletedAt: + return m.DeletedAt() + case proxy.FieldName: + return m.Name() + case proxy.FieldProtocol: + return m.Protocol() + case proxy.FieldHost: + return m.Host() + case proxy.FieldPort: + return m.Port() + case proxy.FieldUsername: + return m.Username() + case proxy.FieldPassword: + return m.Password() + case proxy.FieldStatus: + return m.Status() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
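+//
+// A common pattern is an audit hook that diffs every staged field against
+// its pre-image (sketch; the log call is illustrative only):
+//
+//	for _, f := range m.Fields() {
+//		before, _ := m.OldField(ctx, f)
+//		after, _ := m.Field(f)
+//		log.Printf("proxy.%s: %v -> %v", f, before, after)
+//	}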
+func (m *ProxyMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case proxy.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case proxy.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case proxy.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case proxy.FieldName: + return m.OldName(ctx) + case proxy.FieldProtocol: + return m.OldProtocol(ctx) + case proxy.FieldHost: + return m.OldHost(ctx) + case proxy.FieldPort: + return m.OldPort(ctx) + case proxy.FieldUsername: + return m.OldUsername(ctx) + case proxy.FieldPassword: + return m.OldPassword(ctx) + case proxy.FieldStatus: + return m.OldStatus(ctx) + } + return nil, fmt.Errorf("unknown Proxy field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *ProxyMutation) SetField(name string, value ent.Value) error { + switch name { + case proxy.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case proxy.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case proxy.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case proxy.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case proxy.FieldProtocol: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProtocol(v) + return nil + case proxy.FieldHost: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHost(v) + return nil + case proxy.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPort(v) + return nil + case proxy.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case proxy.FieldPassword: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPassword(v) + return nil + case proxy.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + } + return fmt.Errorf("unknown Proxy field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *ProxyMutation) AddedFields() []string { + var fields []string + if m.addport != nil { + fields = append(fields, proxy.FieldPort) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *ProxyMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case proxy.FieldPort: + return m.AddedPort() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *ProxyMutation) AddField(name string, value ent.Value) error { + switch name { + case proxy.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPort(v) + return nil + } + return fmt.Errorf("unknown Proxy numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *ProxyMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(proxy.FieldDeletedAt) { + fields = append(fields, proxy.FieldDeletedAt) + } + if m.FieldCleared(proxy.FieldUsername) { + fields = append(fields, proxy.FieldUsername) + } + if m.FieldCleared(proxy.FieldPassword) { + fields = append(fields, proxy.FieldPassword) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *ProxyMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *ProxyMutation) ClearField(name string) error { + switch name { + case proxy.FieldDeletedAt: + m.ClearDeletedAt() + return nil + case proxy.FieldUsername: + m.ClearUsername() + return nil + case proxy.FieldPassword: + m.ClearPassword() + return nil + } + return fmt.Errorf("unknown Proxy nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *ProxyMutation) ResetField(name string) error { + switch name { + case proxy.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case proxy.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case proxy.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case proxy.FieldName: + m.ResetName() + return nil + case proxy.FieldProtocol: + m.ResetProtocol() + return nil + case proxy.FieldHost: + m.ResetHost() + return nil + case proxy.FieldPort: + m.ResetPort() + return nil + case proxy.FieldUsername: + m.ResetUsername() + return nil + case proxy.FieldPassword: + m.ResetPassword() + return nil + case proxy.FieldStatus: + m.ResetStatus() + return nil + } + return fmt.Errorf("unknown Proxy field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *ProxyMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.accounts != nil { + edges = append(edges, proxy.EdgeAccounts) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *ProxyMutation) AddedIDs(name string) []ent.Value { + switch name { + case proxy.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.accounts)) + for id := range m.accounts { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *ProxyMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedaccounts != nil { + edges = append(edges, proxy.EdgeAccounts) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *ProxyMutation) RemovedIDs(name string) []ent.Value { + switch name { + case proxy.EdgeAccounts: + ids := make([]ent.Value, 0, len(m.removedaccounts)) + for id := range m.removedaccounts { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *ProxyMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedaccounts { + edges = append(edges, proxy.EdgeAccounts) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *ProxyMutation) EdgeCleared(name string) bool { + switch name { + case proxy.EdgeAccounts: + return m.clearedaccounts + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *ProxyMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown Proxy unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *ProxyMutation) ResetEdge(name string) error { + switch name { + case proxy.EdgeAccounts: + m.ResetAccounts() + return nil + } + return fmt.Errorf("unknown Proxy edge %s", name) +} + +// RedeemCodeMutation represents an operation that mutates the RedeemCode nodes in the graph. +type RedeemCodeMutation struct { + config + op Op + typ string + id *int64 + code *string + _type *string + value *float64 + addvalue *float64 + status *string + used_at *time.Time + notes *string + created_at *time.Time + validity_days *int + addvalidity_days *int + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*RedeemCode, error) + predicates []predicate.RedeemCode +} + +var _ ent.Mutation = (*RedeemCodeMutation)(nil) + +// redeemcodeOption allows management of the mutation configuration using functional options. +type redeemcodeOption func(*RedeemCodeMutation) + +// newRedeemCodeMutation creates new mutation for the RedeemCode entity. +func newRedeemCodeMutation(c config, op Op, opts ...redeemcodeOption) *RedeemCodeMutation { + m := &RedeemCodeMutation{ + config: c, + op: op, + typ: TypeRedeemCode, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withRedeemCodeID sets the ID field of the mutation. +func withRedeemCodeID(id int64) redeemcodeOption { + return func(m *RedeemCodeMutation) { + var ( + err error + once sync.Once + value *RedeemCode + ) + m.oldValue = func(ctx context.Context) (*RedeemCode, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().RedeemCode.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withRedeemCode sets the old RedeemCode of the mutation. +func withRedeemCode(node *RedeemCode) redeemcodeOption { + return func(m *RedeemCodeMutation) { + m.oldValue = func(context.Context) (*RedeemCode, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
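+//
+// This makes hooks transaction-safe: reads issued through m.Client() see the
+// same uncommitted state as the mutation itself (sketch):
+//
+//	n, err := m.Client().RedeemCode.Query().Count(ctx)
+//	_, _ = n, err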
+func (m RedeemCodeMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m RedeemCodeMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *RedeemCodeMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *RedeemCodeMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().RedeemCode.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCode sets the "code" field. +func (m *RedeemCodeMutation) SetCode(s string) { + m.code = &s +} + +// Code returns the value of the "code" field in the mutation. +func (m *RedeemCodeMutation) Code() (r string, exists bool) { + v := m.code + if v == nil { + return + } + return *v, true +} + +// OldCode returns the old "code" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldCode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCode: %w", err) + } + return oldValue.Code, nil +} + +// ResetCode resets all changes to the "code" field. +func (m *RedeemCodeMutation) ResetCode() { + m.code = nil +} + +// SetType sets the "type" field. +func (m *RedeemCodeMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *RedeemCodeMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RedeemCodeMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *RedeemCodeMutation) ResetType() { + m._type = nil +} + +// SetValue sets the "value" field. +func (m *RedeemCodeMutation) SetValue(f float64) { + m.value = &f + m.addvalue = nil +} + +// Value returns the value of the "value" field in the mutation. +func (m *RedeemCodeMutation) Value() (r float64, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldValue(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// AddValue adds f to the "value" field. +func (m *RedeemCodeMutation) AddValue(f float64) { + if m.addvalue != nil { + *m.addvalue += f + } else { + m.addvalue = &f + } +} + +// AddedValue returns the value that was added to the "value" field in this mutation. +func (m *RedeemCodeMutation) AddedValue() (r float64, exists bool) { + v := m.addvalue + if v == nil { + return + } + return *v, true +} + +// ResetValue resets all changes to the "value" field. +func (m *RedeemCodeMutation) ResetValue() { + m.value = nil + m.addvalue = nil +} + +// SetStatus sets the "status" field. +func (m *RedeemCodeMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *RedeemCodeMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *RedeemCodeMutation) ResetStatus() { + m.status = nil +} + +// SetUsedBy sets the "used_by" field. 
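+//
+// "used_by" is the foreign key of the "user" edge, so staging the field also
+// records the edge target (sketch; user id 42 is hypothetical, and UserIDs is
+// the generated edge accessor):
+//
+//	m.SetUsedBy(42)
+//	m.UserIDs() // -> [42]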
+func (m *RedeemCodeMutation) SetUsedBy(i int64) { + m.user = &i +} + +// UsedBy returns the value of the "used_by" field in the mutation. +func (m *RedeemCodeMutation) UsedBy() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUsedBy returns the old "used_by" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldUsedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedBy: %w", err) + } + return oldValue.UsedBy, nil +} + +// ClearUsedBy clears the value of the "used_by" field. +func (m *RedeemCodeMutation) ClearUsedBy() { + m.user = nil + m.clearedFields[redeemcode.FieldUsedBy] = struct{}{} +} + +// UsedByCleared returns if the "used_by" field was cleared in this mutation. +func (m *RedeemCodeMutation) UsedByCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldUsedBy] + return ok +} + +// ResetUsedBy resets all changes to the "used_by" field. +func (m *RedeemCodeMutation) ResetUsedBy() { + m.user = nil + delete(m.clearedFields, redeemcode.FieldUsedBy) +} + +// SetUsedAt sets the "used_at" field. +func (m *RedeemCodeMutation) SetUsedAt(t time.Time) { + m.used_at = &t +} + +// UsedAt returns the value of the "used_at" field in the mutation. +func (m *RedeemCodeMutation) UsedAt() (r time.Time, exists bool) { + v := m.used_at + if v == nil { + return + } + return *v, true +} + +// OldUsedAt returns the old "used_at" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldUsedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedAt: %w", err) + } + return oldValue.UsedAt, nil +} + +// ClearUsedAt clears the value of the "used_at" field. +func (m *RedeemCodeMutation) ClearUsedAt() { + m.used_at = nil + m.clearedFields[redeemcode.FieldUsedAt] = struct{}{} +} + +// UsedAtCleared returns if the "used_at" field was cleared in this mutation. +func (m *RedeemCodeMutation) UsedAtCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldUsedAt] + return ok +} + +// ResetUsedAt resets all changes to the "used_at" field. +func (m *RedeemCodeMutation) ResetUsedAt() { + m.used_at = nil + delete(m.clearedFields, redeemcode.FieldUsedAt) +} + +// SetNotes sets the "notes" field. +func (m *RedeemCodeMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. 
+func (m *RedeemCodeMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *RedeemCodeMutation) ClearNotes() { + m.notes = nil + m.clearedFields[redeemcode.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *RedeemCodeMutation) NotesCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *RedeemCodeMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, redeemcode.FieldNotes) +} + +// SetCreatedAt sets the "created_at" field. +func (m *RedeemCodeMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *RedeemCodeMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *RedeemCodeMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetGroupID sets the "group_id" field. +func (m *RedeemCodeMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *RedeemCodeMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *RedeemCodeMutation) OldGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ClearGroupID clears the value of the "group_id" field. +func (m *RedeemCodeMutation) ClearGroupID() { + m.group = nil + m.clearedFields[redeemcode.FieldGroupID] = struct{}{} +} + +// GroupIDCleared returns if the "group_id" field was cleared in this mutation. +func (m *RedeemCodeMutation) GroupIDCleared() bool { + _, ok := m.clearedFields[redeemcode.FieldGroupID] + return ok +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *RedeemCodeMutation) ResetGroupID() { + m.group = nil + delete(m.clearedFields, redeemcode.FieldGroupID) +} + +// SetValidityDays sets the "validity_days" field. +func (m *RedeemCodeMutation) SetValidityDays(i int) { + m.validity_days = &i + m.addvalidity_days = nil +} + +// ValidityDays returns the value of the "validity_days" field in the mutation. +func (m *RedeemCodeMutation) ValidityDays() (r int, exists bool) { + v := m.validity_days + if v == nil { + return + } + return *v, true +} + +// OldValidityDays returns the old "validity_days" field's value of the RedeemCode entity. +// If the RedeemCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *RedeemCodeMutation) OldValidityDays(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValidityDays is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValidityDays requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValidityDays: %w", err) + } + return oldValue.ValidityDays, nil +} + +// AddValidityDays adds i to the "validity_days" field. +func (m *RedeemCodeMutation) AddValidityDays(i int) { + if m.addvalidity_days != nil { + *m.addvalidity_days += i + } else { + m.addvalidity_days = &i + } +} + +// AddedValidityDays returns the value that was added to the "validity_days" field in this mutation. +func (m *RedeemCodeMutation) AddedValidityDays() (r int, exists bool) { + v := m.addvalidity_days + if v == nil { + return + } + return *v, true +} + +// ResetValidityDays resets all changes to the "validity_days" field. +func (m *RedeemCodeMutation) ResetValidityDays() { + m.validity_days = nil + m.addvalidity_days = nil +} + +// SetUserID sets the "user" edge to the User entity by id. +func (m *RedeemCodeMutation) SetUserID(id int64) { + m.user = &id +} + +// ClearUser clears the "user" edge to the User entity. +func (m *RedeemCodeMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[redeemcode.FieldUsedBy] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *RedeemCodeMutation) UserCleared() bool { + return m.UsedByCleared() || m.cleareduser +} + +// UserID returns the "user" edge ID in the mutation. 
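+// Because the "user" edge is keyed by the "used_by" field, SetUserID and
+// SetUsedBy write the same underlying value, so this mirrors UsedBy.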
+func (m *RedeemCodeMutation) UserID() (id int64, exists bool) {
+	if m.user != nil {
+		return *m.user, true
+	}
+	return
+}
+
+// UserIDs returns the "user" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// UserID instead. It exists only for internal usage by the builders.
+func (m *RedeemCodeMutation) UserIDs() (ids []int64) {
+	if id := m.user; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetUser resets all changes to the "user" edge.
+func (m *RedeemCodeMutation) ResetUser() {
+	m.user = nil
+	m.cleareduser = false
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *RedeemCodeMutation) ClearGroup() {
+	m.clearedgroup = true
+	m.clearedFields[redeemcode.FieldGroupID] = struct{}{}
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *RedeemCodeMutation) GroupCleared() bool {
+	return m.GroupIDCleared() || m.clearedgroup
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *RedeemCodeMutation) GroupIDs() (ids []int64) {
+	if id := m.group; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *RedeemCodeMutation) ResetGroup() {
+	m.group = nil
+	m.clearedgroup = false
+}
+
+// Where appends a list of predicates to the RedeemCodeMutation builder.
+func (m *RedeemCodeMutation) Where(ps ...predicate.RedeemCode) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the RedeemCodeMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *RedeemCodeMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.RedeemCode, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *RedeemCodeMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *RedeemCodeMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (RedeemCode).
+func (m *RedeemCodeMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *RedeemCodeMutation) Fields() []string {
+	fields := make([]string, 0, 10)
+	if m.code != nil {
+		fields = append(fields, redeemcode.FieldCode)
+	}
+	if m._type != nil {
+		fields = append(fields, redeemcode.FieldType)
+	}
+	if m.value != nil {
+		fields = append(fields, redeemcode.FieldValue)
+	}
+	if m.status != nil {
+		fields = append(fields, redeemcode.FieldStatus)
+	}
+	if m.user != nil {
+		fields = append(fields, redeemcode.FieldUsedBy)
+	}
+	if m.used_at != nil {
+		fields = append(fields, redeemcode.FieldUsedAt)
+	}
+	if m.notes != nil {
+		fields = append(fields, redeemcode.FieldNotes)
+	}
+	if m.created_at != nil {
+		fields = append(fields, redeemcode.FieldCreatedAt)
+	}
+	if m.group != nil {
+		fields = append(fields, redeemcode.FieldGroupID)
+	}
+	if m.validity_days != nil {
+		fields = append(fields, redeemcode.FieldValidityDays)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *RedeemCodeMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case redeemcode.FieldCode:
+		return m.Code()
+	case redeemcode.FieldType:
+		return m.GetType()
+	case redeemcode.FieldValue:
+		return m.Value()
+	case redeemcode.FieldStatus:
+		return m.Status()
+	case redeemcode.FieldUsedBy:
+		return m.UsedBy()
+	case redeemcode.FieldUsedAt:
+		return m.UsedAt()
+	case redeemcode.FieldNotes:
+		return m.Notes()
+	case redeemcode.FieldCreatedAt:
+		return m.CreatedAt()
+	case redeemcode.FieldGroupID:
+		return m.GroupID()
+	case redeemcode.FieldValidityDays:
+		return m.ValidityDays()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *RedeemCodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
+	switch name {
+	case redeemcode.FieldCode:
+		return m.OldCode(ctx)
+	case redeemcode.FieldType:
+		return m.OldType(ctx)
+	case redeemcode.FieldValue:
+		return m.OldValue(ctx)
+	case redeemcode.FieldStatus:
+		return m.OldStatus(ctx)
+	case redeemcode.FieldUsedBy:
+		return m.OldUsedBy(ctx)
+	case redeemcode.FieldUsedAt:
+		return m.OldUsedAt(ctx)
+	case redeemcode.FieldNotes:
+		return m.OldNotes(ctx)
+	case redeemcode.FieldCreatedAt:
+		return m.OldCreatedAt(ctx)
+	case redeemcode.FieldGroupID:
+		return m.OldGroupID(ctx)
+	case redeemcode.FieldValidityDays:
+		return m.OldValidityDays(ctx)
+	}
+	return nil, fmt.Errorf("unknown RedeemCode field %s", name)
+}
+
+// SetField sets the value of a field with the given name. It returns an error if
+// the field is not defined in the schema, or if the type mismatched the field
+// type.
+func (m *RedeemCodeMutation) SetField(name string, value ent.Value) error { + switch name { + case redeemcode.FieldCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCode(v) + return nil + case redeemcode.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case redeemcode.FieldValue: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case redeemcode.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case redeemcode.FieldUsedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedBy(v) + return nil + case redeemcode.FieldUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedAt(v) + return nil + case redeemcode.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + case redeemcode.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case redeemcode.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case redeemcode.FieldValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValidityDays(v) + return nil + } + return fmt.Errorf("unknown RedeemCode field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *RedeemCodeMutation) AddedFields() []string { + var fields []string + if m.addvalue != nil { + fields = append(fields, redeemcode.FieldValue) + } + if m.addvalidity_days != nil { + fields = append(fields, redeemcode.FieldValidityDays) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *RedeemCodeMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case redeemcode.FieldValue: + return m.AddedValue() + case redeemcode.FieldValidityDays: + return m.AddedValidityDays() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *RedeemCodeMutation) AddField(name string, value ent.Value) error { + switch name { + case redeemcode.FieldValue: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddValue(v) + return nil + case redeemcode.FieldValidityDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddValidityDays(v) + return nil + } + return fmt.Errorf("unknown RedeemCode numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *RedeemCodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(redeemcode.FieldUsedBy) { + fields = append(fields, redeemcode.FieldUsedBy) + } + if m.FieldCleared(redeemcode.FieldUsedAt) { + fields = append(fields, redeemcode.FieldUsedAt) + } + if m.FieldCleared(redeemcode.FieldNotes) { + fields = append(fields, redeemcode.FieldNotes) + } + if m.FieldCleared(redeemcode.FieldGroupID) { + fields = append(fields, redeemcode.FieldGroupID) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *RedeemCodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *RedeemCodeMutation) ClearField(name string) error { + switch name { + case redeemcode.FieldUsedBy: + m.ClearUsedBy() + return nil + case redeemcode.FieldUsedAt: + m.ClearUsedAt() + return nil + case redeemcode.FieldNotes: + m.ClearNotes() + return nil + case redeemcode.FieldGroupID: + m.ClearGroupID() + return nil + } + return fmt.Errorf("unknown RedeemCode nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *RedeemCodeMutation) ResetField(name string) error { + switch name { + case redeemcode.FieldCode: + m.ResetCode() + return nil + case redeemcode.FieldType: + m.ResetType() + return nil + case redeemcode.FieldValue: + m.ResetValue() + return nil + case redeemcode.FieldStatus: + m.ResetStatus() + return nil + case redeemcode.FieldUsedBy: + m.ResetUsedBy() + return nil + case redeemcode.FieldUsedAt: + m.ResetUsedAt() + return nil + case redeemcode.FieldNotes: + m.ResetNotes() + return nil + case redeemcode.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case redeemcode.FieldGroupID: + m.ResetGroupID() + return nil + case redeemcode.FieldValidityDays: + m.ResetValidityDays() + return nil + } + return fmt.Errorf("unknown RedeemCode field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *RedeemCodeMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, redeemcode.EdgeUser) + } + if m.group != nil { + edges = append(edges, redeemcode.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *RedeemCodeMutation) AddedIDs(name string) []ent.Value { + switch name { + case redeemcode.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case redeemcode.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *RedeemCodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *RedeemCodeMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *RedeemCodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, redeemcode.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, redeemcode.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *RedeemCodeMutation) EdgeCleared(name string) bool { + switch name { + case redeemcode.EdgeUser: + return m.cleareduser + case redeemcode.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *RedeemCodeMutation) ClearEdge(name string) error { + switch name { + case redeemcode.EdgeUser: + m.ClearUser() + return nil + case redeemcode.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown RedeemCode unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *RedeemCodeMutation) ResetEdge(name string) error { + switch name { + case redeemcode.EdgeUser: + m.ResetUser() + return nil + case redeemcode.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown RedeemCode edge %s", name) +} + +// SettingMutation represents an operation that mutates the Setting nodes in the graph. +type SettingMutation struct { + config + op Op + typ string + id *int64 + key *string + value *string + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*Setting, error) + predicates []predicate.Setting +} + +var _ ent.Mutation = (*SettingMutation)(nil) + +// settingOption allows management of the mutation configuration using functional options. +type settingOption func(*SettingMutation) + +// newSettingMutation creates new mutation for the Setting entity. +func newSettingMutation(c config, op Op, opts ...settingOption) *SettingMutation { + m := &SettingMutation{ + config: c, + op: op, + typ: TypeSetting, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withSettingID sets the ID field of the mutation. +func withSettingID(id int64) settingOption { + return func(m *SettingMutation) { + var ( + err error + once sync.Once + value *Setting + ) + m.oldValue = func(ctx context.Context) (*Setting, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Setting.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withSetting sets the old Setting of the mutation. +func withSetting(node *Setting) settingOption { + return func(m *SettingMutation) { + m.oldValue = func(context.Context) (*Setting, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m SettingMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. 
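+//
+// Illustrative sketch (an assumption about typical usage, not generated code):
+// inside a hook running within a transaction, the same tx can be reused for
+// auxiliary statements:
+//
+//	if tx, err := m.Tx(); err == nil {
+//		n, _ := tx.Setting.Query().Count(ctx) // runs on the same transaction
+//		_ = n
+//	}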
+func (m SettingMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *SettingMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *SettingMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().Setting.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetKey sets the "key" field.
+func (m *SettingMutation) SetKey(s string) {
+	m.key = &s
+}
+
+// Key returns the value of the "key" field in the mutation.
+func (m *SettingMutation) Key() (r string, exists bool) {
+	v := m.key
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldKey returns the old "key" field's value of the Setting entity.
+// If the Setting object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *SettingMutation) OldKey(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldKey is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldKey requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldKey: %w", err)
+	}
+	return oldValue.Key, nil
+}
+
+// ResetKey resets all changes to the "key" field.
+func (m *SettingMutation) ResetKey() {
+	m.key = nil
+}
+
+// SetValue sets the "value" field.
+func (m *SettingMutation) SetValue(s string) {
+	m.value = &s
+}
+
+// Value returns the value of the "value" field in the mutation.
+func (m *SettingMutation) Value() (r string, exists bool) {
+	v := m.value
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldValue returns the old "value" field's value of the Setting entity.
+// If the Setting object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *SettingMutation) OldValue(ctx context.Context) (v string, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldValue is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldValue requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldValue: %w", err)
+	}
+	return oldValue.Value, nil
+}
+
+// ResetValue resets all changes to the "value" field.
+func (m *SettingMutation) ResetValue() {
+	m.value = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *SettingMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *SettingMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the Setting entity.
+// If the Setting object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *SettingMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *SettingMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// Where appends a list of predicates to the SettingMutation builder.
+func (m *SettingMutation) Where(ps ...predicate.Setting) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the SettingMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *SettingMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.Setting, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *SettingMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *SettingMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (Setting).
+func (m *SettingMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *SettingMutation) Fields() []string {
+	fields := make([]string, 0, 3)
+	if m.key != nil {
+		fields = append(fields, setting.FieldKey)
+	}
+	if m.value != nil {
+		fields = append(fields, setting.FieldValue)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, setting.FieldUpdatedAt)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *SettingMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case setting.FieldKey:
+		return m.Key()
+	case setting.FieldValue:
+		return m.Value()
+	case setting.FieldUpdatedAt:
+		return m.UpdatedAt()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database. An error is
+// returned if the mutation operation is not UpdateOne, or the query to the
+// database failed.
+func (m *SettingMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case setting.FieldKey: + return m.OldKey(ctx) + case setting.FieldValue: + return m.OldValue(ctx) + case setting.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown Setting field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *SettingMutation) SetField(name string, value ent.Value) error { + switch name { + case setting.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case setting.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + case setting.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown Setting field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *SettingMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *SettingMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *SettingMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Setting numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *SettingMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *SettingMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *SettingMutation) ClearField(name string) error { + return fmt.Errorf("unknown Setting nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *SettingMutation) ResetField(name string) error { + switch name { + case setting.FieldKey: + m.ResetKey() + return nil + case setting.FieldValue: + m.ResetValue() + return nil + case setting.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown Setting field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *SettingMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *SettingMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *SettingMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *SettingMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *SettingMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *SettingMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *SettingMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown Setting unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *SettingMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown Setting edge %s", name) +} + +// UsageLogMutation represents an operation that mutates the UsageLog nodes in the graph. +type UsageLogMutation struct { + config + op Op + typ string + id *int64 + request_id *string + model *string + input_tokens *int + addinput_tokens *int + output_tokens *int + addoutput_tokens *int + cache_creation_tokens *int + addcache_creation_tokens *int + cache_read_tokens *int + addcache_read_tokens *int + cache_creation_5m_tokens *int + addcache_creation_5m_tokens *int + cache_creation_1h_tokens *int + addcache_creation_1h_tokens *int + input_cost *float64 + addinput_cost *float64 + output_cost *float64 + addoutput_cost *float64 + cache_creation_cost *float64 + addcache_creation_cost *float64 + cache_read_cost *float64 + addcache_read_cost *float64 + total_cost *float64 + addtotal_cost *float64 + actual_cost *float64 + addactual_cost *float64 + rate_multiplier *float64 + addrate_multiplier *float64 + account_rate_multiplier *float64 + addaccount_rate_multiplier *float64 + billing_type *int8 + addbilling_type *int8 + stream *bool + duration_ms *int + addduration_ms *int + first_token_ms *int + addfirst_token_ms *int + user_agent *string + ip_address *string + image_count *int + addimage_count *int + image_size *string + created_at *time.Time + clearedFields map[string]struct{} + user *int64 + cleareduser bool + api_key *int64 + clearedapi_key bool + account *int64 + clearedaccount bool + group *int64 + clearedgroup bool + subscription *int64 + clearedsubscription bool + done bool + oldValue func(context.Context) (*UsageLog, error) + predicates []predicate.UsageLog +} + +var _ ent.Mutation = (*UsageLogMutation)(nil) + +// usagelogOption allows management of the mutation configuration using functional options. +type usagelogOption func(*UsageLogMutation) + +// newUsageLogMutation creates new mutation for the UsageLog entity. +func newUsageLogMutation(c config, op Op, opts ...usagelogOption) *UsageLogMutation { + m := &UsageLogMutation{ + config: c, + op: op, + typ: TypeUsageLog, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUsageLogID sets the ID field of the mutation. 
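+// The old-value loader installed here memoizes its result via sync.Once, so
+// repeated Old* calls on the same mutation issue at most one database query.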
+func withUsageLogID(id int64) usagelogOption {
+	return func(m *UsageLogMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *UsageLog
+		)
+		m.oldValue = func(ctx context.Context) (*UsageLog, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().UsageLog.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withUsageLog sets the old UsageLog of the mutation.
+func withUsageLog(node *UsageLog) usagelogOption {
+	return func(m *UsageLogMutation) {
+		m.oldValue = func(context.Context) (*UsageLog, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m UsageLogMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m UsageLogMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *UsageLogMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UsageLogMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().UsageLog.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetUserID sets the "user_id" field.
+func (m *UsageLogMutation) SetUserID(i int64) {
+	m.user = &i
+}
+
+// UserID returns the value of the "user_id" field in the mutation.
+func (m *UsageLogMutation) UserID() (r int64, exists bool) {
+	v := m.user
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUserID returns the old "user_id" field's value of the UsageLog entity.
+// If the UsageLog object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UsageLogMutation) OldUserID(ctx context.Context) (v int64, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUserID is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUserID requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUserID: %w", err)
+	}
+	return oldValue.UserID, nil
+}
+
+// ResetUserID resets all changes to the "user_id" field.
+func (m *UsageLogMutation) ResetUserID() {
+	m.user = nil
+}
+
+// SetAPIKeyID sets the "api_key_id" field.
+func (m *UsageLogMutation) SetAPIKeyID(i int64) { + m.api_key = &i +} + +// APIKeyID returns the value of the "api_key_id" field in the mutation. +func (m *UsageLogMutation) APIKeyID() (r int64, exists bool) { + v := m.api_key + if v == nil { + return + } + return *v, true +} + +// OldAPIKeyID returns the old "api_key_id" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldAPIKeyID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAPIKeyID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAPIKeyID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAPIKeyID: %w", err) + } + return oldValue.APIKeyID, nil +} + +// ResetAPIKeyID resets all changes to the "api_key_id" field. +func (m *UsageLogMutation) ResetAPIKeyID() { + m.api_key = nil +} + +// SetAccountID sets the "account_id" field. +func (m *UsageLogMutation) SetAccountID(i int64) { + m.account = &i +} + +// AccountID returns the value of the "account_id" field in the mutation. +func (m *UsageLogMutation) AccountID() (r int64, exists bool) { + v := m.account + if v == nil { + return + } + return *v, true +} + +// OldAccountID returns the old "account_id" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldAccountID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccountID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccountID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccountID: %w", err) + } + return oldValue.AccountID, nil +} + +// ResetAccountID resets all changes to the "account_id" field. +func (m *UsageLogMutation) ResetAccountID() { + m.account = nil +} + +// SetRequestID sets the "request_id" field. +func (m *UsageLogMutation) SetRequestID(s string) { + m.request_id = &s +} + +// RequestID returns the value of the "request_id" field in the mutation. +func (m *UsageLogMutation) RequestID() (r string, exists bool) { + v := m.request_id + if v == nil { + return + } + return *v, true +} + +// OldRequestID returns the old "request_id" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldRequestID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRequestID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRequestID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRequestID: %w", err) + } + return oldValue.RequestID, nil +} + +// ResetRequestID resets all changes to the "request_id" field. +func (m *UsageLogMutation) ResetRequestID() { + m.request_id = nil +} + +// SetModel sets the "model" field. +func (m *UsageLogMutation) SetModel(s string) { + m.model = &s +} + +// Model returns the value of the "model" field in the mutation. +func (m *UsageLogMutation) Model() (r string, exists bool) { + v := m.model + if v == nil { + return + } + return *v, true +} + +// OldModel returns the old "model" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldModel(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldModel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldModel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldModel: %w", err) + } + return oldValue.Model, nil +} + +// ResetModel resets all changes to the "model" field. +func (m *UsageLogMutation) ResetModel() { + m.model = nil +} + +// SetGroupID sets the "group_id" field. +func (m *UsageLogMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *UsageLogMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldGroupID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ClearGroupID clears the value of the "group_id" field. +func (m *UsageLogMutation) ClearGroupID() { + m.group = nil + m.clearedFields[usagelog.FieldGroupID] = struct{}{} +} + +// GroupIDCleared returns if the "group_id" field was cleared in this mutation. +func (m *UsageLogMutation) GroupIDCleared() bool { + _, ok := m.clearedFields[usagelog.FieldGroupID] + return ok +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *UsageLogMutation) ResetGroupID() { + m.group = nil + delete(m.clearedFields, usagelog.FieldGroupID) +} + +// SetSubscriptionID sets the "subscription_id" field. 
+func (m *UsageLogMutation) SetSubscriptionID(i int64) { + m.subscription = &i +} + +// SubscriptionID returns the value of the "subscription_id" field in the mutation. +func (m *UsageLogMutation) SubscriptionID() (r int64, exists bool) { + v := m.subscription + if v == nil { + return + } + return *v, true +} + +// OldSubscriptionID returns the old "subscription_id" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldSubscriptionID(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSubscriptionID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSubscriptionID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSubscriptionID: %w", err) + } + return oldValue.SubscriptionID, nil +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (m *UsageLogMutation) ClearSubscriptionID() { + m.subscription = nil + m.clearedFields[usagelog.FieldSubscriptionID] = struct{}{} +} + +// SubscriptionIDCleared returns if the "subscription_id" field was cleared in this mutation. +func (m *UsageLogMutation) SubscriptionIDCleared() bool { + _, ok := m.clearedFields[usagelog.FieldSubscriptionID] + return ok +} + +// ResetSubscriptionID resets all changes to the "subscription_id" field. +func (m *UsageLogMutation) ResetSubscriptionID() { + m.subscription = nil + delete(m.clearedFields, usagelog.FieldSubscriptionID) +} + +// SetInputTokens sets the "input_tokens" field. +func (m *UsageLogMutation) SetInputTokens(i int) { + m.input_tokens = &i + m.addinput_tokens = nil +} + +// InputTokens returns the value of the "input_tokens" field in the mutation. +func (m *UsageLogMutation) InputTokens() (r int, exists bool) { + v := m.input_tokens + if v == nil { + return + } + return *v, true +} + +// OldInputTokens returns the old "input_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldInputTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldInputTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldInputTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldInputTokens: %w", err) + } + return oldValue.InputTokens, nil +} + +// AddInputTokens adds i to the "input_tokens" field. +func (m *UsageLogMutation) AddInputTokens(i int) { + if m.addinput_tokens != nil { + *m.addinput_tokens += i + } else { + m.addinput_tokens = &i + } +} + +// AddedInputTokens returns the value that was added to the "input_tokens" field in this mutation. +func (m *UsageLogMutation) AddedInputTokens() (r int, exists bool) { + v := m.addinput_tokens + if v == nil { + return + } + return *v, true +} + +// ResetInputTokens resets all changes to the "input_tokens" field. 
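+// Resetting drops both the pending value and the pending delta; compare
+// SetInputTokens, which replaces the value and clears any pending delta, with
+// AddInputTokens, which accumulates a delta applied on top at save time:
+//
+//	m.SetInputTokens(10) // pending value 10, pending delta cleared
+//	m.AddInputTokens(5)  // pending delta 5, accumulated across calls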
+func (m *UsageLogMutation) ResetInputTokens() { + m.input_tokens = nil + m.addinput_tokens = nil +} + +// SetOutputTokens sets the "output_tokens" field. +func (m *UsageLogMutation) SetOutputTokens(i int) { + m.output_tokens = &i + m.addoutput_tokens = nil +} + +// OutputTokens returns the value of the "output_tokens" field in the mutation. +func (m *UsageLogMutation) OutputTokens() (r int, exists bool) { + v := m.output_tokens + if v == nil { + return + } + return *v, true +} + +// OldOutputTokens returns the old "output_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldOutputTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOutputTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOutputTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOutputTokens: %w", err) + } + return oldValue.OutputTokens, nil +} + +// AddOutputTokens adds i to the "output_tokens" field. +func (m *UsageLogMutation) AddOutputTokens(i int) { + if m.addoutput_tokens != nil { + *m.addoutput_tokens += i + } else { + m.addoutput_tokens = &i + } +} + +// AddedOutputTokens returns the value that was added to the "output_tokens" field in this mutation. +func (m *UsageLogMutation) AddedOutputTokens() (r int, exists bool) { + v := m.addoutput_tokens + if v == nil { + return + } + return *v, true +} + +// ResetOutputTokens resets all changes to the "output_tokens" field. +func (m *UsageLogMutation) ResetOutputTokens() { + m.output_tokens = nil + m.addoutput_tokens = nil +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (m *UsageLogMutation) SetCacheCreationTokens(i int) { + m.cache_creation_tokens = &i + m.addcache_creation_tokens = nil +} + +// CacheCreationTokens returns the value of the "cache_creation_tokens" field in the mutation. +func (m *UsageLogMutation) CacheCreationTokens() (r int, exists bool) { + v := m.cache_creation_tokens + if v == nil { + return + } + return *v, true +} + +// OldCacheCreationTokens returns the old "cache_creation_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldCacheCreationTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheCreationTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheCreationTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheCreationTokens: %w", err) + } + return oldValue.CacheCreationTokens, nil +} + +// AddCacheCreationTokens adds i to the "cache_creation_tokens" field. 
+func (m *UsageLogMutation) AddCacheCreationTokens(i int) { + if m.addcache_creation_tokens != nil { + *m.addcache_creation_tokens += i + } else { + m.addcache_creation_tokens = &i + } +} + +// AddedCacheCreationTokens returns the value that was added to the "cache_creation_tokens" field in this mutation. +func (m *UsageLogMutation) AddedCacheCreationTokens() (r int, exists bool) { + v := m.addcache_creation_tokens + if v == nil { + return + } + return *v, true +} + +// ResetCacheCreationTokens resets all changes to the "cache_creation_tokens" field. +func (m *UsageLogMutation) ResetCacheCreationTokens() { + m.cache_creation_tokens = nil + m.addcache_creation_tokens = nil +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (m *UsageLogMutation) SetCacheReadTokens(i int) { + m.cache_read_tokens = &i + m.addcache_read_tokens = nil +} + +// CacheReadTokens returns the value of the "cache_read_tokens" field in the mutation. +func (m *UsageLogMutation) CacheReadTokens() (r int, exists bool) { + v := m.cache_read_tokens + if v == nil { + return + } + return *v, true +} + +// OldCacheReadTokens returns the old "cache_read_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldCacheReadTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheReadTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheReadTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheReadTokens: %w", err) + } + return oldValue.CacheReadTokens, nil +} + +// AddCacheReadTokens adds i to the "cache_read_tokens" field. +func (m *UsageLogMutation) AddCacheReadTokens(i int) { + if m.addcache_read_tokens != nil { + *m.addcache_read_tokens += i + } else { + m.addcache_read_tokens = &i + } +} + +// AddedCacheReadTokens returns the value that was added to the "cache_read_tokens" field in this mutation. +func (m *UsageLogMutation) AddedCacheReadTokens() (r int, exists bool) { + v := m.addcache_read_tokens + if v == nil { + return + } + return *v, true +} + +// ResetCacheReadTokens resets all changes to the "cache_read_tokens" field. +func (m *UsageLogMutation) ResetCacheReadTokens() { + m.cache_read_tokens = nil + m.addcache_read_tokens = nil +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (m *UsageLogMutation) SetCacheCreation5mTokens(i int) { + m.cache_creation_5m_tokens = &i + m.addcache_creation_5m_tokens = nil +} + +// CacheCreation5mTokens returns the value of the "cache_creation_5m_tokens" field in the mutation. +func (m *UsageLogMutation) CacheCreation5mTokens() (r int, exists bool) { + v := m.cache_creation_5m_tokens + if v == nil { + return + } + return *v, true +} + +// OldCacheCreation5mTokens returns the old "cache_creation_5m_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldCacheCreation5mTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheCreation5mTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheCreation5mTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheCreation5mTokens: %w", err) + } + return oldValue.CacheCreation5mTokens, nil +} + +// AddCacheCreation5mTokens adds i to the "cache_creation_5m_tokens" field. +func (m *UsageLogMutation) AddCacheCreation5mTokens(i int) { + if m.addcache_creation_5m_tokens != nil { + *m.addcache_creation_5m_tokens += i + } else { + m.addcache_creation_5m_tokens = &i + } +} + +// AddedCacheCreation5mTokens returns the value that was added to the "cache_creation_5m_tokens" field in this mutation. +func (m *UsageLogMutation) AddedCacheCreation5mTokens() (r int, exists bool) { + v := m.addcache_creation_5m_tokens + if v == nil { + return + } + return *v, true +} + +// ResetCacheCreation5mTokens resets all changes to the "cache_creation_5m_tokens" field. +func (m *UsageLogMutation) ResetCacheCreation5mTokens() { + m.cache_creation_5m_tokens = nil + m.addcache_creation_5m_tokens = nil +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (m *UsageLogMutation) SetCacheCreation1hTokens(i int) { + m.cache_creation_1h_tokens = &i + m.addcache_creation_1h_tokens = nil +} + +// CacheCreation1hTokens returns the value of the "cache_creation_1h_tokens" field in the mutation. +func (m *UsageLogMutation) CacheCreation1hTokens() (r int, exists bool) { + v := m.cache_creation_1h_tokens + if v == nil { + return + } + return *v, true +} + +// OldCacheCreation1hTokens returns the old "cache_creation_1h_tokens" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldCacheCreation1hTokens(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheCreation1hTokens is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheCreation1hTokens requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheCreation1hTokens: %w", err) + } + return oldValue.CacheCreation1hTokens, nil +} + +// AddCacheCreation1hTokens adds i to the "cache_creation_1h_tokens" field. +func (m *UsageLogMutation) AddCacheCreation1hTokens(i int) { + if m.addcache_creation_1h_tokens != nil { + *m.addcache_creation_1h_tokens += i + } else { + m.addcache_creation_1h_tokens = &i + } +} + +// AddedCacheCreation1hTokens returns the value that was added to the "cache_creation_1h_tokens" field in this mutation. +func (m *UsageLogMutation) AddedCacheCreation1hTokens() (r int, exists bool) { + v := m.addcache_creation_1h_tokens + if v == nil { + return + } + return *v, true +} + +// ResetCacheCreation1hTokens resets all changes to the "cache_creation_1h_tokens" field. +func (m *UsageLogMutation) ResetCacheCreation1hTokens() { + m.cache_creation_1h_tokens = nil + m.addcache_creation_1h_tokens = nil +} + +// SetInputCost sets the "input_cost" field. 
+func (m *UsageLogMutation) SetInputCost(f float64) { + m.input_cost = &f + m.addinput_cost = nil +} + +// InputCost returns the value of the "input_cost" field in the mutation. +func (m *UsageLogMutation) InputCost() (r float64, exists bool) { + v := m.input_cost + if v == nil { + return + } + return *v, true +} + +// OldInputCost returns the old "input_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldInputCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldInputCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldInputCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldInputCost: %w", err) + } + return oldValue.InputCost, nil +} + +// AddInputCost adds f to the "input_cost" field. +func (m *UsageLogMutation) AddInputCost(f float64) { + if m.addinput_cost != nil { + *m.addinput_cost += f + } else { + m.addinput_cost = &f + } +} + +// AddedInputCost returns the value that was added to the "input_cost" field in this mutation. +func (m *UsageLogMutation) AddedInputCost() (r float64, exists bool) { + v := m.addinput_cost + if v == nil { + return + } + return *v, true +} + +// ResetInputCost resets all changes to the "input_cost" field. +func (m *UsageLogMutation) ResetInputCost() { + m.input_cost = nil + m.addinput_cost = nil +} + +// SetOutputCost sets the "output_cost" field. +func (m *UsageLogMutation) SetOutputCost(f float64) { + m.output_cost = &f + m.addoutput_cost = nil +} + +// OutputCost returns the value of the "output_cost" field in the mutation. +func (m *UsageLogMutation) OutputCost() (r float64, exists bool) { + v := m.output_cost + if v == nil { + return + } + return *v, true +} + +// OldOutputCost returns the old "output_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldOutputCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOutputCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOutputCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOutputCost: %w", err) + } + return oldValue.OutputCost, nil +} + +// AddOutputCost adds f to the "output_cost" field. +func (m *UsageLogMutation) AddOutputCost(f float64) { + if m.addoutput_cost != nil { + *m.addoutput_cost += f + } else { + m.addoutput_cost = &f + } +} + +// AddedOutputCost returns the value that was added to the "output_cost" field in this mutation. +func (m *UsageLogMutation) AddedOutputCost() (r float64, exists bool) { + v := m.addoutput_cost + if v == nil { + return + } + return *v, true +} + +// ResetOutputCost resets all changes to the "output_cost" field. +func (m *UsageLogMutation) ResetOutputCost() { + m.output_cost = nil + m.addoutput_cost = nil +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. 
+func (m *UsageLogMutation) SetCacheCreationCost(f float64) { + m.cache_creation_cost = &f + m.addcache_creation_cost = nil +} + +// CacheCreationCost returns the value of the "cache_creation_cost" field in the mutation. +func (m *UsageLogMutation) CacheCreationCost() (r float64, exists bool) { + v := m.cache_creation_cost + if v == nil { + return + } + return *v, true +} + +// OldCacheCreationCost returns the old "cache_creation_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldCacheCreationCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheCreationCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheCreationCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheCreationCost: %w", err) + } + return oldValue.CacheCreationCost, nil +} + +// AddCacheCreationCost adds f to the "cache_creation_cost" field. +func (m *UsageLogMutation) AddCacheCreationCost(f float64) { + if m.addcache_creation_cost != nil { + *m.addcache_creation_cost += f + } else { + m.addcache_creation_cost = &f + } +} + +// AddedCacheCreationCost returns the value that was added to the "cache_creation_cost" field in this mutation. +func (m *UsageLogMutation) AddedCacheCreationCost() (r float64, exists bool) { + v := m.addcache_creation_cost + if v == nil { + return + } + return *v, true +} + +// ResetCacheCreationCost resets all changes to the "cache_creation_cost" field. +func (m *UsageLogMutation) ResetCacheCreationCost() { + m.cache_creation_cost = nil + m.addcache_creation_cost = nil +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (m *UsageLogMutation) SetCacheReadCost(f float64) { + m.cache_read_cost = &f + m.addcache_read_cost = nil +} + +// CacheReadCost returns the value of the "cache_read_cost" field in the mutation. +func (m *UsageLogMutation) CacheReadCost() (r float64, exists bool) { + v := m.cache_read_cost + if v == nil { + return + } + return *v, true +} + +// OldCacheReadCost returns the old "cache_read_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldCacheReadCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCacheReadCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCacheReadCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCacheReadCost: %w", err) + } + return oldValue.CacheReadCost, nil +} + +// AddCacheReadCost adds f to the "cache_read_cost" field. +func (m *UsageLogMutation) AddCacheReadCost(f float64) { + if m.addcache_read_cost != nil { + *m.addcache_read_cost += f + } else { + m.addcache_read_cost = &f + } +} + +// AddedCacheReadCost returns the value that was added to the "cache_read_cost" field in this mutation. 
+func (m *UsageLogMutation) AddedCacheReadCost() (r float64, exists bool) { + v := m.addcache_read_cost + if v == nil { + return + } + return *v, true +} + +// ResetCacheReadCost resets all changes to the "cache_read_cost" field. +func (m *UsageLogMutation) ResetCacheReadCost() { + m.cache_read_cost = nil + m.addcache_read_cost = nil +} + +// SetTotalCost sets the "total_cost" field. +func (m *UsageLogMutation) SetTotalCost(f float64) { + m.total_cost = &f + m.addtotal_cost = nil +} + +// TotalCost returns the value of the "total_cost" field in the mutation. +func (m *UsageLogMutation) TotalCost() (r float64, exists bool) { + v := m.total_cost + if v == nil { + return + } + return *v, true +} + +// OldTotalCost returns the old "total_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldTotalCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTotalCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTotalCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTotalCost: %w", err) + } + return oldValue.TotalCost, nil +} + +// AddTotalCost adds f to the "total_cost" field. +func (m *UsageLogMutation) AddTotalCost(f float64) { + if m.addtotal_cost != nil { + *m.addtotal_cost += f + } else { + m.addtotal_cost = &f + } +} + +// AddedTotalCost returns the value that was added to the "total_cost" field in this mutation. +func (m *UsageLogMutation) AddedTotalCost() (r float64, exists bool) { + v := m.addtotal_cost + if v == nil { + return + } + return *v, true +} + +// ResetTotalCost resets all changes to the "total_cost" field. +func (m *UsageLogMutation) ResetTotalCost() { + m.total_cost = nil + m.addtotal_cost = nil +} + +// SetActualCost sets the "actual_cost" field. +func (m *UsageLogMutation) SetActualCost(f float64) { + m.actual_cost = &f + m.addactual_cost = nil +} + +// ActualCost returns the value of the "actual_cost" field in the mutation. +func (m *UsageLogMutation) ActualCost() (r float64, exists bool) { + v := m.actual_cost + if v == nil { + return + } + return *v, true +} + +// OldActualCost returns the old "actual_cost" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldActualCost(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldActualCost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldActualCost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldActualCost: %w", err) + } + return oldValue.ActualCost, nil +} + +// AddActualCost adds f to the "actual_cost" field. +func (m *UsageLogMutation) AddActualCost(f float64) { + if m.addactual_cost != nil { + *m.addactual_cost += f + } else { + m.addactual_cost = &f + } +} + +// AddedActualCost returns the value that was added to the "actual_cost" field in this mutation. 
+func (m *UsageLogMutation) AddedActualCost() (r float64, exists bool) { + v := m.addactual_cost + if v == nil { + return + } + return *v, true +} + +// ResetActualCost resets all changes to the "actual_cost" field. +func (m *UsageLogMutation) ResetActualCost() { + m.actual_cost = nil + m.addactual_cost = nil +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (m *UsageLogMutation) SetRateMultiplier(f float64) { + m.rate_multiplier = &f + m.addrate_multiplier = nil +} + +// RateMultiplier returns the value of the "rate_multiplier" field in the mutation. +func (m *UsageLogMutation) RateMultiplier() (r float64, exists bool) { + v := m.rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldRateMultiplier returns the old "rate_multiplier" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err) + } + return oldValue.RateMultiplier, nil +} + +// AddRateMultiplier adds f to the "rate_multiplier" field. +func (m *UsageLogMutation) AddRateMultiplier(f float64) { + if m.addrate_multiplier != nil { + *m.addrate_multiplier += f + } else { + m.addrate_multiplier = &f + } +} + +// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation. +func (m *UsageLogMutation) AddedRateMultiplier() (r float64, exists bool) { + v := m.addrate_multiplier + if v == nil { + return + } + return *v, true +} + +// ResetRateMultiplier resets all changes to the "rate_multiplier" field. +func (m *UsageLogMutation) ResetRateMultiplier() { + m.rate_multiplier = nil + m.addrate_multiplier = nil +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (m *UsageLogMutation) SetAccountRateMultiplier(f float64) { + m.account_rate_multiplier = &f + m.addaccount_rate_multiplier = nil +} + +// AccountRateMultiplier returns the value of the "account_rate_multiplier" field in the mutation. +func (m *UsageLogMutation) AccountRateMultiplier() (r float64, exists bool) { + v := m.account_rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldAccountRateMultiplier returns the old "account_rate_multiplier" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldAccountRateMultiplier(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccountRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccountRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccountRateMultiplier: %w", err) + } + return oldValue.AccountRateMultiplier, nil +} + +// AddAccountRateMultiplier adds f to the "account_rate_multiplier" field. +func (m *UsageLogMutation) AddAccountRateMultiplier(f float64) { + if m.addaccount_rate_multiplier != nil { + *m.addaccount_rate_multiplier += f + } else { + m.addaccount_rate_multiplier = &f + } +} + +// AddedAccountRateMultiplier returns the value that was added to the "account_rate_multiplier" field in this mutation. +func (m *UsageLogMutation) AddedAccountRateMultiplier() (r float64, exists bool) { + v := m.addaccount_rate_multiplier + if v == nil { + return + } + return *v, true +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (m *UsageLogMutation) ClearAccountRateMultiplier() { + m.account_rate_multiplier = nil + m.addaccount_rate_multiplier = nil + m.clearedFields[usagelog.FieldAccountRateMultiplier] = struct{}{} +} + +// AccountRateMultiplierCleared returns if the "account_rate_multiplier" field was cleared in this mutation. +func (m *UsageLogMutation) AccountRateMultiplierCleared() bool { + _, ok := m.clearedFields[usagelog.FieldAccountRateMultiplier] + return ok +} + +// ResetAccountRateMultiplier resets all changes to the "account_rate_multiplier" field. +func (m *UsageLogMutation) ResetAccountRateMultiplier() { + m.account_rate_multiplier = nil + m.addaccount_rate_multiplier = nil + delete(m.clearedFields, usagelog.FieldAccountRateMultiplier) +} + +// SetBillingType sets the "billing_type" field. +func (m *UsageLogMutation) SetBillingType(i int8) { + m.billing_type = &i + m.addbilling_type = nil +} + +// BillingType returns the value of the "billing_type" field in the mutation. +func (m *UsageLogMutation) BillingType() (r int8, exists bool) { + v := m.billing_type + if v == nil { + return + } + return *v, true +} + +// OldBillingType returns the old "billing_type" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldBillingType(ctx context.Context) (v int8, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBillingType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBillingType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBillingType: %w", err) + } + return oldValue.BillingType, nil +} + +// AddBillingType adds i to the "billing_type" field. +func (m *UsageLogMutation) AddBillingType(i int8) { + if m.addbilling_type != nil { + *m.addbilling_type += i + } else { + m.addbilling_type = &i + } +} + +// AddedBillingType returns the value that was added to the "billing_type" field in this mutation. 
+func (m *UsageLogMutation) AddedBillingType() (r int8, exists bool) { + v := m.addbilling_type + if v == nil { + return + } + return *v, true +} + +// ResetBillingType resets all changes to the "billing_type" field. +func (m *UsageLogMutation) ResetBillingType() { + m.billing_type = nil + m.addbilling_type = nil +} + +// SetStream sets the "stream" field. +func (m *UsageLogMutation) SetStream(b bool) { + m.stream = &b +} + +// Stream returns the value of the "stream" field in the mutation. +func (m *UsageLogMutation) Stream() (r bool, exists bool) { + v := m.stream + if v == nil { + return + } + return *v, true +} + +// OldStream returns the old "stream" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldStream(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStream is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStream requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStream: %w", err) + } + return oldValue.Stream, nil +} + +// ResetStream resets all changes to the "stream" field. +func (m *UsageLogMutation) ResetStream() { + m.stream = nil +} + +// SetDurationMs sets the "duration_ms" field. +func (m *UsageLogMutation) SetDurationMs(i int) { + m.duration_ms = &i + m.addduration_ms = nil +} + +// DurationMs returns the value of the "duration_ms" field in the mutation. +func (m *UsageLogMutation) DurationMs() (r int, exists bool) { + v := m.duration_ms + if v == nil { + return + } + return *v, true +} + +// OldDurationMs returns the old "duration_ms" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldDurationMs(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDurationMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDurationMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDurationMs: %w", err) + } + return oldValue.DurationMs, nil +} + +// AddDurationMs adds i to the "duration_ms" field. +func (m *UsageLogMutation) AddDurationMs(i int) { + if m.addduration_ms != nil { + *m.addduration_ms += i + } else { + m.addduration_ms = &i + } +} + +// AddedDurationMs returns the value that was added to the "duration_ms" field in this mutation. +func (m *UsageLogMutation) AddedDurationMs() (r int, exists bool) { + v := m.addduration_ms + if v == nil { + return + } + return *v, true +} + +// ClearDurationMs clears the value of the "duration_ms" field. +func (m *UsageLogMutation) ClearDurationMs() { + m.duration_ms = nil + m.addduration_ms = nil + m.clearedFields[usagelog.FieldDurationMs] = struct{}{} +} + +// DurationMsCleared returns if the "duration_ms" field was cleared in this mutation. 
+func (m *UsageLogMutation) DurationMsCleared() bool { + _, ok := m.clearedFields[usagelog.FieldDurationMs] + return ok +} + +// ResetDurationMs resets all changes to the "duration_ms" field. +func (m *UsageLogMutation) ResetDurationMs() { + m.duration_ms = nil + m.addduration_ms = nil + delete(m.clearedFields, usagelog.FieldDurationMs) +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (m *UsageLogMutation) SetFirstTokenMs(i int) { + m.first_token_ms = &i + m.addfirst_token_ms = nil +} + +// FirstTokenMs returns the value of the "first_token_ms" field in the mutation. +func (m *UsageLogMutation) FirstTokenMs() (r int, exists bool) { + v := m.first_token_ms + if v == nil { + return + } + return *v, true +} + +// OldFirstTokenMs returns the old "first_token_ms" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldFirstTokenMs(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFirstTokenMs is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFirstTokenMs requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFirstTokenMs: %w", err) + } + return oldValue.FirstTokenMs, nil +} + +// AddFirstTokenMs adds i to the "first_token_ms" field. +func (m *UsageLogMutation) AddFirstTokenMs(i int) { + if m.addfirst_token_ms != nil { + *m.addfirst_token_ms += i + } else { + m.addfirst_token_ms = &i + } +} + +// AddedFirstTokenMs returns the value that was added to the "first_token_ms" field in this mutation. +func (m *UsageLogMutation) AddedFirstTokenMs() (r int, exists bool) { + v := m.addfirst_token_ms + if v == nil { + return + } + return *v, true +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. +func (m *UsageLogMutation) ClearFirstTokenMs() { + m.first_token_ms = nil + m.addfirst_token_ms = nil + m.clearedFields[usagelog.FieldFirstTokenMs] = struct{}{} +} + +// FirstTokenMsCleared returns if the "first_token_ms" field was cleared in this mutation. +func (m *UsageLogMutation) FirstTokenMsCleared() bool { + _, ok := m.clearedFields[usagelog.FieldFirstTokenMs] + return ok +} + +// ResetFirstTokenMs resets all changes to the "first_token_ms" field. +func (m *UsageLogMutation) ResetFirstTokenMs() { + m.first_token_ms = nil + m.addfirst_token_ms = nil + delete(m.clearedFields, usagelog.FieldFirstTokenMs) +} + +// SetUserAgent sets the "user_agent" field. +func (m *UsageLogMutation) SetUserAgent(s string) { + m.user_agent = &s +} + +// UserAgent returns the value of the "user_agent" field in the mutation. +func (m *UsageLogMutation) UserAgent() (r string, exists bool) { + v := m.user_agent + if v == nil { + return + } + return *v, true +} + +// OldUserAgent returns the old "user_agent" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldUserAgent(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserAgent is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserAgent requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserAgent: %w", err) + } + return oldValue.UserAgent, nil +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (m *UsageLogMutation) ClearUserAgent() { + m.user_agent = nil + m.clearedFields[usagelog.FieldUserAgent] = struct{}{} +} + +// UserAgentCleared returns if the "user_agent" field was cleared in this mutation. +func (m *UsageLogMutation) UserAgentCleared() bool { + _, ok := m.clearedFields[usagelog.FieldUserAgent] + return ok +} + +// ResetUserAgent resets all changes to the "user_agent" field. +func (m *UsageLogMutation) ResetUserAgent() { + m.user_agent = nil + delete(m.clearedFields, usagelog.FieldUserAgent) +} + +// SetIPAddress sets the "ip_address" field. +func (m *UsageLogMutation) SetIPAddress(s string) { + m.ip_address = &s +} + +// IPAddress returns the value of the "ip_address" field in the mutation. +func (m *UsageLogMutation) IPAddress() (r string, exists bool) { + v := m.ip_address + if v == nil { + return + } + return *v, true +} + +// OldIPAddress returns the old "ip_address" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldIPAddress(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPAddress is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPAddress: %w", err) + } + return oldValue.IPAddress, nil +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (m *UsageLogMutation) ClearIPAddress() { + m.ip_address = nil + m.clearedFields[usagelog.FieldIPAddress] = struct{}{} +} + +// IPAddressCleared returns if the "ip_address" field was cleared in this mutation. +func (m *UsageLogMutation) IPAddressCleared() bool { + _, ok := m.clearedFields[usagelog.FieldIPAddress] + return ok +} + +// ResetIPAddress resets all changes to the "ip_address" field. +func (m *UsageLogMutation) ResetIPAddress() { + m.ip_address = nil + delete(m.clearedFields, usagelog.FieldIPAddress) +} + +// SetImageCount sets the "image_count" field. +func (m *UsageLogMutation) SetImageCount(i int) { + m.image_count = &i + m.addimage_count = nil +} + +// ImageCount returns the value of the "image_count" field in the mutation. +func (m *UsageLogMutation) ImageCount() (r int, exists bool) { + v := m.image_count + if v == nil { + return + } + return *v, true +} + +// OldImageCount returns the old "image_count" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldImageCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImageCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImageCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImageCount: %w", err) + } + return oldValue.ImageCount, nil +} + +// AddImageCount adds i to the "image_count" field. +func (m *UsageLogMutation) AddImageCount(i int) { + if m.addimage_count != nil { + *m.addimage_count += i + } else { + m.addimage_count = &i + } +} + +// AddedImageCount returns the value that was added to the "image_count" field in this mutation. +func (m *UsageLogMutation) AddedImageCount() (r int, exists bool) { + v := m.addimage_count + if v == nil { + return + } + return *v, true +} + +// ResetImageCount resets all changes to the "image_count" field. +func (m *UsageLogMutation) ResetImageCount() { + m.image_count = nil + m.addimage_count = nil +} + +// SetImageSize sets the "image_size" field. +func (m *UsageLogMutation) SetImageSize(s string) { + m.image_size = &s +} + +// ImageSize returns the value of the "image_size" field in the mutation. +func (m *UsageLogMutation) ImageSize() (r string, exists bool) { + v := m.image_size + if v == nil { + return + } + return *v, true +} + +// OldImageSize returns the old "image_size" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldImageSize(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldImageSize is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldImageSize requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldImageSize: %w", err) + } + return oldValue.ImageSize, nil +} + +// ClearImageSize clears the value of the "image_size" field. +func (m *UsageLogMutation) ClearImageSize() { + m.image_size = nil + m.clearedFields[usagelog.FieldImageSize] = struct{}{} +} + +// ImageSizeCleared returns if the "image_size" field was cleared in this mutation. +func (m *UsageLogMutation) ImageSizeCleared() bool { + _, ok := m.clearedFields[usagelog.FieldImageSize] + return ok +} + +// ResetImageSize resets all changes to the "image_size" field. +func (m *UsageLogMutation) ResetImageSize() { + m.image_size = nil + delete(m.clearedFields, usagelog.FieldImageSize) +} + +// SetCreatedAt sets the "created_at" field. +func (m *UsageLogMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UsageLogMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UsageLogMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UsageLogMutation) ResetCreatedAt() { + m.created_at = nil +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UsageLogMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[usagelog.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *UsageLogMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UsageLogMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UsageLogMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearAPIKey clears the "api_key" edge to the APIKey entity. +func (m *UsageLogMutation) ClearAPIKey() { + m.clearedapi_key = true + m.clearedFields[usagelog.FieldAPIKeyID] = struct{}{} +} + +// APIKeyCleared reports if the "api_key" edge to the APIKey entity was cleared. +func (m *UsageLogMutation) APIKeyCleared() bool { + return m.clearedapi_key +} + +// APIKeyIDs returns the "api_key" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// APIKeyID instead. It exists only for internal usage by the builders. +func (m *UsageLogMutation) APIKeyIDs() (ids []int64) { + if id := m.api_key; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAPIKey resets all changes to the "api_key" edge. +func (m *UsageLogMutation) ResetAPIKey() { + m.api_key = nil + m.clearedapi_key = false +} + +// ClearAccount clears the "account" edge to the Account entity. +func (m *UsageLogMutation) ClearAccount() { + m.clearedaccount = true + m.clearedFields[usagelog.FieldAccountID] = struct{}{} +} + +// AccountCleared reports if the "account" edge to the Account entity was cleared. +func (m *UsageLogMutation) AccountCleared() bool { + return m.clearedaccount +} + +// AccountIDs returns the "account" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AccountID instead. It exists only for internal usage by the builders. +func (m *UsageLogMutation) AccountIDs() (ids []int64) { + if id := m.account; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAccount resets all changes to the "account" edge. +func (m *UsageLogMutation) ResetAccount() { + m.account = nil + m.clearedaccount = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *UsageLogMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[usagelog.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. 
+func (m *UsageLogMutation) GroupCleared() bool { + return m.GroupIDCleared() || m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *UsageLogMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *UsageLogMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// ClearSubscription clears the "subscription" edge to the UserSubscription entity. +func (m *UsageLogMutation) ClearSubscription() { + m.clearedsubscription = true + m.clearedFields[usagelog.FieldSubscriptionID] = struct{}{} +} + +// SubscriptionCleared reports if the "subscription" edge to the UserSubscription entity was cleared. +func (m *UsageLogMutation) SubscriptionCleared() bool { + return m.SubscriptionIDCleared() || m.clearedsubscription +} + +// SubscriptionIDs returns the "subscription" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// SubscriptionID instead. It exists only for internal usage by the builders. +func (m *UsageLogMutation) SubscriptionIDs() (ids []int64) { + if id := m.subscription; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetSubscription resets all changes to the "subscription" edge. +func (m *UsageLogMutation) ResetSubscription() { + m.subscription = nil + m.clearedsubscription = false +} + +// Where appends a list of predicates to the UsageLogMutation builder. +func (m *UsageLogMutation) Where(ps ...predicate.UsageLog) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UsageLogMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UsageLogMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.UsageLog, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UsageLogMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UsageLogMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (UsageLog). +func (m *UsageLogMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields().
+func (m *UsageLogMutation) Fields() []string { + fields := make([]string, 0, 30) + if m.user != nil { + fields = append(fields, usagelog.FieldUserID) + } + if m.api_key != nil { + fields = append(fields, usagelog.FieldAPIKeyID) + } + if m.account != nil { + fields = append(fields, usagelog.FieldAccountID) + } + if m.request_id != nil { + fields = append(fields, usagelog.FieldRequestID) + } + if m.model != nil { + fields = append(fields, usagelog.FieldModel) + } + if m.group != nil { + fields = append(fields, usagelog.FieldGroupID) + } + if m.subscription != nil { + fields = append(fields, usagelog.FieldSubscriptionID) + } + if m.input_tokens != nil { + fields = append(fields, usagelog.FieldInputTokens) + } + if m.output_tokens != nil { + fields = append(fields, usagelog.FieldOutputTokens) + } + if m.cache_creation_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreationTokens) + } + if m.cache_read_tokens != nil { + fields = append(fields, usagelog.FieldCacheReadTokens) + } + if m.cache_creation_5m_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreation5mTokens) + } + if m.cache_creation_1h_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreation1hTokens) + } + if m.input_cost != nil { + fields = append(fields, usagelog.FieldInputCost) + } + if m.output_cost != nil { + fields = append(fields, usagelog.FieldOutputCost) + } + if m.cache_creation_cost != nil { + fields = append(fields, usagelog.FieldCacheCreationCost) + } + if m.cache_read_cost != nil { + fields = append(fields, usagelog.FieldCacheReadCost) + } + if m.total_cost != nil { + fields = append(fields, usagelog.FieldTotalCost) + } + if m.actual_cost != nil { + fields = append(fields, usagelog.FieldActualCost) + } + if m.rate_multiplier != nil { + fields = append(fields, usagelog.FieldRateMultiplier) + } + if m.account_rate_multiplier != nil { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } + if m.billing_type != nil { + fields = append(fields, usagelog.FieldBillingType) + } + if m.stream != nil { + fields = append(fields, usagelog.FieldStream) + } + if m.duration_ms != nil { + fields = append(fields, usagelog.FieldDurationMs) + } + if m.first_token_ms != nil { + fields = append(fields, usagelog.FieldFirstTokenMs) + } + if m.user_agent != nil { + fields = append(fields, usagelog.FieldUserAgent) + } + if m.ip_address != nil { + fields = append(fields, usagelog.FieldIPAddress) + } + if m.image_count != nil { + fields = append(fields, usagelog.FieldImageCount) + } + if m.image_size != nil { + fields = append(fields, usagelog.FieldImageSize) + } + if m.created_at != nil { + fields = append(fields, usagelog.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *UsageLogMutation) Field(name string) (ent.Value, bool) { + switch name { + case usagelog.FieldUserID: + return m.UserID() + case usagelog.FieldAPIKeyID: + return m.APIKeyID() + case usagelog.FieldAccountID: + return m.AccountID() + case usagelog.FieldRequestID: + return m.RequestID() + case usagelog.FieldModel: + return m.Model() + case usagelog.FieldGroupID: + return m.GroupID() + case usagelog.FieldSubscriptionID: + return m.SubscriptionID() + case usagelog.FieldInputTokens: + return m.InputTokens() + case usagelog.FieldOutputTokens: + return m.OutputTokens() + case usagelog.FieldCacheCreationTokens: + return m.CacheCreationTokens() + case usagelog.FieldCacheReadTokens: + return m.CacheReadTokens() + case usagelog.FieldCacheCreation5mTokens: + return m.CacheCreation5mTokens() + case usagelog.FieldCacheCreation1hTokens: + return m.CacheCreation1hTokens() + case usagelog.FieldInputCost: + return m.InputCost() + case usagelog.FieldOutputCost: + return m.OutputCost() + case usagelog.FieldCacheCreationCost: + return m.CacheCreationCost() + case usagelog.FieldCacheReadCost: + return m.CacheReadCost() + case usagelog.FieldTotalCost: + return m.TotalCost() + case usagelog.FieldActualCost: + return m.ActualCost() + case usagelog.FieldRateMultiplier: + return m.RateMultiplier() + case usagelog.FieldAccountRateMultiplier: + return m.AccountRateMultiplier() + case usagelog.FieldBillingType: + return m.BillingType() + case usagelog.FieldStream: + return m.Stream() + case usagelog.FieldDurationMs: + return m.DurationMs() + case usagelog.FieldFirstTokenMs: + return m.FirstTokenMs() + case usagelog.FieldUserAgent: + return m.UserAgent() + case usagelog.FieldIPAddress: + return m.IPAddress() + case usagelog.FieldImageCount: + return m.ImageCount() + case usagelog.FieldImageSize: + return m.ImageSize() + case usagelog.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *UsageLogMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case usagelog.FieldUserID: + return m.OldUserID(ctx) + case usagelog.FieldAPIKeyID: + return m.OldAPIKeyID(ctx) + case usagelog.FieldAccountID: + return m.OldAccountID(ctx) + case usagelog.FieldRequestID: + return m.OldRequestID(ctx) + case usagelog.FieldModel: + return m.OldModel(ctx) + case usagelog.FieldGroupID: + return m.OldGroupID(ctx) + case usagelog.FieldSubscriptionID: + return m.OldSubscriptionID(ctx) + case usagelog.FieldInputTokens: + return m.OldInputTokens(ctx) + case usagelog.FieldOutputTokens: + return m.OldOutputTokens(ctx) + case usagelog.FieldCacheCreationTokens: + return m.OldCacheCreationTokens(ctx) + case usagelog.FieldCacheReadTokens: + return m.OldCacheReadTokens(ctx) + case usagelog.FieldCacheCreation5mTokens: + return m.OldCacheCreation5mTokens(ctx) + case usagelog.FieldCacheCreation1hTokens: + return m.OldCacheCreation1hTokens(ctx) + case usagelog.FieldInputCost: + return m.OldInputCost(ctx) + case usagelog.FieldOutputCost: + return m.OldOutputCost(ctx) + case usagelog.FieldCacheCreationCost: + return m.OldCacheCreationCost(ctx) + case usagelog.FieldCacheReadCost: + return m.OldCacheReadCost(ctx) + case usagelog.FieldTotalCost: + return m.OldTotalCost(ctx) + case usagelog.FieldActualCost: + return m.OldActualCost(ctx) + case usagelog.FieldRateMultiplier: + return m.OldRateMultiplier(ctx) + case usagelog.FieldAccountRateMultiplier: + return m.OldAccountRateMultiplier(ctx) + case usagelog.FieldBillingType: + return m.OldBillingType(ctx) + case usagelog.FieldStream: + return m.OldStream(ctx) + case usagelog.FieldDurationMs: + return m.OldDurationMs(ctx) + case usagelog.FieldFirstTokenMs: + return m.OldFirstTokenMs(ctx) + case usagelog.FieldUserAgent: + return m.OldUserAgent(ctx) + case usagelog.FieldIPAddress: + return m.OldIPAddress(ctx) + case usagelog.FieldImageCount: + return m.OldImageCount(ctx) + case usagelog.FieldImageSize: + return m.OldImageSize(ctx) + case usagelog.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown UsageLog field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UsageLogMutation) SetField(name string, value ent.Value) error { + switch name { + case usagelog.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case usagelog.FieldAPIKeyID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAPIKeyID(v) + return nil + case usagelog.FieldAccountID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccountID(v) + return nil + case usagelog.FieldRequestID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRequestID(v) + return nil + case usagelog.FieldModel: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetModel(v) + return nil + case usagelog.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case usagelog.FieldSubscriptionID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSubscriptionID(v) + return nil + case usagelog.FieldInputTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetInputTokens(v) + return nil + case usagelog.FieldOutputTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOutputTokens(v) + return nil + case usagelog.FieldCacheCreationTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheCreationTokens(v) + return nil + case usagelog.FieldCacheReadTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheReadTokens(v) + return nil + case usagelog.FieldCacheCreation5mTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheCreation5mTokens(v) + return nil + case usagelog.FieldCacheCreation1hTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheCreation1hTokens(v) + return nil + case usagelog.FieldInputCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetInputCost(v) + return nil + case usagelog.FieldOutputCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOutputCost(v) + return nil + case usagelog.FieldCacheCreationCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheCreationCost(v) + return nil + case usagelog.FieldCacheReadCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCacheReadCost(v) + return nil + case usagelog.FieldTotalCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTotalCost(v) + return nil + case usagelog.FieldActualCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetActualCost(v) + return nil + case usagelog.FieldRateMultiplier: + v, ok := value.(float64) + 
if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateMultiplier(v) + return nil + case usagelog.FieldAccountRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccountRateMultiplier(v) + return nil + case usagelog.FieldBillingType: + v, ok := value.(int8) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBillingType(v) + return nil + case usagelog.FieldStream: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStream(v) + return nil + case usagelog.FieldDurationMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDurationMs(v) + return nil + case usagelog.FieldFirstTokenMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFirstTokenMs(v) + return nil + case usagelog.FieldUserAgent: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserAgent(v) + return nil + case usagelog.FieldIPAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPAddress(v) + return nil + case usagelog.FieldImageCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImageCount(v) + return nil + case usagelog.FieldImageSize: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetImageSize(v) + return nil + case usagelog.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown UsageLog field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. 
+func (m *UsageLogMutation) AddedFields() []string { + var fields []string + if m.addinput_tokens != nil { + fields = append(fields, usagelog.FieldInputTokens) + } + if m.addoutput_tokens != nil { + fields = append(fields, usagelog.FieldOutputTokens) + } + if m.addcache_creation_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreationTokens) + } + if m.addcache_read_tokens != nil { + fields = append(fields, usagelog.FieldCacheReadTokens) + } + if m.addcache_creation_5m_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreation5mTokens) + } + if m.addcache_creation_1h_tokens != nil { + fields = append(fields, usagelog.FieldCacheCreation1hTokens) + } + if m.addinput_cost != nil { + fields = append(fields, usagelog.FieldInputCost) + } + if m.addoutput_cost != nil { + fields = append(fields, usagelog.FieldOutputCost) + } + if m.addcache_creation_cost != nil { + fields = append(fields, usagelog.FieldCacheCreationCost) + } + if m.addcache_read_cost != nil { + fields = append(fields, usagelog.FieldCacheReadCost) + } + if m.addtotal_cost != nil { + fields = append(fields, usagelog.FieldTotalCost) + } + if m.addactual_cost != nil { + fields = append(fields, usagelog.FieldActualCost) + } + if m.addrate_multiplier != nil { + fields = append(fields, usagelog.FieldRateMultiplier) + } + if m.addaccount_rate_multiplier != nil { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } + if m.addbilling_type != nil { + fields = append(fields, usagelog.FieldBillingType) + } + if m.addduration_ms != nil { + fields = append(fields, usagelog.FieldDurationMs) + } + if m.addfirst_token_ms != nil { + fields = append(fields, usagelog.FieldFirstTokenMs) + } + if m.addimage_count != nil { + fields = append(fields, usagelog.FieldImageCount) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UsageLogMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case usagelog.FieldInputTokens: + return m.AddedInputTokens() + case usagelog.FieldOutputTokens: + return m.AddedOutputTokens() + case usagelog.FieldCacheCreationTokens: + return m.AddedCacheCreationTokens() + case usagelog.FieldCacheReadTokens: + return m.AddedCacheReadTokens() + case usagelog.FieldCacheCreation5mTokens: + return m.AddedCacheCreation5mTokens() + case usagelog.FieldCacheCreation1hTokens: + return m.AddedCacheCreation1hTokens() + case usagelog.FieldInputCost: + return m.AddedInputCost() + case usagelog.FieldOutputCost: + return m.AddedOutputCost() + case usagelog.FieldCacheCreationCost: + return m.AddedCacheCreationCost() + case usagelog.FieldCacheReadCost: + return m.AddedCacheReadCost() + case usagelog.FieldTotalCost: + return m.AddedTotalCost() + case usagelog.FieldActualCost: + return m.AddedActualCost() + case usagelog.FieldRateMultiplier: + return m.AddedRateMultiplier() + case usagelog.FieldAccountRateMultiplier: + return m.AddedAccountRateMultiplier() + case usagelog.FieldBillingType: + return m.AddedBillingType() + case usagelog.FieldDurationMs: + return m.AddedDurationMs() + case usagelog.FieldFirstTokenMs: + return m.AddedFirstTokenMs() + case usagelog.FieldImageCount: + return m.AddedImageCount() + } + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UsageLogMutation) AddField(name string, value ent.Value) error { + switch name { + case usagelog.FieldInputTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddInputTokens(v) + return nil + case usagelog.FieldOutputTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddOutputTokens(v) + return nil + case usagelog.FieldCacheCreationTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheCreationTokens(v) + return nil + case usagelog.FieldCacheReadTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheReadTokens(v) + return nil + case usagelog.FieldCacheCreation5mTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheCreation5mTokens(v) + return nil + case usagelog.FieldCacheCreation1hTokens: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheCreation1hTokens(v) + return nil + case usagelog.FieldInputCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddInputCost(v) + return nil + case usagelog.FieldOutputCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddOutputCost(v) + return nil + case usagelog.FieldCacheCreationCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheCreationCost(v) + return nil + case usagelog.FieldCacheReadCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddCacheReadCost(v) + return nil + case usagelog.FieldTotalCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddTotalCost(v) + return nil + case usagelog.FieldActualCost: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddActualCost(v) + return nil + case usagelog.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRateMultiplier(v) + return nil + case usagelog.FieldAccountRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddAccountRateMultiplier(v) + return nil + case usagelog.FieldBillingType: + v, ok := value.(int8) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBillingType(v) + return nil + case usagelog.FieldDurationMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDurationMs(v) + return nil + case usagelog.FieldFirstTokenMs: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddFirstTokenMs(v) + return nil + case usagelog.FieldImageCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddImageCount(v) + return nil + } + return fmt.Errorf("unknown UsageLog numeric field %s", name) +} + 
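Editor's note: the generic accessors above (Fields, Field, SetField, AddedFields, AddedField, AddField) exist so that hooks, interceptors, and privacy rules can inspect a mutation through ent's generic Mutation interface instead of the concrete UsageLogMutation type. A minimal sketch of how they are typically consumed from a hook follows; the package name and logging behavior are illustrative and not part of this patch — only the "total_cost" field name and the accessor signatures come from the generated code above.

```go
package usagehooks // hypothetical helper package, not included in this patch

import (
	"context"
	"fmt"

	"entgo.io/ent" // runtime Mutation/Mutator/Hook interfaces
)

// LogCostChanges inspects any mutation through the generic ent.Mutation
// interface, which UsageLogMutation satisfies via Field/AddedField above.
func LogCostChanges(next ent.Mutator) ent.Mutator {
	return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
		// Field reports a value only if SetTotalCost was called in this mutation.
		if v, ok := m.Field("total_cost"); ok {
			fmt.Printf("%s: total_cost set to %v\n", m.Type(), v)
		}
		// AddedField reports the delta applied via AddTotalCost, if any.
		if d, ok := m.AddedField("total_cost"); ok {
			fmt.Printf("%s: total_cost incremented by %v\n", m.Type(), d)
		}
		return next.Mutate(ctx, m)
	})
}
```

A hook like this would be registered through ent's standard wiring (e.g. client.UsageLog.Use(LogCostChanges)); because such callers pass values as untyped ent.Value, SetField and AddField validate types at runtime and return the "unexpected type" errors seen above.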
+// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UsageLogMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(usagelog.FieldGroupID) { + fields = append(fields, usagelog.FieldGroupID) + } + if m.FieldCleared(usagelog.FieldSubscriptionID) { + fields = append(fields, usagelog.FieldSubscriptionID) + } + if m.FieldCleared(usagelog.FieldAccountRateMultiplier) { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } + if m.FieldCleared(usagelog.FieldDurationMs) { + fields = append(fields, usagelog.FieldDurationMs) + } + if m.FieldCleared(usagelog.FieldFirstTokenMs) { + fields = append(fields, usagelog.FieldFirstTokenMs) + } + if m.FieldCleared(usagelog.FieldUserAgent) { + fields = append(fields, usagelog.FieldUserAgent) + } + if m.FieldCleared(usagelog.FieldIPAddress) { + fields = append(fields, usagelog.FieldIPAddress) + } + if m.FieldCleared(usagelog.FieldImageSize) { + fields = append(fields, usagelog.FieldImageSize) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UsageLogMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UsageLogMutation) ClearField(name string) error { + switch name { + case usagelog.FieldGroupID: + m.ClearGroupID() + return nil + case usagelog.FieldSubscriptionID: + m.ClearSubscriptionID() + return nil + case usagelog.FieldAccountRateMultiplier: + m.ClearAccountRateMultiplier() + return nil + case usagelog.FieldDurationMs: + m.ClearDurationMs() + return nil + case usagelog.FieldFirstTokenMs: + m.ClearFirstTokenMs() + return nil + case usagelog.FieldUserAgent: + m.ClearUserAgent() + return nil + case usagelog.FieldIPAddress: + m.ClearIPAddress() + return nil + case usagelog.FieldImageSize: + m.ClearImageSize() + return nil + } + return fmt.Errorf("unknown UsageLog nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
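+// Resetting drops both a pending value and any pending increment; as a sketch
+// using the typed accessors defined earlier in this file:
+//
+//	m.AddInputTokens(10)
+//	_ = m.ResetField(usagelog.FieldInputTokens)
+//	_, ok := m.AddedInputTokens() // ok is now false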
+func (m *UsageLogMutation) ResetField(name string) error { + switch name { + case usagelog.FieldUserID: + m.ResetUserID() + return nil + case usagelog.FieldAPIKeyID: + m.ResetAPIKeyID() + return nil + case usagelog.FieldAccountID: + m.ResetAccountID() + return nil + case usagelog.FieldRequestID: + m.ResetRequestID() + return nil + case usagelog.FieldModel: + m.ResetModel() + return nil + case usagelog.FieldGroupID: + m.ResetGroupID() + return nil + case usagelog.FieldSubscriptionID: + m.ResetSubscriptionID() + return nil + case usagelog.FieldInputTokens: + m.ResetInputTokens() + return nil + case usagelog.FieldOutputTokens: + m.ResetOutputTokens() + return nil + case usagelog.FieldCacheCreationTokens: + m.ResetCacheCreationTokens() + return nil + case usagelog.FieldCacheReadTokens: + m.ResetCacheReadTokens() + return nil + case usagelog.FieldCacheCreation5mTokens: + m.ResetCacheCreation5mTokens() + return nil + case usagelog.FieldCacheCreation1hTokens: + m.ResetCacheCreation1hTokens() + return nil + case usagelog.FieldInputCost: + m.ResetInputCost() + return nil + case usagelog.FieldOutputCost: + m.ResetOutputCost() + return nil + case usagelog.FieldCacheCreationCost: + m.ResetCacheCreationCost() + return nil + case usagelog.FieldCacheReadCost: + m.ResetCacheReadCost() + return nil + case usagelog.FieldTotalCost: + m.ResetTotalCost() + return nil + case usagelog.FieldActualCost: + m.ResetActualCost() + return nil + case usagelog.FieldRateMultiplier: + m.ResetRateMultiplier() + return nil + case usagelog.FieldAccountRateMultiplier: + m.ResetAccountRateMultiplier() + return nil + case usagelog.FieldBillingType: + m.ResetBillingType() + return nil + case usagelog.FieldStream: + m.ResetStream() + return nil + case usagelog.FieldDurationMs: + m.ResetDurationMs() + return nil + case usagelog.FieldFirstTokenMs: + m.ResetFirstTokenMs() + return nil + case usagelog.FieldUserAgent: + m.ResetUserAgent() + return nil + case usagelog.FieldIPAddress: + m.ResetIPAddress() + return nil + case usagelog.FieldImageCount: + m.ResetImageCount() + return nil + case usagelog.FieldImageSize: + m.ResetImageSize() + return nil + case usagelog.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown UsageLog field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UsageLogMutation) AddedEdges() []string { + edges := make([]string, 0, 5) + if m.user != nil { + edges = append(edges, usagelog.EdgeUser) + } + if m.api_key != nil { + edges = append(edges, usagelog.EdgeAPIKey) + } + if m.account != nil { + edges = append(edges, usagelog.EdgeAccount) + } + if m.group != nil { + edges = append(edges, usagelog.EdgeGroup) + } + if m.subscription != nil { + edges = append(edges, usagelog.EdgeSubscription) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
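+// All UsageLog edges are unique, so each case yields at most one ID; for
+// example, after m.SetUserID(42), AddedIDs(usagelog.EdgeUser) reports
+// []ent.Value{int64(42)} (a sketch; SetUserID is defined earlier in this file).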
+func (m *UsageLogMutation) AddedIDs(name string) []ent.Value { + switch name { + case usagelog.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case usagelog.EdgeAPIKey: + if id := m.api_key; id != nil { + return []ent.Value{*id} + } + case usagelog.EdgeAccount: + if id := m.account; id != nil { + return []ent.Value{*id} + } + case usagelog.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + case usagelog.EdgeSubscription: + if id := m.subscription; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UsageLogMutation) RemovedEdges() []string { + edges := make([]string, 0, 5) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UsageLogMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UsageLogMutation) ClearedEdges() []string { + edges := make([]string, 0, 5) + if m.cleareduser { + edges = append(edges, usagelog.EdgeUser) + } + if m.clearedapi_key { + edges = append(edges, usagelog.EdgeAPIKey) + } + if m.clearedaccount { + edges = append(edges, usagelog.EdgeAccount) + } + if m.clearedgroup { + edges = append(edges, usagelog.EdgeGroup) + } + if m.clearedsubscription { + edges = append(edges, usagelog.EdgeSubscription) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UsageLogMutation) EdgeCleared(name string) bool { + switch name { + case usagelog.EdgeUser: + return m.cleareduser + case usagelog.EdgeAPIKey: + return m.clearedapi_key + case usagelog.EdgeAccount: + return m.clearedaccount + case usagelog.EdgeGroup: + return m.clearedgroup + case usagelog.EdgeSubscription: + return m.clearedsubscription + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UsageLogMutation) ClearEdge(name string) error { + switch name { + case usagelog.EdgeUser: + m.ClearUser() + return nil + case usagelog.EdgeAPIKey: + m.ClearAPIKey() + return nil + case usagelog.EdgeAccount: + m.ClearAccount() + return nil + case usagelog.EdgeGroup: + m.ClearGroup() + return nil + case usagelog.EdgeSubscription: + m.ClearSubscription() + return nil + } + return fmt.Errorf("unknown UsageLog unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UsageLogMutation) ResetEdge(name string) error { + switch name { + case usagelog.EdgeUser: + m.ResetUser() + return nil + case usagelog.EdgeAPIKey: + m.ResetAPIKey() + return nil + case usagelog.EdgeAccount: + m.ResetAccount() + return nil + case usagelog.EdgeGroup: + m.ResetGroup() + return nil + case usagelog.EdgeSubscription: + m.ResetSubscription() + return nil + } + return fmt.Errorf("unknown UsageLog edge %s", name) +} + +// UserMutation represents an operation that mutates the User nodes in the graph. 
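+//
+// A mutation is what hooks observe before it reaches the storage driver; as a
+// rough sketch (assuming this repository's generated hook package):
+//
+//	client.User.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.UserFunc(func(ctx context.Context, m *ent.UserMutation) (ent.Value, error) {
+//			// inspect or adjust m here before it is applied
+//			return next.Mutate(ctx, m)
+//		})
+//	})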
+type UserMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + email *string + password_hash *string + role *string + balance *float64 + addbalance *float64 + concurrency *int + addconcurrency *int + status *string + username *string + notes *string + clearedFields map[string]struct{} + api_keys map[int64]struct{} + removedapi_keys map[int64]struct{} + clearedapi_keys bool + redeem_codes map[int64]struct{} + removedredeem_codes map[int64]struct{} + clearedredeem_codes bool + subscriptions map[int64]struct{} + removedsubscriptions map[int64]struct{} + clearedsubscriptions bool + assigned_subscriptions map[int64]struct{} + removedassigned_subscriptions map[int64]struct{} + clearedassigned_subscriptions bool + allowed_groups map[int64]struct{} + removedallowed_groups map[int64]struct{} + clearedallowed_groups bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + attribute_values map[int64]struct{} + removedattribute_values map[int64]struct{} + clearedattribute_values bool + promo_code_usages map[int64]struct{} + removedpromo_code_usages map[int64]struct{} + clearedpromo_code_usages bool + done bool + oldValue func(context.Context) (*User, error) + predicates []predicate.User +} + +var _ ent.Mutation = (*UserMutation)(nil) + +// userOption allows management of the mutation configuration using functional options. +type userOption func(*UserMutation) + +// newUserMutation creates new mutation for the User entity. +func newUserMutation(c config, op Op, opts ...userOption) *UserMutation { + m := &UserMutation{ + config: c, + op: op, + typ: TypeUser, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserID sets the ID field of the mutation. +func withUserID(id int64) userOption { + return func(m *UserMutation) { + var ( + err error + once sync.Once + value *User + ) + m.oldValue = func(ctx context.Context) (*User, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().User.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUser sets the old User of the mutation. +func withUser(node *User) userOption { + return func(m *UserMutation) { + m.oldValue = func(context.Context) (*User, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UserMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().User.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *UserMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *UserMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *UserMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *UserMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *UserMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *UserMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (m *UserMutation) SetDeletedAt(t time.Time) {
+	m.deleted_at = &t
+}
+
+// DeletedAt returns the value of the "deleted_at" field in the mutation.
+func (m *UserMutation) DeletedAt() (r time.Time, exists bool) {
+	v := m.deleted_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldDeletedAt returns the old "deleted_at" field's value of the User entity.
+// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *UserMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[user.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *UserMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[user.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *UserMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, user.FieldDeletedAt) +} + +// SetEmail sets the "email" field. +func (m *UserMutation) SetEmail(s string) { + m.email = &s +} + +// Email returns the value of the "email" field in the mutation. +func (m *UserMutation) Email() (r string, exists bool) { + v := m.email + if v == nil { + return + } + return *v, true +} + +// OldEmail returns the old "email" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldEmail(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEmail is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEmail requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEmail: %w", err) + } + return oldValue.Email, nil +} + +// ResetEmail resets all changes to the "email" field. +func (m *UserMutation) ResetEmail() { + m.email = nil +} + +// SetPasswordHash sets the "password_hash" field. +func (m *UserMutation) SetPasswordHash(s string) { + m.password_hash = &s +} + +// PasswordHash returns the value of the "password_hash" field in the mutation. +func (m *UserMutation) PasswordHash() (r string, exists bool) { + v := m.password_hash + if v == nil { + return + } + return *v, true +} + +// OldPasswordHash returns the old "password_hash" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
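+// For example, an audit hook might compare the stored value with the pending
+// one (a sketch, with error handling elided):
+//
+//	old, _ := m.OldPasswordHash(ctx)
+//	if cur, ok := m.PasswordHash(); ok && cur != old {
+//		// the password hash is being rotated
+//	}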
+func (m *UserMutation) OldPasswordHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPasswordHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPasswordHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPasswordHash: %w", err) + } + return oldValue.PasswordHash, nil +} + +// ResetPasswordHash resets all changes to the "password_hash" field. +func (m *UserMutation) ResetPasswordHash() { + m.password_hash = nil +} + +// SetRole sets the "role" field. +func (m *UserMutation) SetRole(s string) { + m.role = &s +} + +// Role returns the value of the "role" field in the mutation. +func (m *UserMutation) Role() (r string, exists bool) { + v := m.role + if v == nil { + return + } + return *v, true +} + +// OldRole returns the old "role" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldRole(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRole is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRole requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRole: %w", err) + } + return oldValue.Role, nil +} + +// ResetRole resets all changes to the "role" field. +func (m *UserMutation) ResetRole() { + m.role = nil +} + +// SetBalance sets the "balance" field. +func (m *UserMutation) SetBalance(f float64) { + m.balance = &f + m.addbalance = nil +} + +// Balance returns the value of the "balance" field in the mutation. +func (m *UserMutation) Balance() (r float64, exists bool) { + v := m.balance + if v == nil { + return + } + return *v, true +} + +// OldBalance returns the old "balance" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldBalance(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBalance is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBalance requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBalance: %w", err) + } + return oldValue.Balance, nil +} + +// AddBalance adds f to the "balance" field. +func (m *UserMutation) AddBalance(f float64) { + if m.addbalance != nil { + *m.addbalance += f + } else { + m.addbalance = &f + } +} + +// AddedBalance returns the value that was added to the "balance" field in this mutation. +func (m *UserMutation) AddedBalance() (r float64, exists bool) { + v := m.addbalance + if v == nil { + return + } + return *v, true +} + +// ResetBalance resets all changes to the "balance" field. +func (m *UserMutation) ResetBalance() { + m.balance = nil + m.addbalance = nil +} + +// SetConcurrency sets the "concurrency" field. 
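+// Setting an absolute value discards any pending increment (note how the
+// implementation below nils addconcurrency); as a sketch:
+//
+//	m.AddConcurrency(2)
+//	m.SetConcurrency(10) // the +2 delta is dropped; the field becomes 10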
+func (m *UserMutation) SetConcurrency(i int) { + m.concurrency = &i + m.addconcurrency = nil +} + +// Concurrency returns the value of the "concurrency" field in the mutation. +func (m *UserMutation) Concurrency() (r int, exists bool) { + v := m.concurrency + if v == nil { + return + } + return *v, true +} + +// OldConcurrency returns the old "concurrency" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldConcurrency(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldConcurrency is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldConcurrency requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldConcurrency: %w", err) + } + return oldValue.Concurrency, nil +} + +// AddConcurrency adds i to the "concurrency" field. +func (m *UserMutation) AddConcurrency(i int) { + if m.addconcurrency != nil { + *m.addconcurrency += i + } else { + m.addconcurrency = &i + } +} + +// AddedConcurrency returns the value that was added to the "concurrency" field in this mutation. +func (m *UserMutation) AddedConcurrency() (r int, exists bool) { + v := m.addconcurrency + if v == nil { + return + } + return *v, true +} + +// ResetConcurrency resets all changes to the "concurrency" field. +func (m *UserMutation) ResetConcurrency() { + m.concurrency = nil + m.addconcurrency = nil +} + +// SetStatus sets the "status" field. +func (m *UserMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UserMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UserMutation) ResetStatus() { + m.status = nil +} + +// SetUsername sets the "username" field. +func (m *UserMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *UserMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ResetUsername resets all changes to the "username" field. +func (m *UserMutation) ResetUsername() { + m.username = nil +} + +// SetNotes sets the "notes" field. +func (m *UserMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *UserMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldNotes(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ResetNotes resets all changes to the "notes" field. +func (m *UserMutation) ResetNotes() { + m.notes = nil +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. +func (m *UserMutation) AddAPIKeyIDs(ids ...int64) { + if m.api_keys == nil { + m.api_keys = make(map[int64]struct{}) + } + for i := range ids { + m.api_keys[ids[i]] = struct{}{} + } +} + +// ClearAPIKeys clears the "api_keys" edge to the APIKey entity. +func (m *UserMutation) ClearAPIKeys() { + m.clearedapi_keys = true +} + +// APIKeysCleared reports if the "api_keys" edge to the APIKey entity was cleared. +func (m *UserMutation) APIKeysCleared() bool { + return m.clearedapi_keys +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to the APIKey entity by IDs. +func (m *UserMutation) RemoveAPIKeyIDs(ids ...int64) { + if m.removedapi_keys == nil { + m.removedapi_keys = make(map[int64]struct{}) + } + for i := range ids { + delete(m.api_keys, ids[i]) + m.removedapi_keys[ids[i]] = struct{}{} + } +} + +// RemovedAPIKeys returns the removed IDs of the "api_keys" edge to the APIKey entity. +func (m *UserMutation) RemovedAPIKeysIDs() (ids []int64) { + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return +} + +// APIKeysIDs returns the "api_keys" edge IDs in the mutation. +func (m *UserMutation) APIKeysIDs() (ids []int64) { + for id := range m.api_keys { + ids = append(ids, id) + } + return +} + +// ResetAPIKeys resets all changes to the "api_keys" edge. +func (m *UserMutation) ResetAPIKeys() { + m.api_keys = nil + m.clearedapi_keys = false + m.removedapi_keys = nil +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by ids. 
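+// IDs accumulate set-like across calls, so duplicates collapse and ordering is
+// not preserved; as a sketch:
+//
+//	m.AddRedeemCodeIDs(7, 7, 9)
+//	len(m.RedeemCodesIDs()) // 2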
+func (m *UserMutation) AddRedeemCodeIDs(ids ...int64) { + if m.redeem_codes == nil { + m.redeem_codes = make(map[int64]struct{}) + } + for i := range ids { + m.redeem_codes[ids[i]] = struct{}{} + } +} + +// ClearRedeemCodes clears the "redeem_codes" edge to the RedeemCode entity. +func (m *UserMutation) ClearRedeemCodes() { + m.clearedredeem_codes = true +} + +// RedeemCodesCleared reports if the "redeem_codes" edge to the RedeemCode entity was cleared. +func (m *UserMutation) RedeemCodesCleared() bool { + return m.clearedredeem_codes +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to the RedeemCode entity by IDs. +func (m *UserMutation) RemoveRedeemCodeIDs(ids ...int64) { + if m.removedredeem_codes == nil { + m.removedredeem_codes = make(map[int64]struct{}) + } + for i := range ids { + delete(m.redeem_codes, ids[i]) + m.removedredeem_codes[ids[i]] = struct{}{} + } +} + +// RemovedRedeemCodes returns the removed IDs of the "redeem_codes" edge to the RedeemCode entity. +func (m *UserMutation) RemovedRedeemCodesIDs() (ids []int64) { + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return +} + +// RedeemCodesIDs returns the "redeem_codes" edge IDs in the mutation. +func (m *UserMutation) RedeemCodesIDs() (ids []int64) { + for id := range m.redeem_codes { + ids = append(ids, id) + } + return +} + +// ResetRedeemCodes resets all changes to the "redeem_codes" edge. +func (m *UserMutation) ResetRedeemCodes() { + m.redeem_codes = nil + m.clearedredeem_codes = false + m.removedredeem_codes = nil +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by ids. +func (m *UserMutation) AddSubscriptionIDs(ids ...int64) { + if m.subscriptions == nil { + m.subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.subscriptions[ids[i]] = struct{}{} + } +} + +// ClearSubscriptions clears the "subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) ClearSubscriptions() { + m.clearedsubscriptions = true +} + +// SubscriptionsCleared reports if the "subscriptions" edge to the UserSubscription entity was cleared. +func (m *UserMutation) SubscriptionsCleared() bool { + return m.clearedsubscriptions +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to the UserSubscription entity by IDs. +func (m *UserMutation) RemoveSubscriptionIDs(ids ...int64) { + if m.removedsubscriptions == nil { + m.removedsubscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.subscriptions, ids[i]) + m.removedsubscriptions[ids[i]] = struct{}{} + } +} + +// RemovedSubscriptions returns the removed IDs of the "subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) RemovedSubscriptionsIDs() (ids []int64) { + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return +} + +// SubscriptionsIDs returns the "subscriptions" edge IDs in the mutation. +func (m *UserMutation) SubscriptionsIDs() (ids []int64) { + for id := range m.subscriptions { + ids = append(ids, id) + } + return +} + +// ResetSubscriptions resets all changes to the "subscriptions" edge. +func (m *UserMutation) ResetSubscriptions() { + m.subscriptions = nil + m.clearedsubscriptions = false + m.removedsubscriptions = nil +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by ids. 
+func (m *UserMutation) AddAssignedSubscriptionIDs(ids ...int64) { + if m.assigned_subscriptions == nil { + m.assigned_subscriptions = make(map[int64]struct{}) + } + for i := range ids { + m.assigned_subscriptions[ids[i]] = struct{}{} + } +} + +// ClearAssignedSubscriptions clears the "assigned_subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) ClearAssignedSubscriptions() { + m.clearedassigned_subscriptions = true +} + +// AssignedSubscriptionsCleared reports if the "assigned_subscriptions" edge to the UserSubscription entity was cleared. +func (m *UserMutation) AssignedSubscriptionsCleared() bool { + return m.clearedassigned_subscriptions +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (m *UserMutation) RemoveAssignedSubscriptionIDs(ids ...int64) { + if m.removedassigned_subscriptions == nil { + m.removedassigned_subscriptions = make(map[int64]struct{}) + } + for i := range ids { + delete(m.assigned_subscriptions, ids[i]) + m.removedassigned_subscriptions[ids[i]] = struct{}{} + } +} + +// RemovedAssignedSubscriptions returns the removed IDs of the "assigned_subscriptions" edge to the UserSubscription entity. +func (m *UserMutation) RemovedAssignedSubscriptionsIDs() (ids []int64) { + for id := range m.removedassigned_subscriptions { + ids = append(ids, id) + } + return +} + +// AssignedSubscriptionsIDs returns the "assigned_subscriptions" edge IDs in the mutation. +func (m *UserMutation) AssignedSubscriptionsIDs() (ids []int64) { + for id := range m.assigned_subscriptions { + ids = append(ids, id) + } + return +} + +// ResetAssignedSubscriptions resets all changes to the "assigned_subscriptions" edge. +func (m *UserMutation) ResetAssignedSubscriptions() { + m.assigned_subscriptions = nil + m.clearedassigned_subscriptions = false + m.removedassigned_subscriptions = nil +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by ids. +func (m *UserMutation) AddAllowedGroupIDs(ids ...int64) { + if m.allowed_groups == nil { + m.allowed_groups = make(map[int64]struct{}) + } + for i := range ids { + m.allowed_groups[ids[i]] = struct{}{} + } +} + +// ClearAllowedGroups clears the "allowed_groups" edge to the Group entity. +func (m *UserMutation) ClearAllowedGroups() { + m.clearedallowed_groups = true +} + +// AllowedGroupsCleared reports if the "allowed_groups" edge to the Group entity was cleared. +func (m *UserMutation) AllowedGroupsCleared() bool { + return m.clearedallowed_groups +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to the Group entity by IDs. +func (m *UserMutation) RemoveAllowedGroupIDs(ids ...int64) { + if m.removedallowed_groups == nil { + m.removedallowed_groups = make(map[int64]struct{}) + } + for i := range ids { + delete(m.allowed_groups, ids[i]) + m.removedallowed_groups[ids[i]] = struct{}{} + } +} + +// RemovedAllowedGroups returns the removed IDs of the "allowed_groups" edge to the Group entity. +func (m *UserMutation) RemovedAllowedGroupsIDs() (ids []int64) { + for id := range m.removedallowed_groups { + ids = append(ids, id) + } + return +} + +// AllowedGroupsIDs returns the "allowed_groups" edge IDs in the mutation. +func (m *UserMutation) AllowedGroupsIDs() (ids []int64) { + for id := range m.allowed_groups { + ids = append(ids, id) + } + return +} + +// ResetAllowedGroups resets all changes to the "allowed_groups" edge. 
+func (m *UserMutation) ResetAllowedGroups() { + m.allowed_groups = nil + m.clearedallowed_groups = false + m.removedallowed_groups = nil +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by ids. +func (m *UserMutation) AddUsageLogIDs(ids ...int64) { + if m.usage_logs == nil { + m.usage_logs = make(map[int64]struct{}) + } + for i := range ids { + m.usage_logs[ids[i]] = struct{}{} + } +} + +// ClearUsageLogs clears the "usage_logs" edge to the UsageLog entity. +func (m *UserMutation) ClearUsageLogs() { + m.clearedusage_logs = true +} + +// UsageLogsCleared reports if the "usage_logs" edge to the UsageLog entity was cleared. +func (m *UserMutation) UsageLogsCleared() bool { + return m.clearedusage_logs +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to the UsageLog entity by IDs. +func (m *UserMutation) RemoveUsageLogIDs(ids ...int64) { + if m.removedusage_logs == nil { + m.removedusage_logs = make(map[int64]struct{}) + } + for i := range ids { + delete(m.usage_logs, ids[i]) + m.removedusage_logs[ids[i]] = struct{}{} + } +} + +// RemovedUsageLogs returns the removed IDs of the "usage_logs" edge to the UsageLog entity. +func (m *UserMutation) RemovedUsageLogsIDs() (ids []int64) { + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return +} + +// UsageLogsIDs returns the "usage_logs" edge IDs in the mutation. +func (m *UserMutation) UsageLogsIDs() (ids []int64) { + for id := range m.usage_logs { + ids = append(ids, id) + } + return +} + +// ResetUsageLogs resets all changes to the "usage_logs" edge. +func (m *UserMutation) ResetUsageLogs() { + m.usage_logs = nil + m.clearedusage_logs = false + m.removedusage_logs = nil +} + +// AddAttributeValueIDs adds the "attribute_values" edge to the UserAttributeValue entity by ids. +func (m *UserMutation) AddAttributeValueIDs(ids ...int64) { + if m.attribute_values == nil { + m.attribute_values = make(map[int64]struct{}) + } + for i := range ids { + m.attribute_values[ids[i]] = struct{}{} + } +} + +// ClearAttributeValues clears the "attribute_values" edge to the UserAttributeValue entity. +func (m *UserMutation) ClearAttributeValues() { + m.clearedattribute_values = true +} + +// AttributeValuesCleared reports if the "attribute_values" edge to the UserAttributeValue entity was cleared. +func (m *UserMutation) AttributeValuesCleared() bool { + return m.clearedattribute_values +} + +// RemoveAttributeValueIDs removes the "attribute_values" edge to the UserAttributeValue entity by IDs. +func (m *UserMutation) RemoveAttributeValueIDs(ids ...int64) { + if m.removedattribute_values == nil { + m.removedattribute_values = make(map[int64]struct{}) + } + for i := range ids { + delete(m.attribute_values, ids[i]) + m.removedattribute_values[ids[i]] = struct{}{} + } +} + +// RemovedAttributeValues returns the removed IDs of the "attribute_values" edge to the UserAttributeValue entity. +func (m *UserMutation) RemovedAttributeValuesIDs() (ids []int64) { + for id := range m.removedattribute_values { + ids = append(ids, id) + } + return +} + +// AttributeValuesIDs returns the "attribute_values" edge IDs in the mutation. +func (m *UserMutation) AttributeValuesIDs() (ids []int64) { + for id := range m.attribute_values { + ids = append(ids, id) + } + return +} + +// ResetAttributeValues resets all changes to the "attribute_values" edge. 
+func (m *UserMutation) ResetAttributeValues() {
+	m.attribute_values = nil
+	m.clearedattribute_values = false
+	m.removedattribute_values = nil
+}
+
+// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by ids.
+func (m *UserMutation) AddPromoCodeUsageIDs(ids ...int64) {
+	if m.promo_code_usages == nil {
+		m.promo_code_usages = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.promo_code_usages[ids[i]] = struct{}{}
+	}
+}
+
+// ClearPromoCodeUsages clears the "promo_code_usages" edge to the PromoCodeUsage entity.
+func (m *UserMutation) ClearPromoCodeUsages() {
+	m.clearedpromo_code_usages = true
+}
+
+// PromoCodeUsagesCleared reports if the "promo_code_usages" edge to the PromoCodeUsage entity was cleared.
+func (m *UserMutation) PromoCodeUsagesCleared() bool {
+	return m.clearedpromo_code_usages
+}
+
+// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to the PromoCodeUsage entity by IDs.
+func (m *UserMutation) RemovePromoCodeUsageIDs(ids ...int64) {
+	if m.removedpromo_code_usages == nil {
+		m.removedpromo_code_usages = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.promo_code_usages, ids[i])
+		m.removedpromo_code_usages[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedPromoCodeUsages returns the removed IDs of the "promo_code_usages" edge to the PromoCodeUsage entity.
+func (m *UserMutation) RemovedPromoCodeUsagesIDs() (ids []int64) {
+	for id := range m.removedpromo_code_usages {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// PromoCodeUsagesIDs returns the "promo_code_usages" edge IDs in the mutation.
+func (m *UserMutation) PromoCodeUsagesIDs() (ids []int64) {
+	for id := range m.promo_code_usages {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetPromoCodeUsages resets all changes to the "promo_code_usages" edge.
+func (m *UserMutation) ResetPromoCodeUsages() {
+	m.promo_code_usages = nil
+	m.clearedpromo_code_usages = false
+	m.removedpromo_code_usages = nil
+}
+
+// Where appends a list of predicates to the UserMutation builder.
+func (m *UserMutation) Where(ps ...predicate.User) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.User, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (User).
+func (m *UserMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
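+// For example, an audit hook might pair Fields with Field to inspect every
+// pending change (a sketch; the logger is illustrative):
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("user.%s set to %v", name, v)
+//		}
+//	}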
+func (m *UserMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, user.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, user.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, user.FieldDeletedAt) + } + if m.email != nil { + fields = append(fields, user.FieldEmail) + } + if m.password_hash != nil { + fields = append(fields, user.FieldPasswordHash) + } + if m.role != nil { + fields = append(fields, user.FieldRole) + } + if m.balance != nil { + fields = append(fields, user.FieldBalance) + } + if m.concurrency != nil { + fields = append(fields, user.FieldConcurrency) + } + if m.status != nil { + fields = append(fields, user.FieldStatus) + } + if m.username != nil { + fields = append(fields, user.FieldUsername) + } + if m.notes != nil { + fields = append(fields, user.FieldNotes) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserMutation) Field(name string) (ent.Value, bool) { + switch name { + case user.FieldCreatedAt: + return m.CreatedAt() + case user.FieldUpdatedAt: + return m.UpdatedAt() + case user.FieldDeletedAt: + return m.DeletedAt() + case user.FieldEmail: + return m.Email() + case user.FieldPasswordHash: + return m.PasswordHash() + case user.FieldRole: + return m.Role() + case user.FieldBalance: + return m.Balance() + case user.FieldConcurrency: + return m.Concurrency() + case user.FieldStatus: + return m.Status() + case user.FieldUsername: + return m.Username() + case user.FieldNotes: + return m.Notes() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case user.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case user.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case user.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case user.FieldEmail: + return m.OldEmail(ctx) + case user.FieldPasswordHash: + return m.OldPasswordHash(ctx) + case user.FieldRole: + return m.OldRole(ctx) + case user.FieldBalance: + return m.OldBalance(ctx) + case user.FieldConcurrency: + return m.OldConcurrency(ctx) + case user.FieldStatus: + return m.OldStatus(ctx) + case user.FieldUsername: + return m.OldUsername(ctx) + case user.FieldNotes: + return m.OldNotes(ctx) + } + return nil, fmt.Errorf("unknown User field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
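+// This is the type-erased path used by generic consumers such as hooks; typed
+// code calls the concrete setters instead. A sketch of equivalent calls:
+//
+//	_ = m.SetField(user.FieldEmail, "user@example.com") // generic, checked at runtime
+//	m.SetEmail("user@example.com")                       // typed, checked at compile time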
+func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case user.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case user.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case user.FieldEmail: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEmail(v) + return nil + case user.FieldPasswordHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPasswordHash(v) + return nil + case user.FieldRole: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRole(v) + return nil + case user.FieldBalance: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBalance(v) + return nil + case user.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetConcurrency(v) + return nil + case user.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case user.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case user.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + var fields []string + if m.addbalance != nil { + fields = append(fields, user.FieldBalance) + } + if m.addconcurrency != nil { + fields = append(fields, user.FieldConcurrency) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case user.FieldBalance: + return m.AddedBalance() + case user.FieldConcurrency: + return m.AddedConcurrency() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + case user.FieldBalance: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBalance(v) + return nil + case user.FieldConcurrency: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddConcurrency(v) + return nil + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
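+// For User the only clearable column is deleted_at, presumably managed by the
+// soft-delete mixin in this repository's schema; as a sketch, restoring a
+// soft-deleted row:
+//
+//	m.ClearDeletedAt()
+//	m.FieldCleared(user.FieldDeletedAt) // true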
+func (m *UserMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(user.FieldDeletedAt) { + fields = append(fields, user.FieldDeletedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + switch name { + case user.FieldDeletedAt: + m.ClearDeletedAt() + return nil + } + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case user.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case user.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case user.FieldEmail: + m.ResetEmail() + return nil + case user.FieldPasswordHash: + m.ResetPasswordHash() + return nil + case user.FieldRole: + m.ResetRole() + return nil + case user.FieldBalance: + m.ResetBalance() + return nil + case user.FieldConcurrency: + m.ResetConcurrency() + return nil + case user.FieldStatus: + m.ResetStatus() + return nil + case user.FieldUsername: + m.ResetUsername() + return nil + case user.FieldNotes: + m.ResetNotes() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 8) + if m.api_keys != nil { + edges = append(edges, user.EdgeAPIKeys) + } + if m.redeem_codes != nil { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.subscriptions != nil { + edges = append(edges, user.EdgeSubscriptions) + } + if m.assigned_subscriptions != nil { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.allowed_groups != nil { + edges = append(edges, user.EdgeAllowedGroups) + } + if m.usage_logs != nil { + edges = append(edges, user.EdgeUsageLogs) + } + if m.attribute_values != nil { + edges = append(edges, user.EdgeAttributeValues) + } + if m.promo_code_usages != nil { + edges = append(edges, user.EdgePromoCodeUsages) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *UserMutation) AddedIDs(name string) []ent.Value { + switch name { + case user.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.api_keys)) + for id := range m.api_keys { + ids = append(ids, id) + } + return ids + case user.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.redeem_codes)) + for id := range m.redeem_codes { + ids = append(ids, id) + } + return ids + case user.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.subscriptions)) + for id := range m.subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAssignedSubscriptions: + ids := make([]ent.Value, 0, len(m.assigned_subscriptions)) + for id := range m.assigned_subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAllowedGroups: + ids := make([]ent.Value, 0, len(m.allowed_groups)) + for id := range m.allowed_groups { + ids = append(ids, id) + } + return ids + case user.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.usage_logs)) + for id := range m.usage_logs { + ids = append(ids, id) + } + return ids + case user.EdgeAttributeValues: + ids := make([]ent.Value, 0, len(m.attribute_values)) + for id := range m.attribute_values { + ids = append(ids, id) + } + return ids + case user.EdgePromoCodeUsages: + ids := make([]ent.Value, 0, len(m.promo_code_usages)) + for id := range m.promo_code_usages { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 8) + if m.removedapi_keys != nil { + edges = append(edges, user.EdgeAPIKeys) + } + if m.removedredeem_codes != nil { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.removedsubscriptions != nil { + edges = append(edges, user.EdgeSubscriptions) + } + if m.removedassigned_subscriptions != nil { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.removedallowed_groups != nil { + edges = append(edges, user.EdgeAllowedGroups) + } + if m.removedusage_logs != nil { + edges = append(edges, user.EdgeUsageLogs) + } + if m.removedattribute_values != nil { + edges = append(edges, user.EdgeAttributeValues) + } + if m.removedpromo_code_usages != nil { + edges = append(edges, user.EdgePromoCodeUsages) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *UserMutation) RemovedIDs(name string) []ent.Value { + switch name { + case user.EdgeAPIKeys: + ids := make([]ent.Value, 0, len(m.removedapi_keys)) + for id := range m.removedapi_keys { + ids = append(ids, id) + } + return ids + case user.EdgeRedeemCodes: + ids := make([]ent.Value, 0, len(m.removedredeem_codes)) + for id := range m.removedredeem_codes { + ids = append(ids, id) + } + return ids + case user.EdgeSubscriptions: + ids := make([]ent.Value, 0, len(m.removedsubscriptions)) + for id := range m.removedsubscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAssignedSubscriptions: + ids := make([]ent.Value, 0, len(m.removedassigned_subscriptions)) + for id := range m.removedassigned_subscriptions { + ids = append(ids, id) + } + return ids + case user.EdgeAllowedGroups: + ids := make([]ent.Value, 0, len(m.removedallowed_groups)) + for id := range m.removedallowed_groups { + ids = append(ids, id) + } + return ids + case user.EdgeUsageLogs: + ids := make([]ent.Value, 0, len(m.removedusage_logs)) + for id := range m.removedusage_logs { + ids = append(ids, id) + } + return ids + case user.EdgeAttributeValues: + ids := make([]ent.Value, 0, len(m.removedattribute_values)) + for id := range m.removedattribute_values { + ids = append(ids, id) + } + return ids + case user.EdgePromoCodeUsages: + ids := make([]ent.Value, 0, len(m.removedpromo_code_usages)) + for id := range m.removedpromo_code_usages { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 8) + if m.clearedapi_keys { + edges = append(edges, user.EdgeAPIKeys) + } + if m.clearedredeem_codes { + edges = append(edges, user.EdgeRedeemCodes) + } + if m.clearedsubscriptions { + edges = append(edges, user.EdgeSubscriptions) + } + if m.clearedassigned_subscriptions { + edges = append(edges, user.EdgeAssignedSubscriptions) + } + if m.clearedallowed_groups { + edges = append(edges, user.EdgeAllowedGroups) + } + if m.clearedusage_logs { + edges = append(edges, user.EdgeUsageLogs) + } + if m.clearedattribute_values { + edges = append(edges, user.EdgeAttributeValues) + } + if m.clearedpromo_code_usages { + edges = append(edges, user.EdgePromoCodeUsages) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + switch name { + case user.EdgeAPIKeys: + return m.clearedapi_keys + case user.EdgeRedeemCodes: + return m.clearedredeem_codes + case user.EdgeSubscriptions: + return m.clearedsubscriptions + case user.EdgeAssignedSubscriptions: + return m.clearedassigned_subscriptions + case user.EdgeAllowedGroups: + return m.clearedallowed_groups + case user.EdgeUsageLogs: + return m.clearedusage_logs + case user.EdgeAttributeValues: + return m.clearedattribute_values + case user.EdgePromoCodeUsages: + return m.clearedpromo_code_usages + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *UserMutation) ResetEdge(name string) error { + switch name { + case user.EdgeAPIKeys: + m.ResetAPIKeys() + return nil + case user.EdgeRedeemCodes: + m.ResetRedeemCodes() + return nil + case user.EdgeSubscriptions: + m.ResetSubscriptions() + return nil + case user.EdgeAssignedSubscriptions: + m.ResetAssignedSubscriptions() + return nil + case user.EdgeAllowedGroups: + m.ResetAllowedGroups() + return nil + case user.EdgeUsageLogs: + m.ResetUsageLogs() + return nil + case user.EdgeAttributeValues: + m.ResetAttributeValues() + return nil + case user.EdgePromoCodeUsages: + m.ResetPromoCodeUsages() + return nil + } + return fmt.Errorf("unknown User edge %s", name) +} + +// UserAllowedGroupMutation represents an operation that mutates the UserAllowedGroup nodes in the graph. +type UserAllowedGroupMutation struct { + config + op Op + typ string + created_at *time.Time + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + done bool + oldValue func(context.Context) (*UserAllowedGroup, error) + predicates []predicate.UserAllowedGroup +} + +var _ ent.Mutation = (*UserAllowedGroupMutation)(nil) + +// userallowedgroupOption allows management of the mutation configuration using functional options. +type userallowedgroupOption func(*UserAllowedGroupMutation) + +// newUserAllowedGroupMutation creates new mutation for the UserAllowedGroup entity. +func newUserAllowedGroupMutation(c config, op Op, opts ...userallowedgroupOption) *UserAllowedGroupMutation { + m := &UserAllowedGroupMutation{ + config: c, + op: op, + typ: TypeUserAllowedGroup, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserAllowedGroupMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserAllowedGroupMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetUserID sets the "user_id" field. +func (m *UserAllowedGroupMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserAllowedGroupMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserAllowedGroupMutation) ResetUserID() { + m.user = nil +} + +// SetGroupID sets the "group_id" field. +func (m *UserAllowedGroupMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *UserAllowedGroupMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *UserAllowedGroupMutation) ResetGroupID() { + m.group = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserAllowedGroupMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. 
+func (m *UserAllowedGroupMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *UserAllowedGroupMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (m *UserAllowedGroupMutation) ClearUser() {
+	m.cleareduser = true
+	m.clearedFields[userallowedgroup.FieldUserID] = struct{}{}
+}
+
+// UserCleared reports if the "user" edge to the User entity was cleared.
+func (m *UserAllowedGroupMutation) UserCleared() bool {
+	return m.cleareduser
+}
+
+// UserIDs returns the "user" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// UserID instead. It exists only for internal usage by the builders.
+func (m *UserAllowedGroupMutation) UserIDs() (ids []int64) {
+	if id := m.user; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetUser resets all changes to the "user" edge.
+func (m *UserAllowedGroupMutation) ResetUser() {
+	m.user = nil
+	m.cleareduser = false
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (m *UserAllowedGroupMutation) ClearGroup() {
+	m.clearedgroup = true
+	m.clearedFields[userallowedgroup.FieldGroupID] = struct{}{}
+}
+
+// GroupCleared reports if the "group" edge to the Group entity was cleared.
+func (m *UserAllowedGroupMutation) GroupCleared() bool {
+	return m.clearedgroup
+}
+
+// GroupIDs returns the "group" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// GroupID instead. It exists only for internal usage by the builders.
+func (m *UserAllowedGroupMutation) GroupIDs() (ids []int64) {
+	if id := m.group; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetGroup resets all changes to the "group" edge.
+func (m *UserAllowedGroupMutation) ResetGroup() {
+	m.group = nil
+	m.clearedgroup = false
+}
+
+// Where appends a list of predicates to the UserAllowedGroupMutation builder.
+func (m *UserAllowedGroupMutation) Where(ps ...predicate.UserAllowedGroup) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserAllowedGroupMutation builder. Using this method,
+// callers can rely on type-assertion to append predicates that do not depend on any generated package.
+func (m *UserAllowedGroupMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.UserAllowedGroup, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserAllowedGroupMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserAllowedGroupMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (UserAllowedGroup).
+func (m *UserAllowedGroupMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
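+//
+// For example (an editor's sketch, not generated code), a logging hook could
+// inspect a mutation generically through Fields and Field; the log call is
+// illustrative:
+//
+//	for _, name := range m.Fields() {
+//		if v, ok := m.Field(name); ok {
+//			log.Printf("staged %s = %v", name, v)
+//		}
+//	}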
+func (m *UserAllowedGroupMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.user != nil { + fields = append(fields, userallowedgroup.FieldUserID) + } + if m.group != nil { + fields = append(fields, userallowedgroup.FieldGroupID) + } + if m.created_at != nil { + fields = append(fields, userallowedgroup.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserAllowedGroupMutation) Field(name string) (ent.Value, bool) { + switch name { + case userallowedgroup.FieldUserID: + return m.UserID() + case userallowedgroup.FieldGroupID: + return m.GroupID() + case userallowedgroup.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAllowedGroupMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + return nil, errors.New("edge schema UserAllowedGroup does not support getting old values") +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAllowedGroupMutation) SetField(name string, value ent.Value) error { + switch name { + case userallowedgroup.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case userallowedgroup.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case userallowedgroup.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown UserAllowedGroup field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAllowedGroupMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAllowedGroupMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAllowedGroupMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown UserAllowedGroup numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserAllowedGroupMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAllowedGroupMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *UserAllowedGroupMutation) ClearField(name string) error { + return fmt.Errorf("unknown UserAllowedGroup nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAllowedGroupMutation) ResetField(name string) error { + switch name { + case userallowedgroup.FieldUserID: + m.ResetUserID() + return nil + case userallowedgroup.FieldGroupID: + m.ResetGroupID() + return nil + case userallowedgroup.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAllowedGroupMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, userallowedgroup.EdgeUser) + } + if m.group != nil { + edges = append(edges, userallowedgroup.EdgeGroup) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAllowedGroupMutation) AddedIDs(name string) []ent.Value { + switch name { + case userallowedgroup.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case userallowedgroup.EdgeGroup: + if id := m.group; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserAllowedGroupMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAllowedGroupMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAllowedGroupMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, userallowedgroup.EdgeUser) + } + if m.clearedgroup { + edges = append(edges, userallowedgroup.EdgeGroup) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAllowedGroupMutation) EdgeCleared(name string) bool { + switch name { + case userallowedgroup.EdgeUser: + return m.cleareduser + case userallowedgroup.EdgeGroup: + return m.clearedgroup + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAllowedGroupMutation) ClearEdge(name string) error { + switch name { + case userallowedgroup.EdgeUser: + m.ClearUser() + return nil + case userallowedgroup.EdgeGroup: + m.ClearGroup() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserAllowedGroupMutation) ResetEdge(name string) error { + switch name { + case userallowedgroup.EdgeUser: + m.ResetUser() + return nil + case userallowedgroup.EdgeGroup: + m.ResetGroup() + return nil + } + return fmt.Errorf("unknown UserAllowedGroup edge %s", name) +} + +// UserAttributeDefinitionMutation represents an operation that mutates the UserAttributeDefinition nodes in the graph. 
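+//
+// As with every mutation type in this package, behavior can be extended with
+// hooks. A minimal sketch (editor's note; it assumes the generated
+// hook.UserAttributeDefinitionFunc adapter from backend/ent/hook):
+//
+//	client.UserAttributeDefinition.Use(func(next ent.Mutator) ent.Mutator {
+//		return hook.UserAttributeDefinitionFunc(func(ctx context.Context, m *ent.UserAttributeDefinitionMutation) (ent.Value, error) {
+//			return next.Mutate(ctx, m) // inspect or adjust m before delegating
+//		})
+//	})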
+type UserAttributeDefinitionMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + key *string + name *string + description *string + _type *string + options *[]map[string]interface{} + appendoptions []map[string]interface{} + required *bool + validation *map[string]interface{} + placeholder *string + display_order *int + adddisplay_order *int + enabled *bool + clearedFields map[string]struct{} + values map[int64]struct{} + removedvalues map[int64]struct{} + clearedvalues bool + done bool + oldValue func(context.Context) (*UserAttributeDefinition, error) + predicates []predicate.UserAttributeDefinition +} + +var _ ent.Mutation = (*UserAttributeDefinitionMutation)(nil) + +// userattributedefinitionOption allows management of the mutation configuration using functional options. +type userattributedefinitionOption func(*UserAttributeDefinitionMutation) + +// newUserAttributeDefinitionMutation creates new mutation for the UserAttributeDefinition entity. +func newUserAttributeDefinitionMutation(c config, op Op, opts ...userattributedefinitionOption) *UserAttributeDefinitionMutation { + m := &UserAttributeDefinitionMutation{ + config: c, + op: op, + typ: TypeUserAttributeDefinition, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserAttributeDefinitionID sets the ID field of the mutation. +func withUserAttributeDefinitionID(id int64) userattributedefinitionOption { + return func(m *UserAttributeDefinitionMutation) { + var ( + err error + once sync.Once + value *UserAttributeDefinition + ) + m.oldValue = func(ctx context.Context) (*UserAttributeDefinition, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UserAttributeDefinition.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUserAttributeDefinition sets the old UserAttributeDefinition of the mutation. +func withUserAttributeDefinition(node *UserAttributeDefinition) userattributedefinitionOption { + return func(m *UserAttributeDefinitionMutation) { + m.oldValue = func(context.Context) (*UserAttributeDefinition, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserAttributeDefinitionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserAttributeDefinitionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserAttributeDefinitionMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UserAttributeDefinitionMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().UserAttributeDefinition.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *UserAttributeDefinitionMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *UserAttributeDefinitionMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the UserAttributeDefinition entity.
+// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserAttributeDefinitionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *UserAttributeDefinitionMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *UserAttributeDefinitionMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *UserAttributeDefinitionMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the UserAttributeDefinition entity.
+// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserAttributeDefinitionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *UserAttributeDefinitionMutation) ResetUpdatedAt() {
+	m.updated_at = nil
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (m *UserAttributeDefinitionMutation) SetDeletedAt(t time.Time) {
+	m.deleted_at = &t
+}
+
+// DeletedAt returns the value of the "deleted_at" field in the mutation.
+func (m *UserAttributeDefinitionMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *UserAttributeDefinitionMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[userattributedefinition.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *UserAttributeDefinitionMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[userattributedefinition.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *UserAttributeDefinitionMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, userattributedefinition.FieldDeletedAt) +} + +// SetKey sets the "key" field. +func (m *UserAttributeDefinitionMutation) SetKey(s string) { + m.key = &s +} + +// Key returns the value of the "key" field in the mutation. +func (m *UserAttributeDefinitionMutation) Key() (r string, exists bool) { + v := m.key + if v == nil { + return + } + return *v, true +} + +// OldKey returns the old "key" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKey: %w", err) + } + return oldValue.Key, nil +} + +// ResetKey resets all changes to the "key" field. +func (m *UserAttributeDefinitionMutation) ResetKey() { + m.key = nil +} + +// SetName sets the "name" field. +func (m *UserAttributeDefinitionMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *UserAttributeDefinitionMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
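+//
+// A sketch of the intended use (editor's illustration): an UpdateOne hook can
+// diff the staged value against the stored one; the audit helper is hypothetical:
+//
+//	if newName, ok := m.Name(); ok {
+//		if oldName, err := m.OldName(ctx); err == nil && oldName != newName {
+//			audit(ctx, "name", oldName, newName) // hypothetical audit sink
+//		}
+//	}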
+func (m *UserAttributeDefinitionMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *UserAttributeDefinitionMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *UserAttributeDefinitionMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *UserAttributeDefinitionMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ResetDescription resets all changes to the "description" field. +func (m *UserAttributeDefinitionMutation) ResetDescription() { + m.description = nil +} + +// SetType sets the "type" field. +func (m *UserAttributeDefinitionMutation) SetType(s string) { + m._type = &s +} + +// GetType returns the value of the "type" field in the mutation. +func (m *UserAttributeDefinitionMutation) GetType() (r string, exists bool) { + v := m._type + if v == nil { + return + } + return *v, true +} + +// OldType returns the old "type" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldType: %w", err) + } + return oldValue.Type, nil +} + +// ResetType resets all changes to the "type" field. +func (m *UserAttributeDefinitionMutation) ResetType() { + m._type = nil +} + +// SetOptions sets the "options" field. +func (m *UserAttributeDefinitionMutation) SetOptions(value []map[string]interface{}) { + m.options = &value + m.appendoptions = nil +} + +// Options returns the value of the "options" field in the mutation. 
+func (m *UserAttributeDefinitionMutation) Options() (r []map[string]interface{}, exists bool) { + v := m.options + if v == nil { + return + } + return *v, true +} + +// OldOptions returns the old "options" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldOptions(ctx context.Context) (v []map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOptions is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOptions requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOptions: %w", err) + } + return oldValue.Options, nil +} + +// AppendOptions adds value to the "options" field. +func (m *UserAttributeDefinitionMutation) AppendOptions(value []map[string]interface{}) { + m.appendoptions = append(m.appendoptions, value...) +} + +// AppendedOptions returns the list of values that were appended to the "options" field in this mutation. +func (m *UserAttributeDefinitionMutation) AppendedOptions() ([]map[string]interface{}, bool) { + if len(m.appendoptions) == 0 { + return nil, false + } + return m.appendoptions, true +} + +// ResetOptions resets all changes to the "options" field. +func (m *UserAttributeDefinitionMutation) ResetOptions() { + m.options = nil + m.appendoptions = nil +} + +// SetRequired sets the "required" field. +func (m *UserAttributeDefinitionMutation) SetRequired(b bool) { + m.required = &b +} + +// Required returns the value of the "required" field in the mutation. +func (m *UserAttributeDefinitionMutation) Required() (r bool, exists bool) { + v := m.required + if v == nil { + return + } + return *v, true +} + +// OldRequired returns the old "required" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldRequired(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRequired is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRequired requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRequired: %w", err) + } + return oldValue.Required, nil +} + +// ResetRequired resets all changes to the "required" field. +func (m *UserAttributeDefinitionMutation) ResetRequired() { + m.required = nil +} + +// SetValidation sets the "validation" field. +func (m *UserAttributeDefinitionMutation) SetValidation(value map[string]interface{}) { + m.validation = &value +} + +// Validation returns the value of the "validation" field in the mutation. +func (m *UserAttributeDefinitionMutation) Validation() (r map[string]interface{}, exists bool) { + v := m.validation + if v == nil { + return + } + return *v, true +} + +// OldValidation returns the old "validation" field's value of the UserAttributeDefinition entity. 
+// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldValidation(ctx context.Context) (v map[string]interface{}, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValidation is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValidation requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValidation: %w", err) + } + return oldValue.Validation, nil +} + +// ResetValidation resets all changes to the "validation" field. +func (m *UserAttributeDefinitionMutation) ResetValidation() { + m.validation = nil +} + +// SetPlaceholder sets the "placeholder" field. +func (m *UserAttributeDefinitionMutation) SetPlaceholder(s string) { + m.placeholder = &s +} + +// Placeholder returns the value of the "placeholder" field in the mutation. +func (m *UserAttributeDefinitionMutation) Placeholder() (r string, exists bool) { + v := m.placeholder + if v == nil { + return + } + return *v, true +} + +// OldPlaceholder returns the old "placeholder" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldPlaceholder(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPlaceholder is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPlaceholder requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPlaceholder: %w", err) + } + return oldValue.Placeholder, nil +} + +// ResetPlaceholder resets all changes to the "placeholder" field. +func (m *UserAttributeDefinitionMutation) ResetPlaceholder() { + m.placeholder = nil +} + +// SetDisplayOrder sets the "display_order" field. +func (m *UserAttributeDefinitionMutation) SetDisplayOrder(i int) { + m.display_order = &i + m.adddisplay_order = nil +} + +// DisplayOrder returns the value of the "display_order" field in the mutation. +func (m *UserAttributeDefinitionMutation) DisplayOrder() (r int, exists bool) { + v := m.display_order + if v == nil { + return + } + return *v, true +} + +// OldDisplayOrder returns the old "display_order" field's value of the UserAttributeDefinition entity. +// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeDefinitionMutation) OldDisplayOrder(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDisplayOrder is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDisplayOrder requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDisplayOrder: %w", err) + } + return oldValue.DisplayOrder, nil +} + +// AddDisplayOrder adds i to the "display_order" field. 
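+// Successive calls accumulate into one increment (editor's note):
+//
+//	m.AddDisplayOrder(2)
+//	m.AddDisplayOrder(3)
+//	// AddedDisplayOrder now reports 5, applied as display_order = display_order + 5.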
+func (m *UserAttributeDefinitionMutation) AddDisplayOrder(i int) {
+	if m.adddisplay_order != nil {
+		*m.adddisplay_order += i
+	} else {
+		m.adddisplay_order = &i
+	}
+}
+
+// AddedDisplayOrder returns the value that was added to the "display_order" field in this mutation.
+func (m *UserAttributeDefinitionMutation) AddedDisplayOrder() (r int, exists bool) {
+	v := m.adddisplay_order
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// ResetDisplayOrder resets all changes to the "display_order" field.
+func (m *UserAttributeDefinitionMutation) ResetDisplayOrder() {
+	m.display_order = nil
+	m.adddisplay_order = nil
+}
+
+// SetEnabled sets the "enabled" field.
+func (m *UserAttributeDefinitionMutation) SetEnabled(b bool) {
+	m.enabled = &b
+}
+
+// Enabled returns the value of the "enabled" field in the mutation.
+func (m *UserAttributeDefinitionMutation) Enabled() (r bool, exists bool) {
+	v := m.enabled
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldEnabled returns the old "enabled" field's value of the UserAttributeDefinition entity.
+// If the UserAttributeDefinition object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserAttributeDefinitionMutation) OldEnabled(ctx context.Context) (v bool, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldEnabled is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldEnabled requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldEnabled: %w", err)
+	}
+	return oldValue.Enabled, nil
+}
+
+// ResetEnabled resets all changes to the "enabled" field.
+func (m *UserAttributeDefinitionMutation) ResetEnabled() {
+	m.enabled = nil
+}
+
+// AddValueIDs adds the "values" edge to the UserAttributeValue entity by ids.
+func (m *UserAttributeDefinitionMutation) AddValueIDs(ids ...int64) {
+	if m.values == nil {
+		m.values = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.values[ids[i]] = struct{}{}
+	}
+}
+
+// ClearValues clears the "values" edge to the UserAttributeValue entity.
+func (m *UserAttributeDefinitionMutation) ClearValues() {
+	m.clearedvalues = true
+}
+
+// ValuesCleared reports if the "values" edge to the UserAttributeValue entity was cleared.
+func (m *UserAttributeDefinitionMutation) ValuesCleared() bool {
+	return m.clearedvalues
+}
+
+// RemoveValueIDs removes the "values" edge to the UserAttributeValue entity by IDs.
+func (m *UserAttributeDefinitionMutation) RemoveValueIDs(ids ...int64) {
+	if m.removedvalues == nil {
+		m.removedvalues = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.values, ids[i])
+		m.removedvalues[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedValuesIDs returns the removed IDs of the "values" edge to the UserAttributeValue entity.
+func (m *UserAttributeDefinitionMutation) RemovedValuesIDs() (ids []int64) {
+	for id := range m.removedvalues {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ValuesIDs returns the "values" edge IDs in the mutation.
+func (m *UserAttributeDefinitionMutation) ValuesIDs() (ids []int64) {
+	for id := range m.values {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetValues resets all changes to the "values" edge.
+func (m *UserAttributeDefinitionMutation) ResetValues() {
+	m.values = nil
+	m.clearedvalues = false
+	m.removedvalues = nil
+}
+
+// Where appends a list of predicates to the UserAttributeDefinitionMutation builder.
+func (m *UserAttributeDefinitionMutation) Where(ps ...predicate.UserAttributeDefinition) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserAttributeDefinitionMutation builder. Using this method,
+// callers can rely on type-assertion to append predicates that do not depend on any generated package.
+func (m *UserAttributeDefinitionMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.UserAttributeDefinition, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserAttributeDefinitionMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserAttributeDefinitionMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (UserAttributeDefinition).
+func (m *UserAttributeDefinitionMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *UserAttributeDefinitionMutation) Fields() []string {
+	fields := make([]string, 0, 13)
+	if m.created_at != nil {
+		fields = append(fields, userattributedefinition.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, userattributedefinition.FieldUpdatedAt)
+	}
+	if m.deleted_at != nil {
+		fields = append(fields, userattributedefinition.FieldDeletedAt)
+	}
+	if m.key != nil {
+		fields = append(fields, userattributedefinition.FieldKey)
+	}
+	if m.name != nil {
+		fields = append(fields, userattributedefinition.FieldName)
+	}
+	if m.description != nil {
+		fields = append(fields, userattributedefinition.FieldDescription)
+	}
+	if m._type != nil {
+		fields = append(fields, userattributedefinition.FieldType)
+	}
+	if m.options != nil {
+		fields = append(fields, userattributedefinition.FieldOptions)
+	}
+	if m.required != nil {
+		fields = append(fields, userattributedefinition.FieldRequired)
+	}
+	if m.validation != nil {
+		fields = append(fields, userattributedefinition.FieldValidation)
+	}
+	if m.placeholder != nil {
+		fields = append(fields, userattributedefinition.FieldPlaceholder)
+	}
+	if m.display_order != nil {
+		fields = append(fields, userattributedefinition.FieldDisplayOrder)
+	}
+	if m.enabled != nil {
+		fields = append(fields, userattributedefinition.FieldEnabled)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *UserAttributeDefinitionMutation) Field(name string) (ent.Value, bool) { + switch name { + case userattributedefinition.FieldCreatedAt: + return m.CreatedAt() + case userattributedefinition.FieldUpdatedAt: + return m.UpdatedAt() + case userattributedefinition.FieldDeletedAt: + return m.DeletedAt() + case userattributedefinition.FieldKey: + return m.Key() + case userattributedefinition.FieldName: + return m.Name() + case userattributedefinition.FieldDescription: + return m.Description() + case userattributedefinition.FieldType: + return m.GetType() + case userattributedefinition.FieldOptions: + return m.Options() + case userattributedefinition.FieldRequired: + return m.Required() + case userattributedefinition.FieldValidation: + return m.Validation() + case userattributedefinition.FieldPlaceholder: + return m.Placeholder() + case userattributedefinition.FieldDisplayOrder: + return m.DisplayOrder() + case userattributedefinition.FieldEnabled: + return m.Enabled() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAttributeDefinitionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case userattributedefinition.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case userattributedefinition.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case userattributedefinition.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case userattributedefinition.FieldKey: + return m.OldKey(ctx) + case userattributedefinition.FieldName: + return m.OldName(ctx) + case userattributedefinition.FieldDescription: + return m.OldDescription(ctx) + case userattributedefinition.FieldType: + return m.OldType(ctx) + case userattributedefinition.FieldOptions: + return m.OldOptions(ctx) + case userattributedefinition.FieldRequired: + return m.OldRequired(ctx) + case userattributedefinition.FieldValidation: + return m.OldValidation(ctx) + case userattributedefinition.FieldPlaceholder: + return m.OldPlaceholder(ctx) + case userattributedefinition.FieldDisplayOrder: + return m.OldDisplayOrder(ctx) + case userattributedefinition.FieldEnabled: + return m.OldEnabled(ctx) + } + return nil, fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
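+//
+// For example (an editor's sketch), a generic hook could stage a default
+// through this dynamic API instead of the typed SetEnabled; the fallback
+// value is an assumption:
+//
+//	if _, ok := m.Field(userattributedefinition.FieldEnabled); !ok {
+//		_ = m.SetField(userattributedefinition.FieldEnabled, true)
+//	}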
+func (m *UserAttributeDefinitionMutation) SetField(name string, value ent.Value) error { + switch name { + case userattributedefinition.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case userattributedefinition.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case userattributedefinition.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case userattributedefinition.FieldKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKey(v) + return nil + case userattributedefinition.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case userattributedefinition.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case userattributedefinition.FieldType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetType(v) + return nil + case userattributedefinition.FieldOptions: + v, ok := value.([]map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOptions(v) + return nil + case userattributedefinition.FieldRequired: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRequired(v) + return nil + case userattributedefinition.FieldValidation: + v, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValidation(v) + return nil + case userattributedefinition.FieldPlaceholder: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPlaceholder(v) + return nil + case userattributedefinition.FieldDisplayOrder: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDisplayOrder(v) + return nil + case userattributedefinition.FieldEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEnabled(v) + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAttributeDefinitionMutation) AddedFields() []string { + var fields []string + if m.adddisplay_order != nil { + fields = append(fields, userattributedefinition.FieldDisplayOrder) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAttributeDefinitionMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case userattributedefinition.FieldDisplayOrder: + return m.AddedDisplayOrder() + } + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeDefinitionMutation) AddField(name string, value ent.Value) error { + switch name { + case userattributedefinition.FieldDisplayOrder: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDisplayOrder(v) + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserAttributeDefinitionMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(userattributedefinition.FieldDeletedAt) { + fields = append(fields, userattributedefinition.FieldDeletedAt) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAttributeDefinitionMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ClearField(name string) error { + switch name { + case userattributedefinition.FieldDeletedAt: + m.ClearDeletedAt() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ResetField(name string) error { + switch name { + case userattributedefinition.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case userattributedefinition.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case userattributedefinition.FieldDeletedAt: + m.ResetDeletedAt() + return nil + case userattributedefinition.FieldKey: + m.ResetKey() + return nil + case userattributedefinition.FieldName: + m.ResetName() + return nil + case userattributedefinition.FieldDescription: + m.ResetDescription() + return nil + case userattributedefinition.FieldType: + m.ResetType() + return nil + case userattributedefinition.FieldOptions: + m.ResetOptions() + return nil + case userattributedefinition.FieldRequired: + m.ResetRequired() + return nil + case userattributedefinition.FieldValidation: + m.ResetValidation() + return nil + case userattributedefinition.FieldPlaceholder: + m.ResetPlaceholder() + return nil + case userattributedefinition.FieldDisplayOrder: + m.ResetDisplayOrder() + return nil + case userattributedefinition.FieldEnabled: + m.ResetEnabled() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAttributeDefinitionMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.values != nil { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAttributeDefinitionMutation) AddedIDs(name string) []ent.Value { + switch name { + case userattributedefinition.EdgeValues: + ids := make([]ent.Value, 0, len(m.values)) + for id := range m.values { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *UserAttributeDefinitionMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedvalues != nil { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAttributeDefinitionMutation) RemovedIDs(name string) []ent.Value { + switch name { + case userattributedefinition.EdgeValues: + ids := make([]ent.Value, 0, len(m.removedvalues)) + for id := range m.removedvalues { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAttributeDefinitionMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedvalues { + edges = append(edges, userattributedefinition.EdgeValues) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAttributeDefinitionMutation) EdgeCleared(name string) bool { + switch name { + case userattributedefinition.EdgeValues: + return m.clearedvalues + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown UserAttributeDefinition unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserAttributeDefinitionMutation) ResetEdge(name string) error { + switch name { + case userattributedefinition.EdgeValues: + m.ResetValues() + return nil + } + return fmt.Errorf("unknown UserAttributeDefinition edge %s", name) +} + +// UserAttributeValueMutation represents an operation that mutates the UserAttributeValue nodes in the graph. +type UserAttributeValueMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + value *string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + definition *int64 + cleareddefinition bool + done bool + oldValue func(context.Context) (*UserAttributeValue, error) + predicates []predicate.UserAttributeValue +} + +var _ ent.Mutation = (*UserAttributeValueMutation)(nil) + +// userattributevalueOption allows management of the mutation configuration using functional options. +type userattributevalueOption func(*UserAttributeValueMutation) + +// newUserAttributeValueMutation creates new mutation for the UserAttributeValue entity. +func newUserAttributeValueMutation(c config, op Op, opts ...userattributevalueOption) *UserAttributeValueMutation { + m := &UserAttributeValueMutation{ + config: c, + op: op, + typ: TypeUserAttributeValue, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserAttributeValueID sets the ID field of the mutation. 
+func withUserAttributeValueID(id int64) userattributevalueOption {
+	return func(m *UserAttributeValueMutation) {
+		var (
+			err   error
+			once  sync.Once
+			value *UserAttributeValue
+		)
+		m.oldValue = func(ctx context.Context) (*UserAttributeValue, error) {
+			once.Do(func() {
+				if m.done {
+					err = errors.New("querying old values post mutation is not allowed")
+				} else {
+					value, err = m.Client().UserAttributeValue.Get(ctx, id)
+				}
+			})
+			return value, err
+		}
+		m.id = &id
+	}
+}
+
+// withUserAttributeValue sets the old UserAttributeValue of the mutation.
+func withUserAttributeValue(node *UserAttributeValue) userattributevalueOption {
+	return func(m *UserAttributeValueMutation) {
+		m.oldValue = func(context.Context) (*UserAttributeValue, error) {
+			return node, nil
+		}
+		m.id = &node.ID
+	}
+}
+
+// Client returns a new `ent.Client` from the mutation. If the mutation was
+// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m UserAttributeValueMutation) Client() *Client {
+	client := &Client{config: m.config}
+	client.init()
+	return client
+}
+
+// Tx returns an `ent.Tx` for mutations that were executed in transactions;
+// it returns an error otherwise.
+func (m UserAttributeValueMutation) Tx() (*Tx, error) {
+	if _, ok := m.driver.(*txDriver); !ok {
+		return nil, errors.New("ent: mutation is not running in a transaction")
+	}
+	tx := &Tx{config: m.config}
+	tx.init()
+	return tx, nil
+}
+
+// ID returns the ID value in the mutation. Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *UserAttributeValueMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UserAttributeValueMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().UserAttributeValue.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *UserAttributeValueMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *UserAttributeValueMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the UserAttributeValue entity.
+// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserAttributeValueMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserAttributeValueMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UserAttributeValueMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserAttributeValueMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserAttributeValueMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetUserID sets the "user_id" field. +func (m *UserAttributeValueMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserAttributeValueMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserAttributeValueMutation) ResetUserID() { + m.user = nil +} + +// SetAttributeID sets the "attribute_id" field. +func (m *UserAttributeValueMutation) SetAttributeID(i int64) { + m.definition = &i +} + +// AttributeID returns the value of the "attribute_id" field in the mutation. 
+func (m *UserAttributeValueMutation) AttributeID() (r int64, exists bool) { + v := m.definition + if v == nil { + return + } + return *v, true +} + +// OldAttributeID returns the old "attribute_id" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldAttributeID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAttributeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAttributeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAttributeID: %w", err) + } + return oldValue.AttributeID, nil +} + +// ResetAttributeID resets all changes to the "attribute_id" field. +func (m *UserAttributeValueMutation) ResetAttributeID() { + m.definition = nil +} + +// SetValue sets the "value" field. +func (m *UserAttributeValueMutation) SetValue(s string) { + m.value = &s +} + +// Value returns the value of the "value" field in the mutation. +func (m *UserAttributeValueMutation) Value() (r string, exists bool) { + v := m.value + if v == nil { + return + } + return *v, true +} + +// OldValue returns the old "value" field's value of the UserAttributeValue entity. +// If the UserAttributeValue object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserAttributeValueMutation) OldValue(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldValue is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldValue requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldValue: %w", err) + } + return oldValue.Value, nil +} + +// ResetValue resets all changes to the "value" field. +func (m *UserAttributeValueMutation) ResetValue() { + m.value = nil +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UserAttributeValueMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[userattributevalue.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *UserAttributeValueMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UserAttributeValueMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UserAttributeValueMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by id. +func (m *UserAttributeValueMutation) SetDefinitionID(id int64) { + m.definition = &id +} + +// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity. 
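+// Because this edge is backed by the "attribute_id" column, clearing it also
+// marks that field as cleared (editor's note):
+//
+//	m.ClearDefinition()
+//	// m.FieldCleared(userattributevalue.FieldAttributeID) now reports true.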
+func (m *UserAttributeValueMutation) ClearDefinition() {
+	m.cleareddefinition = true
+	m.clearedFields[userattributevalue.FieldAttributeID] = struct{}{}
+}
+
+// DefinitionCleared reports if the "definition" edge to the UserAttributeDefinition entity was cleared.
+func (m *UserAttributeValueMutation) DefinitionCleared() bool {
+	return m.cleareddefinition
+}
+
+// DefinitionID returns the "definition" edge ID in the mutation.
+func (m *UserAttributeValueMutation) DefinitionID() (id int64, exists bool) {
+	if m.definition != nil {
+		return *m.definition, true
+	}
+	return
+}
+
+// DefinitionIDs returns the "definition" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// DefinitionID instead. It exists only for internal usage by the builders.
+func (m *UserAttributeValueMutation) DefinitionIDs() (ids []int64) {
+	if id := m.definition; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetDefinition resets all changes to the "definition" edge.
+func (m *UserAttributeValueMutation) ResetDefinition() {
+	m.definition = nil
+	m.cleareddefinition = false
+}
+
+// Where appends a list of predicates to the UserAttributeValueMutation builder.
+func (m *UserAttributeValueMutation) Where(ps ...predicate.UserAttributeValue) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserAttributeValueMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *UserAttributeValueMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.UserAttributeValue, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserAttributeValueMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserAttributeValueMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (UserAttributeValue).
+func (m *UserAttributeValueMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
+func (m *UserAttributeValueMutation) Fields() []string {
+	fields := make([]string, 0, 5)
+	if m.created_at != nil {
+		fields = append(fields, userattributevalue.FieldCreatedAt)
+	}
+	if m.updated_at != nil {
+		fields = append(fields, userattributevalue.FieldUpdatedAt)
+	}
+	if m.user != nil {
+		fields = append(fields, userattributevalue.FieldUserID)
+	}
+	if m.definition != nil {
+		fields = append(fields, userattributevalue.FieldAttributeID)
+	}
+	if m.value != nil {
+		fields = append(fields, userattributevalue.FieldValue)
+	}
+	return fields
+}
+
+// Field returns the value of a field with the given name. The second boolean
+// return value indicates that this field was not set, or was not defined in the
+// schema.
+func (m *UserAttributeValueMutation) Field(name string) (ent.Value, bool) {
+	switch name {
+	case userattributevalue.FieldCreatedAt:
+		return m.CreatedAt()
+	case userattributevalue.FieldUpdatedAt:
+		return m.UpdatedAt()
+	case userattributevalue.FieldUserID:
+		return m.UserID()
+	case userattributevalue.FieldAttributeID:
+		return m.AttributeID()
+	case userattributevalue.FieldValue:
+		return m.Value()
+	}
+	return nil, false
+}
+
+// OldField returns the old value of the field from the database.
An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserAttributeValueMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case userattributevalue.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case userattributevalue.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case userattributevalue.FieldUserID: + return m.OldUserID(ctx) + case userattributevalue.FieldAttributeID: + return m.OldAttributeID(ctx) + case userattributevalue.FieldValue: + return m.OldValue(ctx) + } + return nil, fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeValueMutation) SetField(name string, value ent.Value) error { + switch name { + case userattributevalue.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case userattributevalue.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case userattributevalue.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case userattributevalue.FieldAttributeID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAttributeID(v) + return nil + case userattributevalue.FieldValue: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetValue(v) + return nil + } + return fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserAttributeValueMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserAttributeValueMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserAttributeValueMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown UserAttributeValue numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserAttributeValueMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserAttributeValueMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
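+// Note: UserAttributeValue declares no nullable fields, so ClearField always
+// returns an error; an illustrative (non-generated) call:
+//
+//	err := m.ClearField(userattributevalue.FieldValue) // err != nil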
+func (m *UserAttributeValueMutation) ClearField(name string) error { + return fmt.Errorf("unknown UserAttributeValue nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserAttributeValueMutation) ResetField(name string) error { + switch name { + case userattributevalue.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case userattributevalue.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case userattributevalue.FieldUserID: + m.ResetUserID() + return nil + case userattributevalue.FieldAttributeID: + m.ResetAttributeID() + return nil + case userattributevalue.FieldValue: + m.ResetValue() + return nil + } + return fmt.Errorf("unknown UserAttributeValue field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserAttributeValueMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.user != nil { + edges = append(edges, userattributevalue.EdgeUser) + } + if m.definition != nil { + edges = append(edges, userattributevalue.EdgeDefinition) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserAttributeValueMutation) AddedIDs(name string) []ent.Value { + switch name { + case userattributevalue.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + case userattributevalue.EdgeDefinition: + if id := m.definition; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserAttributeValueMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserAttributeValueMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *UserAttributeValueMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.cleareduser { + edges = append(edges, userattributevalue.EdgeUser) + } + if m.cleareddefinition { + edges = append(edges, userattributevalue.EdgeDefinition) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserAttributeValueMutation) EdgeCleared(name string) bool { + switch name { + case userattributevalue.EdgeUser: + return m.cleareduser + case userattributevalue.EdgeDefinition: + return m.cleareddefinition + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserAttributeValueMutation) ClearEdge(name string) error { + switch name { + case userattributevalue.EdgeUser: + m.ClearUser() + return nil + case userattributevalue.EdgeDefinition: + m.ClearDefinition() + return nil + } + return fmt.Errorf("unknown UserAttributeValue unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
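+// Illustrative (non-generated) usage with the generated edge constants:
+//
+//	_ = m.ResetEdge(userattributevalue.EdgeUser) // nil
+//	_ = m.ResetEdge("bogus")                     // unknown-edge error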
+func (m *UserAttributeValueMutation) ResetEdge(name string) error { + switch name { + case userattributevalue.EdgeUser: + m.ResetUser() + return nil + case userattributevalue.EdgeDefinition: + m.ResetDefinition() + return nil + } + return fmt.Errorf("unknown UserAttributeValue edge %s", name) +} + +// UserSubscriptionMutation represents an operation that mutates the UserSubscription nodes in the graph. +type UserSubscriptionMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + starts_at *time.Time + expires_at *time.Time + status *string + daily_window_start *time.Time + weekly_window_start *time.Time + monthly_window_start *time.Time + daily_usage_usd *float64 + adddaily_usage_usd *float64 + weekly_usage_usd *float64 + addweekly_usage_usd *float64 + monthly_usage_usd *float64 + addmonthly_usage_usd *float64 + assigned_at *time.Time + notes *string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + assigned_by_user *int64 + clearedassigned_by_user bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + done bool + oldValue func(context.Context) (*UserSubscription, error) + predicates []predicate.UserSubscription +} + +var _ ent.Mutation = (*UserSubscriptionMutation)(nil) + +// usersubscriptionOption allows management of the mutation configuration using functional options. +type usersubscriptionOption func(*UserSubscriptionMutation) + +// newUserSubscriptionMutation creates new mutation for the UserSubscription entity. +func newUserSubscriptionMutation(c config, op Op, opts ...usersubscriptionOption) *UserSubscriptionMutation { + m := &UserSubscriptionMutation{ + config: c, + op: op, + typ: TypeUserSubscription, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserSubscriptionID sets the ID field of the mutation. +func withUserSubscriptionID(id int64) usersubscriptionOption { + return func(m *UserSubscriptionMutation) { + var ( + err error + once sync.Once + value *UserSubscription + ) + m.oldValue = func(ctx context.Context) (*UserSubscription, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().UserSubscription.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUserSubscription sets the old UserSubscription of the mutation. +func withUserSubscription(node *UserSubscription) usersubscriptionOption { + return func(m *UserSubscriptionMutation) { + m.oldValue = func(context.Context) (*UserSubscription, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserSubscriptionMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserSubscriptionMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available
+// if it was provided to the builder or after it was returned from the database.
+func (m *UserSubscriptionMutation) ID() (id int64, exists bool) {
+	if m.id == nil {
+		return
+	}
+	return *m.id, true
+}
+
+// IDs queries the database and returns the entity ids that match the mutation's predicate.
+// That means, if the mutation is applied within a transaction with an isolation level such
+// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
+// or deleted by the mutation.
+func (m *UserSubscriptionMutation) IDs(ctx context.Context) ([]int64, error) {
+	switch {
+	case m.op.Is(OpUpdateOne | OpDeleteOne):
+		id, exists := m.ID()
+		if exists {
+			return []int64{id}, nil
+		}
+		fallthrough
+	case m.op.Is(OpUpdate | OpDelete):
+		return m.Client().UserSubscription.Query().Where(m.predicates...).IDs(ctx)
+	default:
+		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
+	}
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (m *UserSubscriptionMutation) SetCreatedAt(t time.Time) {
+	m.created_at = &t
+}
+
+// CreatedAt returns the value of the "created_at" field in the mutation.
+func (m *UserSubscriptionMutation) CreatedAt() (r time.Time, exists bool) {
+	v := m.created_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldCreatedAt returns the old "created_at" field's value of the UserSubscription entity.
+// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserSubscriptionMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
+	}
+	return oldValue.CreatedAt, nil
+}
+
+// ResetCreatedAt resets all changes to the "created_at" field.
+func (m *UserSubscriptionMutation) ResetCreatedAt() {
+	m.created_at = nil
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (m *UserSubscriptionMutation) SetUpdatedAt(t time.Time) {
+	m.updated_at = &t
+}
+
+// UpdatedAt returns the value of the "updated_at" field in the mutation.
+func (m *UserSubscriptionMutation) UpdatedAt() (r time.Time, exists bool) {
+	v := m.updated_at
+	if v == nil {
+		return
+	}
+	return *v, true
+}
+
+// OldUpdatedAt returns the old "updated_at" field's value of the UserSubscription entity.
+// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserSubscriptionMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
+	if !m.op.Is(OpUpdateOne) {
+		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
+	}
+	if m.id == nil || m.oldValue == nil {
+		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
+	}
+	oldValue, err := m.oldValue(ctx)
+	if err != nil {
+		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
+	}
+	return oldValue.UpdatedAt, nil
+}
+
+// ResetUpdatedAt resets all changes to the "updated_at" field.
+func (m *UserSubscriptionMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDeletedAt sets the "deleted_at" field. +func (m *UserSubscriptionMutation) SetDeletedAt(t time.Time) { + m.deleted_at = &t +} + +// DeletedAt returns the value of the "deleted_at" field in the mutation. +func (m *UserSubscriptionMutation) DeletedAt() (r time.Time, exists bool) { + v := m.deleted_at + if v == nil { + return + } + return *v, true +} + +// OldDeletedAt returns the old "deleted_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldDeletedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDeletedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDeletedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDeletedAt: %w", err) + } + return oldValue.DeletedAt, nil +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (m *UserSubscriptionMutation) ClearDeletedAt() { + m.deleted_at = nil + m.clearedFields[usersubscription.FieldDeletedAt] = struct{}{} +} + +// DeletedAtCleared returns if the "deleted_at" field was cleared in this mutation. +func (m *UserSubscriptionMutation) DeletedAtCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldDeletedAt] + return ok +} + +// ResetDeletedAt resets all changes to the "deleted_at" field. +func (m *UserSubscriptionMutation) ResetDeletedAt() { + m.deleted_at = nil + delete(m.clearedFields, usersubscription.FieldDeletedAt) +} + +// SetUserID sets the "user_id" field. +func (m *UserSubscriptionMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *UserSubscriptionMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. +func (m *UserSubscriptionMutation) ResetUserID() { + m.user = nil +} + +// SetGroupID sets the "group_id" field. +func (m *UserSubscriptionMutation) SetGroupID(i int64) { + m.group = &i +} + +// GroupID returns the value of the "group_id" field in the mutation. +func (m *UserSubscriptionMutation) GroupID() (r int64, exists bool) { + v := m.group + if v == nil { + return + } + return *v, true +} + +// OldGroupID returns the old "group_id" field's value of the UserSubscription entity. 
+// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldGroupID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldGroupID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldGroupID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldGroupID: %w", err) + } + return oldValue.GroupID, nil +} + +// ResetGroupID resets all changes to the "group_id" field. +func (m *UserSubscriptionMutation) ResetGroupID() { + m.group = nil +} + +// SetStartsAt sets the "starts_at" field. +func (m *UserSubscriptionMutation) SetStartsAt(t time.Time) { + m.starts_at = &t +} + +// StartsAt returns the value of the "starts_at" field in the mutation. +func (m *UserSubscriptionMutation) StartsAt() (r time.Time, exists bool) { + v := m.starts_at + if v == nil { + return + } + return *v, true +} + +// OldStartsAt returns the old "starts_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldStartsAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartsAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartsAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartsAt: %w", err) + } + return oldValue.StartsAt, nil +} + +// ResetStartsAt resets all changes to the "starts_at" field. +func (m *UserSubscriptionMutation) ResetStartsAt() { + m.starts_at = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *UserSubscriptionMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *UserSubscriptionMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *UserSubscriptionMutation) ResetExpiresAt() { + m.expires_at = nil +} + +// SetStatus sets the "status" field. 
+func (m *UserSubscriptionMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *UserSubscriptionMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *UserSubscriptionMutation) ResetStatus() { + m.status = nil +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (m *UserSubscriptionMutation) SetDailyWindowStart(t time.Time) { + m.daily_window_start = &t +} + +// DailyWindowStart returns the value of the "daily_window_start" field in the mutation. +func (m *UserSubscriptionMutation) DailyWindowStart() (r time.Time, exists bool) { + v := m.daily_window_start + if v == nil { + return + } + return *v, true +} + +// OldDailyWindowStart returns the old "daily_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldDailyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyWindowStart: %w", err) + } + return oldValue.DailyWindowStart, nil +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (m *UserSubscriptionMutation) ClearDailyWindowStart() { + m.daily_window_start = nil + m.clearedFields[usersubscription.FieldDailyWindowStart] = struct{}{} +} + +// DailyWindowStartCleared returns if the "daily_window_start" field was cleared in this mutation. +func (m *UserSubscriptionMutation) DailyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldDailyWindowStart] + return ok +} + +// ResetDailyWindowStart resets all changes to the "daily_window_start" field. +func (m *UserSubscriptionMutation) ResetDailyWindowStart() { + m.daily_window_start = nil + delete(m.clearedFields, usersubscription.FieldDailyWindowStart) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (m *UserSubscriptionMutation) SetWeeklyWindowStart(t time.Time) { + m.weekly_window_start = &t +} + +// WeeklyWindowStart returns the value of the "weekly_window_start" field in the mutation. 
+func (m *UserSubscriptionMutation) WeeklyWindowStart() (r time.Time, exists bool) { + v := m.weekly_window_start + if v == nil { + return + } + return *v, true +} + +// OldWeeklyWindowStart returns the old "weekly_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldWeeklyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyWindowStart: %w", err) + } + return oldValue.WeeklyWindowStart, nil +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (m *UserSubscriptionMutation) ClearWeeklyWindowStart() { + m.weekly_window_start = nil + m.clearedFields[usersubscription.FieldWeeklyWindowStart] = struct{}{} +} + +// WeeklyWindowStartCleared returns if the "weekly_window_start" field was cleared in this mutation. +func (m *UserSubscriptionMutation) WeeklyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldWeeklyWindowStart] + return ok +} + +// ResetWeeklyWindowStart resets all changes to the "weekly_window_start" field. +func (m *UserSubscriptionMutation) ResetWeeklyWindowStart() { + m.weekly_window_start = nil + delete(m.clearedFields, usersubscription.FieldWeeklyWindowStart) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (m *UserSubscriptionMutation) SetMonthlyWindowStart(t time.Time) { + m.monthly_window_start = &t +} + +// MonthlyWindowStart returns the value of the "monthly_window_start" field in the mutation. +func (m *UserSubscriptionMutation) MonthlyWindowStart() (r time.Time, exists bool) { + v := m.monthly_window_start + if v == nil { + return + } + return *v, true +} + +// OldMonthlyWindowStart returns the old "monthly_window_start" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldMonthlyWindowStart(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyWindowStart is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyWindowStart requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyWindowStart: %w", err) + } + return oldValue.MonthlyWindowStart, nil +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (m *UserSubscriptionMutation) ClearMonthlyWindowStart() { + m.monthly_window_start = nil + m.clearedFields[usersubscription.FieldMonthlyWindowStart] = struct{}{} +} + +// MonthlyWindowStartCleared returns if the "monthly_window_start" field was cleared in this mutation. 
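+// Illustrative (non-generated) sketch of the clear-vs-reset distinction:
+//
+//	m.ClearMonthlyWindowStart()
+//	m.MonthlyWindowStartCleared() // true: the clear is staged
+//	m.ResetMonthlyWindowStart()
+//	m.MonthlyWindowStartCleared() // false: all staged changes dropped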
+func (m *UserSubscriptionMutation) MonthlyWindowStartCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldMonthlyWindowStart] + return ok +} + +// ResetMonthlyWindowStart resets all changes to the "monthly_window_start" field. +func (m *UserSubscriptionMutation) ResetMonthlyWindowStart() { + m.monthly_window_start = nil + delete(m.clearedFields, usersubscription.FieldMonthlyWindowStart) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (m *UserSubscriptionMutation) SetDailyUsageUsd(f float64) { + m.daily_usage_usd = &f + m.adddaily_usage_usd = nil +} + +// DailyUsageUsd returns the value of the "daily_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) DailyUsageUsd() (r float64, exists bool) { + v := m.daily_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldDailyUsageUsd returns the old "daily_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldDailyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDailyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDailyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDailyUsageUsd: %w", err) + } + return oldValue.DailyUsageUsd, nil +} + +// AddDailyUsageUsd adds f to the "daily_usage_usd" field. +func (m *UserSubscriptionMutation) AddDailyUsageUsd(f float64) { + if m.adddaily_usage_usd != nil { + *m.adddaily_usage_usd += f + } else { + m.adddaily_usage_usd = &f + } +} + +// AddedDailyUsageUsd returns the value that was added to the "daily_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedDailyUsageUsd() (r float64, exists bool) { + v := m.adddaily_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetDailyUsageUsd resets all changes to the "daily_usage_usd" field. +func (m *UserSubscriptionMutation) ResetDailyUsageUsd() { + m.daily_usage_usd = nil + m.adddaily_usage_usd = nil +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (m *UserSubscriptionMutation) SetWeeklyUsageUsd(f float64) { + m.weekly_usage_usd = &f + m.addweekly_usage_usd = nil +} + +// WeeklyUsageUsd returns the value of the "weekly_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) WeeklyUsageUsd() (r float64, exists bool) { + v := m.weekly_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldWeeklyUsageUsd returns the old "weekly_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserSubscriptionMutation) OldWeeklyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWeeklyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWeeklyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWeeklyUsageUsd: %w", err) + } + return oldValue.WeeklyUsageUsd, nil +} + +// AddWeeklyUsageUsd adds f to the "weekly_usage_usd" field. +func (m *UserSubscriptionMutation) AddWeeklyUsageUsd(f float64) { + if m.addweekly_usage_usd != nil { + *m.addweekly_usage_usd += f + } else { + m.addweekly_usage_usd = &f + } +} + +// AddedWeeklyUsageUsd returns the value that was added to the "weekly_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedWeeklyUsageUsd() (r float64, exists bool) { + v := m.addweekly_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetWeeklyUsageUsd resets all changes to the "weekly_usage_usd" field. +func (m *UserSubscriptionMutation) ResetWeeklyUsageUsd() { + m.weekly_usage_usd = nil + m.addweekly_usage_usd = nil +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (m *UserSubscriptionMutation) SetMonthlyUsageUsd(f float64) { + m.monthly_usage_usd = &f + m.addmonthly_usage_usd = nil +} + +// MonthlyUsageUsd returns the value of the "monthly_usage_usd" field in the mutation. +func (m *UserSubscriptionMutation) MonthlyUsageUsd() (r float64, exists bool) { + v := m.monthly_usage_usd + if v == nil { + return + } + return *v, true +} + +// OldMonthlyUsageUsd returns the old "monthly_usage_usd" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldMonthlyUsageUsd(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMonthlyUsageUsd is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMonthlyUsageUsd requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMonthlyUsageUsd: %w", err) + } + return oldValue.MonthlyUsageUsd, nil +} + +// AddMonthlyUsageUsd adds f to the "monthly_usage_usd" field. +func (m *UserSubscriptionMutation) AddMonthlyUsageUsd(f float64) { + if m.addmonthly_usage_usd != nil { + *m.addmonthly_usage_usd += f + } else { + m.addmonthly_usage_usd = &f + } +} + +// AddedMonthlyUsageUsd returns the value that was added to the "monthly_usage_usd" field in this mutation. +func (m *UserSubscriptionMutation) AddedMonthlyUsageUsd() (r float64, exists bool) { + v := m.addmonthly_usage_usd + if v == nil { + return + } + return *v, true +} + +// ResetMonthlyUsageUsd resets all changes to the "monthly_usage_usd" field. +func (m *UserSubscriptionMutation) ResetMonthlyUsageUsd() { + m.monthly_usage_usd = nil + m.addmonthly_usage_usd = nil +} + +// SetAssignedBy sets the "assigned_by" field. +func (m *UserSubscriptionMutation) SetAssignedBy(i int64) { + m.assigned_by_user = &i +} + +// AssignedBy returns the value of the "assigned_by" field in the mutation. 
+func (m *UserSubscriptionMutation) AssignedBy() (r int64, exists bool) { + v := m.assigned_by_user + if v == nil { + return + } + return *v, true +} + +// OldAssignedBy returns the old "assigned_by" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldAssignedBy(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAssignedBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAssignedBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAssignedBy: %w", err) + } + return oldValue.AssignedBy, nil +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (m *UserSubscriptionMutation) ClearAssignedBy() { + m.assigned_by_user = nil + m.clearedFields[usersubscription.FieldAssignedBy] = struct{}{} +} + +// AssignedByCleared returns if the "assigned_by" field was cleared in this mutation. +func (m *UserSubscriptionMutation) AssignedByCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldAssignedBy] + return ok +} + +// ResetAssignedBy resets all changes to the "assigned_by" field. +func (m *UserSubscriptionMutation) ResetAssignedBy() { + m.assigned_by_user = nil + delete(m.clearedFields, usersubscription.FieldAssignedBy) +} + +// SetAssignedAt sets the "assigned_at" field. +func (m *UserSubscriptionMutation) SetAssignedAt(t time.Time) { + m.assigned_at = &t +} + +// AssignedAt returns the value of the "assigned_at" field in the mutation. +func (m *UserSubscriptionMutation) AssignedAt() (r time.Time, exists bool) { + v := m.assigned_at + if v == nil { + return + } + return *v, true +} + +// OldAssignedAt returns the old "assigned_at" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserSubscriptionMutation) OldAssignedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAssignedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAssignedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAssignedAt: %w", err) + } + return oldValue.AssignedAt, nil +} + +// ResetAssignedAt resets all changes to the "assigned_at" field. +func (m *UserSubscriptionMutation) ResetAssignedAt() { + m.assigned_at = nil +} + +// SetNotes sets the "notes" field. +func (m *UserSubscriptionMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *UserSubscriptionMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the UserSubscription entity. +// If the UserSubscription object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserSubscriptionMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *UserSubscriptionMutation) ClearNotes() { + m.notes = nil + m.clearedFields[usersubscription.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *UserSubscriptionMutation) NotesCleared() bool { + _, ok := m.clearedFields[usersubscription.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. +func (m *UserSubscriptionMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, usersubscription.FieldNotes) +} + +// ClearUser clears the "user" edge to the User entity. +func (m *UserSubscriptionMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[usersubscription.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *UserSubscriptionMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *UserSubscriptionMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *UserSubscriptionMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// ClearGroup clears the "group" edge to the Group entity. +func (m *UserSubscriptionMutation) ClearGroup() { + m.clearedgroup = true + m.clearedFields[usersubscription.FieldGroupID] = struct{}{} +} + +// GroupCleared reports if the "group" edge to the Group entity was cleared. +func (m *UserSubscriptionMutation) GroupCleared() bool { + return m.clearedgroup +} + +// GroupIDs returns the "group" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// GroupID instead. It exists only for internal usage by the builders. +func (m *UserSubscriptionMutation) GroupIDs() (ids []int64) { + if id := m.group; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetGroup resets all changes to the "group" edge. +func (m *UserSubscriptionMutation) ResetGroup() { + m.group = nil + m.clearedgroup = false +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by id. +func (m *UserSubscriptionMutation) SetAssignedByUserID(id int64) { + m.assigned_by_user = &id +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (m *UserSubscriptionMutation) ClearAssignedByUser() { + m.clearedassigned_by_user = true + m.clearedFields[usersubscription.FieldAssignedBy] = struct{}{} +} + +// AssignedByUserCleared reports if the "assigned_by_user" edge to the User entity was cleared. +func (m *UserSubscriptionMutation) AssignedByUserCleared() bool { + return m.AssignedByCleared() || m.clearedassigned_by_user +} + +// AssignedByUserID returns the "assigned_by_user" edge ID in the mutation. 
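+// The "assigned_by" field and the "assigned_by_user" edge share storage, so a
+// value set through either API is visible here; illustrative (non-generated):
+//
+//	m.SetAssignedBy(3)
+//	id, ok := m.AssignedByUserID() // 3, true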
+func (m *UserSubscriptionMutation) AssignedByUserID() (id int64, exists bool) {
+	if m.assigned_by_user != nil {
+		return *m.assigned_by_user, true
+	}
+	return
+}
+
+// AssignedByUserIDs returns the "assigned_by_user" edge IDs in the mutation.
+// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
+// AssignedByUserID instead. It exists only for internal usage by the builders.
+func (m *UserSubscriptionMutation) AssignedByUserIDs() (ids []int64) {
+	if id := m.assigned_by_user; id != nil {
+		ids = append(ids, *id)
+	}
+	return
+}
+
+// ResetAssignedByUser resets all changes to the "assigned_by_user" edge.
+func (m *UserSubscriptionMutation) ResetAssignedByUser() {
+	m.assigned_by_user = nil
+	m.clearedassigned_by_user = false
+}
+
+// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by ids.
+func (m *UserSubscriptionMutation) AddUsageLogIDs(ids ...int64) {
+	if m.usage_logs == nil {
+		m.usage_logs = make(map[int64]struct{})
+	}
+	for i := range ids {
+		m.usage_logs[ids[i]] = struct{}{}
+	}
+}
+
+// ClearUsageLogs clears the "usage_logs" edge to the UsageLog entity.
+func (m *UserSubscriptionMutation) ClearUsageLogs() {
+	m.clearedusage_logs = true
+}
+
+// UsageLogsCleared reports if the "usage_logs" edge to the UsageLog entity was cleared.
+func (m *UserSubscriptionMutation) UsageLogsCleared() bool {
+	return m.clearedusage_logs
+}
+
+// RemoveUsageLogIDs removes the "usage_logs" edge to the UsageLog entity by IDs.
+func (m *UserSubscriptionMutation) RemoveUsageLogIDs(ids ...int64) {
+	if m.removedusage_logs == nil {
+		m.removedusage_logs = make(map[int64]struct{})
+	}
+	for i := range ids {
+		delete(m.usage_logs, ids[i])
+		m.removedusage_logs[ids[i]] = struct{}{}
+	}
+}
+
+// RemovedUsageLogs returns the removed IDs of the "usage_logs" edge to the UsageLog entity.
+func (m *UserSubscriptionMutation) RemovedUsageLogsIDs() (ids []int64) {
+	for id := range m.removedusage_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// UsageLogsIDs returns the "usage_logs" edge IDs in the mutation.
+func (m *UserSubscriptionMutation) UsageLogsIDs() (ids []int64) {
+	for id := range m.usage_logs {
+		ids = append(ids, id)
+	}
+	return
+}
+
+// ResetUsageLogs resets all changes to the "usage_logs" edge.
+func (m *UserSubscriptionMutation) ResetUsageLogs() {
+	m.usage_logs = nil
+	m.clearedusage_logs = false
+	m.removedusage_logs = nil
+}
+
+// Where appends a list of predicates to the UserSubscriptionMutation builder.
+func (m *UserSubscriptionMutation) Where(ps ...predicate.UserSubscription) {
+	m.predicates = append(m.predicates, ps...)
+}
+
+// WhereP appends storage-level predicates to the UserSubscriptionMutation builder. Using this method,
+// users can use type-assertion to append predicates that do not depend on any generated package.
+func (m *UserSubscriptionMutation) WhereP(ps ...func(*sql.Selector)) {
+	p := make([]predicate.UserSubscription, len(ps))
+	for i := range ps {
+		p[i] = ps[i]
+	}
+	m.Where(p...)
+}
+
+// Op returns the operation name.
+func (m *UserSubscriptionMutation) Op() Op {
+	return m.op
+}
+
+// SetOp allows setting the mutation operation.
+func (m *UserSubscriptionMutation) SetOp(op Op) {
+	m.op = op
+}
+
+// Type returns the node type of this mutation (UserSubscription).
+func (m *UserSubscriptionMutation) Type() string {
+	return m.typ
+}
+
+// Fields returns all fields that were changed during this mutation. Note that in
+// order to get all numeric fields that were incremented/decremented, call
+// AddedFields().
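+// Illustrative (non-generated) sketch of the set/added split: a plain Set shows
+// up in Fields, while an increment is tracked separately in AddedFields:
+//
+//	m.SetStatus("active")
+//	m.AddDailyUsageUsd(0.25)
+//	m.Fields()      // contains usersubscription.FieldStatus only
+//	m.AddedFields() // contains usersubscription.FieldDailyUsageUsd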
+func (m *UserSubscriptionMutation) Fields() []string { + fields := make([]string, 0, 17) + if m.created_at != nil { + fields = append(fields, usersubscription.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, usersubscription.FieldUpdatedAt) + } + if m.deleted_at != nil { + fields = append(fields, usersubscription.FieldDeletedAt) + } + if m.user != nil { + fields = append(fields, usersubscription.FieldUserID) + } + if m.group != nil { + fields = append(fields, usersubscription.FieldGroupID) + } + if m.starts_at != nil { + fields = append(fields, usersubscription.FieldStartsAt) + } + if m.expires_at != nil { + fields = append(fields, usersubscription.FieldExpiresAt) + } + if m.status != nil { + fields = append(fields, usersubscription.FieldStatus) + } + if m.daily_window_start != nil { + fields = append(fields, usersubscription.FieldDailyWindowStart) + } + if m.weekly_window_start != nil { + fields = append(fields, usersubscription.FieldWeeklyWindowStart) + } + if m.monthly_window_start != nil { + fields = append(fields, usersubscription.FieldMonthlyWindowStart) + } + if m.daily_usage_usd != nil { + fields = append(fields, usersubscription.FieldDailyUsageUsd) + } + if m.weekly_usage_usd != nil { + fields = append(fields, usersubscription.FieldWeeklyUsageUsd) + } + if m.monthly_usage_usd != nil { + fields = append(fields, usersubscription.FieldMonthlyUsageUsd) + } + if m.assigned_by_user != nil { + fields = append(fields, usersubscription.FieldAssignedBy) + } + if m.assigned_at != nil { + fields = append(fields, usersubscription.FieldAssignedAt) + } + if m.notes != nil { + fields = append(fields, usersubscription.FieldNotes) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserSubscriptionMutation) Field(name string) (ent.Value, bool) { + switch name { + case usersubscription.FieldCreatedAt: + return m.CreatedAt() + case usersubscription.FieldUpdatedAt: + return m.UpdatedAt() + case usersubscription.FieldDeletedAt: + return m.DeletedAt() + case usersubscription.FieldUserID: + return m.UserID() + case usersubscription.FieldGroupID: + return m.GroupID() + case usersubscription.FieldStartsAt: + return m.StartsAt() + case usersubscription.FieldExpiresAt: + return m.ExpiresAt() + case usersubscription.FieldStatus: + return m.Status() + case usersubscription.FieldDailyWindowStart: + return m.DailyWindowStart() + case usersubscription.FieldWeeklyWindowStart: + return m.WeeklyWindowStart() + case usersubscription.FieldMonthlyWindowStart: + return m.MonthlyWindowStart() + case usersubscription.FieldDailyUsageUsd: + return m.DailyUsageUsd() + case usersubscription.FieldWeeklyUsageUsd: + return m.WeeklyUsageUsd() + case usersubscription.FieldMonthlyUsageUsd: + return m.MonthlyUsageUsd() + case usersubscription.FieldAssignedBy: + return m.AssignedBy() + case usersubscription.FieldAssignedAt: + return m.AssignedAt() + case usersubscription.FieldNotes: + return m.Notes() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
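+// A hypothetical audit hook could diff values generically via this method:
+//
+//	old, err := m.OldField(ctx, usersubscription.FieldStatus)
+//	cur, _ := m.Field(usersubscription.FieldStatus)
+//	if err == nil && old != cur { /* record the change */ }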
+func (m *UserSubscriptionMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case usersubscription.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case usersubscription.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case usersubscription.FieldDeletedAt: + return m.OldDeletedAt(ctx) + case usersubscription.FieldUserID: + return m.OldUserID(ctx) + case usersubscription.FieldGroupID: + return m.OldGroupID(ctx) + case usersubscription.FieldStartsAt: + return m.OldStartsAt(ctx) + case usersubscription.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case usersubscription.FieldStatus: + return m.OldStatus(ctx) + case usersubscription.FieldDailyWindowStart: + return m.OldDailyWindowStart(ctx) + case usersubscription.FieldWeeklyWindowStart: + return m.OldWeeklyWindowStart(ctx) + case usersubscription.FieldMonthlyWindowStart: + return m.OldMonthlyWindowStart(ctx) + case usersubscription.FieldDailyUsageUsd: + return m.OldDailyUsageUsd(ctx) + case usersubscription.FieldWeeklyUsageUsd: + return m.OldWeeklyUsageUsd(ctx) + case usersubscription.FieldMonthlyUsageUsd: + return m.OldMonthlyUsageUsd(ctx) + case usersubscription.FieldAssignedBy: + return m.OldAssignedBy(ctx) + case usersubscription.FieldAssignedAt: + return m.OldAssignedAt(ctx) + case usersubscription.FieldNotes: + return m.OldNotes(ctx) + } + return nil, fmt.Errorf("unknown UserSubscription field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserSubscriptionMutation) SetField(name string, value ent.Value) error { + switch name { + case usersubscription.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case usersubscription.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case usersubscription.FieldDeletedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDeletedAt(v) + return nil + case usersubscription.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case usersubscription.FieldGroupID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetGroupID(v) + return nil + case usersubscription.FieldStartsAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartsAt(v) + return nil + case usersubscription.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + case usersubscription.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case usersubscription.FieldDailyWindowStart: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDailyWindowStart(v) + return nil + case usersubscription.FieldWeeklyWindowStart: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWeeklyWindowStart(v) + return nil + case 
usersubscription.FieldMonthlyWindowStart: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonthlyWindowStart(v) + return nil + case usersubscription.FieldDailyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDailyUsageUsd(v) + return nil + case usersubscription.FieldWeeklyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWeeklyUsageUsd(v) + return nil + case usersubscription.FieldMonthlyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMonthlyUsageUsd(v) + return nil + case usersubscription.FieldAssignedBy: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAssignedBy(v) + return nil + case usersubscription.FieldAssignedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAssignedAt(v) + return nil + case usersubscription.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + } + return fmt.Errorf("unknown UserSubscription field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserSubscriptionMutation) AddedFields() []string { + var fields []string + if m.adddaily_usage_usd != nil { + fields = append(fields, usersubscription.FieldDailyUsageUsd) + } + if m.addweekly_usage_usd != nil { + fields = append(fields, usersubscription.FieldWeeklyUsageUsd) + } + if m.addmonthly_usage_usd != nil { + fields = append(fields, usersubscription.FieldMonthlyUsageUsd) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserSubscriptionMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case usersubscription.FieldDailyUsageUsd: + return m.AddedDailyUsageUsd() + case usersubscription.FieldWeeklyUsageUsd: + return m.AddedWeeklyUsageUsd() + case usersubscription.FieldMonthlyUsageUsd: + return m.AddedMonthlyUsageUsd() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserSubscriptionMutation) AddField(name string, value ent.Value) error { + switch name { + case usersubscription.FieldDailyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddDailyUsageUsd(v) + return nil + case usersubscription.FieldWeeklyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddWeeklyUsageUsd(v) + return nil + case usersubscription.FieldMonthlyUsageUsd: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddMonthlyUsageUsd(v) + return nil + } + return fmt.Errorf("unknown UserSubscription numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
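+// Illustrative (non-generated) sketch: a staged ClearNotes is reported here
+// until it is reset or the mutation is applied:
+//
+//	m.ClearNotes()
+//	m.ClearedFields() // contains usersubscription.FieldNotes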
+func (m *UserSubscriptionMutation) ClearedFields() []string {
+	var fields []string
+	if m.FieldCleared(usersubscription.FieldDeletedAt) {
+		fields = append(fields, usersubscription.FieldDeletedAt)
+	}
+	if m.FieldCleared(usersubscription.FieldDailyWindowStart) {
+		fields = append(fields, usersubscription.FieldDailyWindowStart)
+	}
+	if m.FieldCleared(usersubscription.FieldWeeklyWindowStart) {
+		fields = append(fields, usersubscription.FieldWeeklyWindowStart)
+	}
+	if m.FieldCleared(usersubscription.FieldMonthlyWindowStart) {
+		fields = append(fields, usersubscription.FieldMonthlyWindowStart)
+	}
+	if m.FieldCleared(usersubscription.FieldAssignedBy) {
+		fields = append(fields, usersubscription.FieldAssignedBy)
+	}
+	if m.FieldCleared(usersubscription.FieldNotes) {
+		fields = append(fields, usersubscription.FieldNotes)
+	}
+	return fields
+}
+
+// FieldCleared returns a boolean indicating if a field with the given name was
+// cleared in this mutation.
+func (m *UserSubscriptionMutation) FieldCleared(name string) bool {
+	_, ok := m.clearedFields[name]
+	return ok
+}
+
+// ClearField clears the value of the field with the given name. It returns an
+// error if the field is not defined in the schema.
+func (m *UserSubscriptionMutation) ClearField(name string) error {
+	switch name {
+	case usersubscription.FieldDeletedAt:
+		m.ClearDeletedAt()
+		return nil
+	case usersubscription.FieldDailyWindowStart:
+		m.ClearDailyWindowStart()
+		return nil
+	case usersubscription.FieldWeeklyWindowStart:
+		m.ClearWeeklyWindowStart()
+		return nil
+	case usersubscription.FieldMonthlyWindowStart:
+		m.ClearMonthlyWindowStart()
+		return nil
+	case usersubscription.FieldAssignedBy:
+		m.ClearAssignedBy()
+		return nil
+	case usersubscription.FieldNotes:
+		m.ClearNotes()
+		return nil
+	}
+	return fmt.Errorf("unknown UserSubscription nullable field %s", name)
+}
+
+// ResetField resets all changes in the mutation for the field with the given name.
+// It returns an error if the field is not defined in the schema.
+func (m *UserSubscriptionMutation) ResetField(name string) error {
+	switch name {
+	case usersubscription.FieldCreatedAt:
+		m.ResetCreatedAt()
+		return nil
+	case usersubscription.FieldUpdatedAt:
+		m.ResetUpdatedAt()
+		return nil
+	case usersubscription.FieldDeletedAt:
+		m.ResetDeletedAt()
+		return nil
+	case usersubscription.FieldUserID:
+		m.ResetUserID()
+		return nil
+	case usersubscription.FieldGroupID:
+		m.ResetGroupID()
+		return nil
+	case usersubscription.FieldStartsAt:
+		m.ResetStartsAt()
+		return nil
+	case usersubscription.FieldExpiresAt:
+		m.ResetExpiresAt()
+		return nil
+	case usersubscription.FieldStatus:
+		m.ResetStatus()
+		return nil
+	case usersubscription.FieldDailyWindowStart:
+		m.ResetDailyWindowStart()
+		return nil
+	case usersubscription.FieldWeeklyWindowStart:
+		m.ResetWeeklyWindowStart()
+		return nil
+	case usersubscription.FieldMonthlyWindowStart:
+		m.ResetMonthlyWindowStart()
+		return nil
+	case usersubscription.FieldDailyUsageUsd:
+		m.ResetDailyUsageUsd()
+		return nil
+	case usersubscription.FieldWeeklyUsageUsd:
+		m.ResetWeeklyUsageUsd()
+		return nil
+	case usersubscription.FieldMonthlyUsageUsd:
+		m.ResetMonthlyUsageUsd()
+		return nil
+	case usersubscription.FieldAssignedBy:
+		m.ResetAssignedBy()
+		return nil
+	case usersubscription.FieldAssignedAt:
+		m.ResetAssignedAt()
+		return nil
+	case usersubscription.FieldNotes:
+		m.ResetNotes()
+		return nil
+	}
+	return fmt.Errorf("unknown UserSubscription field %s", name)
+}
+
+// AddedEdges returns all edge names that were set/added in this mutation.
+func (m *UserSubscriptionMutation) AddedEdges() []string {
+	edges := make([]string, 0, 4)
+	if m.user != nil {
+		edges = append(edges, usersubscription.EdgeUser)
+	}
+	if m.group != nil {
+		edges = append(edges, usersubscription.EdgeGroup)
+	}
+	if m.assigned_by_user != nil {
+		edges = append(edges, usersubscription.EdgeAssignedByUser)
+	}
+	if m.usage_logs != nil {
+		edges = append(edges, usersubscription.EdgeUsageLogs)
+	}
+	return edges
+}
+
+// AddedIDs returns all IDs (to other nodes) that were added for the given edge
+// name in this mutation.
+func (m *UserSubscriptionMutation) AddedIDs(name string) []ent.Value {
+	switch name {
+	case usersubscription.EdgeUser:
+		if id := m.user; id != nil {
+			return []ent.Value{*id}
+		}
+	case usersubscription.EdgeGroup:
+		if id := m.group; id != nil {
+			return []ent.Value{*id}
+		}
+	case usersubscription.EdgeAssignedByUser:
+		if id := m.assigned_by_user; id != nil {
+			return []ent.Value{*id}
+		}
+	case usersubscription.EdgeUsageLogs:
+		ids := make([]ent.Value, 0, len(m.usage_logs))
+		for id := range m.usage_logs {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// RemovedEdges returns all edge names that were removed in this mutation.
+func (m *UserSubscriptionMutation) RemovedEdges() []string {
+	edges := make([]string, 0, 4)
+	if m.removedusage_logs != nil {
+		edges = append(edges, usersubscription.EdgeUsageLogs)
+	}
+	return edges
+}
+
+// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
+// the given name in this mutation.
+func (m *UserSubscriptionMutation) RemovedIDs(name string) []ent.Value {
+	switch name {
+	case usersubscription.EdgeUsageLogs:
+		ids := make([]ent.Value, 0, len(m.removedusage_logs))
+		for id := range m.removedusage_logs {
+			ids = append(ids, id)
+		}
+		return ids
+	}
+	return nil
+}
+
+// ClearedEdges returns all edge names that were cleared in this mutation.
+func (m *UserSubscriptionMutation) ClearedEdges() []string {
+	edges := make([]string, 0, 4)
+	if m.cleareduser {
+		edges = append(edges, usersubscription.EdgeUser)
+	}
+	if m.clearedgroup {
+		edges = append(edges, usersubscription.EdgeGroup)
+	}
+	if m.clearedassigned_by_user {
+		edges = append(edges, usersubscription.EdgeAssignedByUser)
+	}
+	if m.clearedusage_logs {
+		edges = append(edges, usersubscription.EdgeUsageLogs)
+	}
+	return edges
+}
+
+// EdgeCleared returns a boolean which indicates if the edge with the given name
+// was cleared in this mutation.
+func (m *UserSubscriptionMutation) EdgeCleared(name string) bool {
+	switch name {
+	case usersubscription.EdgeUser:
+		return m.cleareduser
+	case usersubscription.EdgeGroup:
+		return m.clearedgroup
+	case usersubscription.EdgeAssignedByUser:
+		return m.clearedassigned_by_user
+	case usersubscription.EdgeUsageLogs:
+		return m.clearedusage_logs
+	}
+	return false
+}
+
+// ClearEdge clears the value of the edge with the given name. It returns an error
+// if that edge is not defined in the schema.
+func (m *UserSubscriptionMutation) ClearEdge(name string) error {
+	switch name {
+	case usersubscription.EdgeUser:
+		m.ClearUser()
+		return nil
+	case usersubscription.EdgeGroup:
+		m.ClearGroup()
+		return nil
+	case usersubscription.EdgeAssignedByUser:
+		m.ClearAssignedByUser()
+		return nil
+	}
+	return fmt.Errorf("unknown UserSubscription unique edge %s", name)
+}
+
+// ResetEdge resets all changes to the edge with the given name in this mutation.
+// It returns an error if the edge is not defined in the schema.
+func (m *UserSubscriptionMutation) ResetEdge(name string) error {
+	switch name {
+	case usersubscription.EdgeUser:
+		m.ResetUser()
+		return nil
+	case usersubscription.EdgeGroup:
+		m.ResetGroup()
+		return nil
+	case usersubscription.EdgeAssignedByUser:
+		m.ResetAssignedByUser()
+		return nil
+	case usersubscription.EdgeUsageLogs:
+		m.ResetUsageLogs()
+		return nil
+	}
+	return fmt.Errorf("unknown UserSubscription edge %s", name)
+}
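The generic OldField/SetField/AddField accessors above are what let schema-agnostic hooks inspect any mutation. A minimal sketch of such a hook (illustrative only, not part of this patch; the hook name and the 10000 cap are invented, and the usual context/fmt/ent/usersubscription imports are assumed):

func RejectExcessiveMonthlyUsage() ent.Hook {
	return func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			// Reads go through the generic mutation API, so this hook compiles
			// against ent.Mutation rather than *UserSubscriptionMutation.
			if v, ok := m.Field(usersubscription.FieldMonthlyUsageUsd); ok {
				if usd, ok := v.(float64); ok && usd > 10000 {
					return nil, fmt.Errorf("monthly usage %.2f exceeds sanity cap", usd)
				}
			}
			return next.Mutate(ctx, m)
		})
	}
}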
diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go
new file mode 100644
index 00000000..7a443c5d
--- /dev/null
+++ b/backend/ent/predicate/predicate.go
@@ -0,0 +1,52 @@
+// Code generated by ent, DO NOT EDIT.
+
+package predicate
+
+import (
+	"entgo.io/ent/dialect/sql"
+)
+
+// APIKey is the predicate function for apikey builders.
+type APIKey func(*sql.Selector)
+
+// Account is the predicate function for account builders.
+type Account func(*sql.Selector)
+
+// AccountGroup is the predicate function for accountgroup builders.
+type AccountGroup func(*sql.Selector)
+
+// Group is the predicate function for group builders.
+type Group func(*sql.Selector)
+
+// PromoCode is the predicate function for promocode builders.
+type PromoCode func(*sql.Selector)
+
+// PromoCodeUsage is the predicate function for promocodeusage builders.
+type PromoCodeUsage func(*sql.Selector)
+
+// Proxy is the predicate function for proxy builders.
+type Proxy func(*sql.Selector)
+
+// RedeemCode is the predicate function for redeemcode builders.
+type RedeemCode func(*sql.Selector)
+
+// Setting is the predicate function for setting builders.
+type Setting func(*sql.Selector)
+
+// UsageLog is the predicate function for usagelog builders.
+type UsageLog func(*sql.Selector)
+
+// User is the predicate function for user builders.
+type User func(*sql.Selector)
+
+// UserAllowedGroup is the predicate function for userallowedgroup builders.
+type UserAllowedGroup func(*sql.Selector)
+
+// UserAttributeDefinition is the predicate function for userattributedefinition builders.
+type UserAttributeDefinition func(*sql.Selector)
+
+// UserAttributeValue is the predicate function for userattributevalue builders.
+type UserAttributeValue func(*sql.Selector)
+
+// UserSubscription is the predicate function for usersubscription builders.
+type UserSubscription func(*sql.Selector)
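Each alias above is just func(*sql.Selector), so a hand-written predicate can drop to raw SQL wherever the generated helpers fall short. A hedged sketch (the helper and its column logic are illustrative, not part of this patch):

// HasRemainingUses is usable anywhere a predicate.PromoCode is accepted.
func HasRemainingUses() predicate.PromoCode {
	return predicate.PromoCode(func(s *sql.Selector) {
		// max_uses = 0 means unlimited; otherwise used_count must stay below it.
		s.Where(sql.Or(
			sql.EQ(s.C("max_uses"), 0),
			sql.ColumnsLT(s.C("used_count"), s.C("max_uses")),
		))
	})
}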
diff --git a/backend/ent/promocode.go b/backend/ent/promocode.go
new file mode 100644
index 00000000..1123bbd6
--- /dev/null
+++ b/backend/ent/promocode.go
@@ -0,0 +1,228 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/Wei-Shaw/sub2api/ent/promocode"
+)
+
+// PromoCode is the model entity for the PromoCode schema.
+type PromoCode struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int64 `json:"id,omitempty"`
+	// Promo code.
+	Code string `json:"code,omitempty"`
+	// Bonus balance amount granted on redemption.
+	BonusAmount float64 `json:"bonus_amount,omitempty"`
+	// Maximum number of uses; 0 means unlimited.
+	MaxUses int `json:"max_uses,omitempty"`
+	// Number of times the code has been used.
+	UsedCount int `json:"used_count,omitempty"`
+	// Status: active, disabled.
+	Status string `json:"status,omitempty"`
+	// Expiration time; null means the code never expires.
+	ExpiresAt *time.Time `json:"expires_at,omitempty"`
+	// Notes.
+	Notes *string `json:"notes,omitempty"`
+	// CreatedAt holds the value of the "created_at" field.
+	CreatedAt time.Time `json:"created_at,omitempty"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the PromoCodeQuery when eager-loading is set.
+	Edges        PromoCodeEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// PromoCodeEdges holds the relations/edges for other nodes in the graph.
+type PromoCodeEdges struct {
+	// UsageRecords holds the value of the usage_records edge.
+	UsageRecords []*PromoCodeUsage `json:"usage_records,omitempty"`
+	// loadedTypes holds the information for reporting if a
+	// type was loaded (or requested) in eager-loading or not.
+	loadedTypes [1]bool
+}
+
+// UsageRecordsOrErr returns the UsageRecords value or an error if the edge
+// was not loaded in eager-loading.
+func (e PromoCodeEdges) UsageRecordsOrErr() ([]*PromoCodeUsage, error) {
+	if e.loadedTypes[0] {
+		return e.UsageRecords, nil
+	}
+	return nil, &NotLoadedError{edge: "usage_records"}
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*PromoCode) scanValues(columns []string) ([]any, error) {
+	values := make([]any, len(columns))
+	for i := range columns {
+		switch columns[i] {
+		case promocode.FieldBonusAmount:
+			values[i] = new(sql.NullFloat64)
+		case promocode.FieldID, promocode.FieldMaxUses, promocode.FieldUsedCount:
+			values[i] = new(sql.NullInt64)
+		case promocode.FieldCode, promocode.FieldStatus, promocode.FieldNotes:
+			values[i] = new(sql.NullString)
+		case promocode.FieldExpiresAt, promocode.FieldCreatedAt, promocode.FieldUpdatedAt:
+			values[i] = new(sql.NullTime)
+		default:
+			values[i] = new(sql.UnknownType)
+		}
+	}
+	return values, nil
+}
+
+// assignValues assigns the values that were returned from sql.Rows (after scanning)
+// to the PromoCode fields.
+func (_m *PromoCode) assignValues(columns []string, values []any) error {
+	if m, n := len(values), len(columns); m < n {
+		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
+	}
+	for i := range columns {
+		switch columns[i] {
+		case promocode.FieldID:
+			value, ok := values[i].(*sql.NullInt64)
+			if !ok {
+				return fmt.Errorf("unexpected type %T for field id", value)
+			}
+			_m.ID = int64(value.Int64)
+		case promocode.FieldCode:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field code", values[i])
+			} else if value.Valid {
+				_m.Code = value.String
+			}
+		case promocode.FieldBonusAmount:
+			if value, ok := values[i].(*sql.NullFloat64); !ok {
+				return fmt.Errorf("unexpected type %T for field bonus_amount", values[i])
+			} else if value.Valid {
+				_m.BonusAmount = value.Float64
+			}
+		case promocode.FieldMaxUses:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field max_uses", values[i])
+			} else if value.Valid {
+				_m.MaxUses = int(value.Int64)
+			}
+		case promocode.FieldUsedCount:
+			if value, ok := values[i].(*sql.NullInt64); !ok {
+				return fmt.Errorf("unexpected type %T for field used_count", values[i])
+			} else if value.Valid {
+				_m.UsedCount = int(value.Int64)
+			}
+		case promocode.FieldStatus:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field status", values[i])
+			} else if value.Valid {
+				_m.Status = value.String
+			}
+		case promocode.FieldExpiresAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field expires_at", values[i])
+			} else if value.Valid {
+				_m.ExpiresAt = new(time.Time)
+				*_m.ExpiresAt = value.Time
+			}
+		case promocode.FieldNotes:
+			if value, ok := values[i].(*sql.NullString); !ok {
+				return fmt.Errorf("unexpected type %T for field notes", values[i])
+			} else if value.Valid {
+				_m.Notes = new(string)
+				*_m.Notes = value.String
+			}
+		case promocode.FieldCreatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field created_at", values[i])
+			} else if value.Valid {
+				_m.CreatedAt = value.Time
+			}
+		case promocode.FieldUpdatedAt:
+			if value, ok := values[i].(*sql.NullTime); !ok {
+				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
+			} else if value.Valid {
+				_m.UpdatedAt = value.Time
+			}
+		default:
+			_m.selectValues.Set(columns[i], values[i])
+		}
+	}
+	return nil
+}
+
+// Value returns the ent.Value that was dynamically selected and assigned to the PromoCode.
+// This includes values selected through modifiers, order, etc.
+func (_m *PromoCode) Value(name string) (ent.Value, error) {
+	return _m.selectValues.Get(name)
+}
+
+// QueryUsageRecords queries the "usage_records" edge of the PromoCode entity.
+func (_m *PromoCode) QueryUsageRecords() *PromoCodeUsageQuery {
+	return NewPromoCodeClient(_m.config).QueryUsageRecords(_m)
+}
+
+// Update returns a builder for updating this PromoCode.
+// Note that you need to call PromoCode.Unwrap() before calling this method if this PromoCode
+// was returned from a transaction, and the transaction was committed or rolled back.
+func (_m *PromoCode) Update() *PromoCodeUpdateOne {
+	return NewPromoCodeClient(_m.config).UpdateOne(_m)
+}
+
+// Unwrap unwraps the PromoCode entity that was returned from a transaction after it was closed,
+// so that all future queries will be executed through the driver which created the transaction.
+func (_m *PromoCode) Unwrap() *PromoCode {
+	_tx, ok := _m.config.driver.(*txDriver)
+	if !ok {
+		panic("ent: PromoCode is not a transactional entity")
+	}
+	_m.config.driver = _tx.drv
+	return _m
+}
+
+// String implements the fmt.Stringer.
+func (_m *PromoCode) String() string {
+	var builder strings.Builder
+	builder.WriteString("PromoCode(")
+	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
+	builder.WriteString("code=")
+	builder.WriteString(_m.Code)
+	builder.WriteString(", ")
+	builder.WriteString("bonus_amount=")
+	builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount))
+	builder.WriteString(", ")
+	builder.WriteString("max_uses=")
+	builder.WriteString(fmt.Sprintf("%v", _m.MaxUses))
+	builder.WriteString(", ")
+	builder.WriteString("used_count=")
+	builder.WriteString(fmt.Sprintf("%v", _m.UsedCount))
+	builder.WriteString(", ")
+	builder.WriteString("status=")
+	builder.WriteString(_m.Status)
+	builder.WriteString(", ")
+	if v := _m.ExpiresAt; v != nil {
+		builder.WriteString("expires_at=")
+		builder.WriteString(v.Format(time.ANSIC))
+	}
+	builder.WriteString(", ")
+	if v := _m.Notes; v != nil {
+		builder.WriteString("notes=")
+		builder.WriteString(*v)
+	}
+	builder.WriteString(", ")
+	builder.WriteString("created_at=")
+	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
+	builder.WriteString(", ")
+	builder.WriteString("updated_at=")
+	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
+	builder.WriteByte(')')
+	return builder.String()
+}
+
+// PromoCodes is a parsable slice of PromoCode.
+type PromoCodes []*PromoCode
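UsageRecordsOrErr only succeeds when the edge was actually requested, so queries must opt in with the generated WithUsageRecords loader. A usage sketch (illustrative; assumes a *ent.Client named client, a ctx, and a made-up code string):

pc, err := client.PromoCode.Query().
	Where(promocode.CodeEqualFold("WELCOME2026")).
	WithUsageRecords(). // populates Edges.UsageRecords
	Only(ctx)
if err != nil {
	return err
}
records, err := pc.Edges.UsageRecordsOrErr()
if err != nil {
	return err // *NotLoadedError when WithUsageRecords was omitted
}
fmt.Printf("%s redeemed %d times\n", pc.Code, len(records))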
diff --git a/backend/ent/promocode/promocode.go b/backend/ent/promocode/promocode.go
new file mode 100644
index 00000000..ba91658f
--- /dev/null
+++ b/backend/ent/promocode/promocode.go
@@ -0,0 +1,165 @@
+// Code generated by ent, DO NOT EDIT.
+
+package promocode
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the promocode type in the database.
+	Label = "promo_code"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCode holds the string denoting the code field in the database.
+	FieldCode = "code"
+	// FieldBonusAmount holds the string denoting the bonus_amount field in the database.
+	FieldBonusAmount = "bonus_amount"
+	// FieldMaxUses holds the string denoting the max_uses field in the database.
+	FieldMaxUses = "max_uses"
+	// FieldUsedCount holds the string denoting the used_count field in the database.
+	FieldUsedCount = "used_count"
+	// FieldStatus holds the string denoting the status field in the database.
+	FieldStatus = "status"
+	// FieldExpiresAt holds the string denoting the expires_at field in the database.
+	FieldExpiresAt = "expires_at"
+	// FieldNotes holds the string denoting the notes field in the database.
+	FieldNotes = "notes"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// EdgeUsageRecords holds the string denoting the usage_records edge name in mutations.
+	EdgeUsageRecords = "usage_records"
+	// Table holds the table name of the promocode in the database.
+	Table = "promo_codes"
+	// UsageRecordsTable is the table that holds the usage_records relation/edge.
+	UsageRecordsTable = "promo_code_usages"
+	// UsageRecordsInverseTable is the table name for the PromoCodeUsage entity.
+	// It exists in this package in order to avoid circular dependency with the "promocodeusage" package.
+	UsageRecordsInverseTable = "promo_code_usages"
+	// UsageRecordsColumn is the table column denoting the usage_records relation/edge.
+	UsageRecordsColumn = "promo_code_id"
+)
+
+// Columns holds all SQL columns for promocode fields.
+var Columns = []string{
+	FieldID,
+	FieldCode,
+	FieldBonusAmount,
+	FieldMaxUses,
+	FieldUsedCount,
+	FieldStatus,
+	FieldExpiresAt,
+	FieldNotes,
+	FieldCreatedAt,
+	FieldUpdatedAt,
+}
+
+// ValidColumn reports if the column name is valid (part of the table columns).
+func ValidColumn(column string) bool {
+	for i := range Columns {
+		if column == Columns[i] {
+			return true
+		}
+	}
+	return false
+}
+
+var (
+	// CodeValidator is a validator for the "code" field. It is called by the builders before save.
+	CodeValidator func(string) error
+	// DefaultBonusAmount holds the default value on creation for the "bonus_amount" field.
+	DefaultBonusAmount float64
+	// DefaultMaxUses holds the default value on creation for the "max_uses" field.
+	DefaultMaxUses int
+	// DefaultUsedCount holds the default value on creation for the "used_count" field.
+	DefaultUsedCount int
+	// DefaultStatus holds the default value on creation for the "status" field.
+	DefaultStatus string
+	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
+	StatusValidator func(string) error
+	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
+	DefaultCreatedAt func() time.Time
+	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
+	DefaultUpdatedAt func() time.Time
+	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
+	UpdateDefaultUpdatedAt func() time.Time
+)
+
+// OrderOption defines the ordering options for the PromoCode queries.
+type OrderOption func(*sql.Selector)
+
+// ByID orders the results by the id field.
+func ByID(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldID, opts...).ToFunc()
+}
+
+// ByCode orders the results by the code field.
+func ByCode(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCode, opts...).ToFunc()
+}
+
+// ByBonusAmount orders the results by the bonus_amount field.
+func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldBonusAmount, opts...).ToFunc()
+}
+
+// ByMaxUses orders the results by the max_uses field.
+func ByMaxUses(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldMaxUses, opts...).ToFunc()
+}
+
+// ByUsedCount orders the results by the used_count field.
+func ByUsedCount(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUsedCount, opts...).ToFunc()
+}
+
+// ByStatus orders the results by the status field.
+func ByStatus(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldStatus, opts...).ToFunc()
+}
+
+// ByExpiresAt orders the results by the expires_at field.
+func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
+}
+
+// ByNotes orders the results by the notes field.
+func ByNotes(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldNotes, opts...).ToFunc()
+}
+
+// ByCreatedAt orders the results by the created_at field.
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
+}
+
+// ByUpdatedAt orders the results by the updated_at field.
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
+	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
+}
+
+// ByUsageRecordsCount orders the results by usage_records count.
+func ByUsageRecordsCount(opts ...sql.OrderTermOption) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborsCount(s, newUsageRecordsStep(), opts...)
+	}
+}
+
+// ByUsageRecords orders the results by usage_records terms.
+func ByUsageRecords(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
+	return func(s *sql.Selector) {
+		sqlgraph.OrderByNeighborTerms(s, newUsageRecordsStep(), append([]sql.OrderTerm{term}, terms...)...)
+	}
+}
+func newUsageRecordsStep() *sqlgraph.Step {
+	return sqlgraph.NewStep(
+		sqlgraph.From(Table, FieldID),
+		sqlgraph.To(UsageRecordsInverseTable, FieldID),
+		sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
+	)
+}
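The OrderOption helpers above, including the neighbor-count variants, plug directly into Order(...). A hedged sketch listing the most-redeemed active codes first (illustrative; assumes a *ent.Client named client and a ctx):

codes, err := client.PromoCode.Query().
	Where(promocode.StatusEQ("active")).
	Order(
		promocode.ByUsageRecordsCount(sql.OrderDesc()), // count of usage_records neighbors
		promocode.ByCreatedAt(sql.OrderAsc()),          // stable tie-breaker
	).
	Limit(20).
	All(ctx)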
diff --git a/backend/ent/promocode/where.go b/backend/ent/promocode/where.go
new file mode 100644
index 00000000..84b6460a
--- /dev/null
+++ b/backend/ent/promocode/where.go
@@ -0,0 +1,594 @@
+// Code generated by ent, DO NOT EDIT.
+
+package promocode
+
+import (
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+)
+
+// ID filters vertices based on their ID field.
+func ID(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldID, id))
+}
+
+// IDEQ applies the EQ predicate on the ID field.
+func IDEQ(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldID, id))
+}
+
+// IDNEQ applies the NEQ predicate on the ID field.
+func IDNEQ(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldID, id))
+}
+
+// IDIn applies the In predicate on the ID field.
+func IDIn(ids ...int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldID, ids...))
+}
+
+// IDNotIn applies the NotIn predicate on the ID field.
+func IDNotIn(ids ...int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldID, ids...))
+}
+
+// IDGT applies the GT predicate on the ID field.
+func IDGT(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldID, id))
+}
+
+// IDGTE applies the GTE predicate on the ID field.
+func IDGTE(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldID, id))
+}
+
+// IDLT applies the LT predicate on the ID field.
+func IDLT(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldID, id))
+}
+
+// IDLTE applies the LTE predicate on the ID field.
+func IDLTE(id int64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldID, id))
+}
+
+// Code applies equality check predicate on the "code" field. It's identical to CodeEQ.
+func Code(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
+}
+
+// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ.
+func BonusAmount(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
+}
+
+// MaxUses applies equality check predicate on the "max_uses" field. It's identical to MaxUsesEQ.
+func MaxUses(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
+}
+
+// UsedCount applies equality check predicate on the "used_count" field. It's identical to UsedCountEQ.
+func UsedCount(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
+}
+
+// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
+func Status(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
+}
+
+// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
+func ExpiresAt(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
+}
+
+// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ.
+func Notes(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
+}
+
+// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
+func CreatedAt(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
+func UpdatedAt(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// CodeEQ applies the EQ predicate on the "code" field.
+func CodeEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
+}
+
+// CodeNEQ applies the NEQ predicate on the "code" field.
+func CodeNEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldCode, v))
+}
+
+// CodeIn applies the In predicate on the "code" field.
+func CodeIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldCode, vs...))
+}
+
+// CodeNotIn applies the NotIn predicate on the "code" field.
+func CodeNotIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldCode, vs...))
+}
+
+// CodeGT applies the GT predicate on the "code" field.
+func CodeGT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldCode, v))
+}
+
+// CodeGTE applies the GTE predicate on the "code" field.
+func CodeGTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldCode, v))
+}
+
+// CodeLT applies the LT predicate on the "code" field.
+func CodeLT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldCode, v))
+}
+
+// CodeLTE applies the LTE predicate on the "code" field.
+func CodeLTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldCode, v))
+}
+
+// CodeContains applies the Contains predicate on the "code" field.
+func CodeContains(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContains(FieldCode, v))
+}
+
+// CodeHasPrefix applies the HasPrefix predicate on the "code" field.
+func CodeHasPrefix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasPrefix(FieldCode, v))
+}
+
+// CodeHasSuffix applies the HasSuffix predicate on the "code" field.
+func CodeHasSuffix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasSuffix(FieldCode, v))
+}
+
+// CodeEqualFold applies the EqualFold predicate on the "code" field.
+func CodeEqualFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEqualFold(FieldCode, v))
+}
+
+// CodeContainsFold applies the ContainsFold predicate on the "code" field.
+func CodeContainsFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContainsFold(FieldCode, v))
+}
+
+// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field.
+func BonusAmountEQ(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
+}
+
+// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field.
+func BonusAmountNEQ(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldBonusAmount, v))
+}
+
+// BonusAmountIn applies the In predicate on the "bonus_amount" field.
+func BonusAmountIn(vs ...float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldBonusAmount, vs...))
+}
+
+// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field.
+func BonusAmountNotIn(vs ...float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldBonusAmount, vs...))
+}
+
+// BonusAmountGT applies the GT predicate on the "bonus_amount" field.
+func BonusAmountGT(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldBonusAmount, v))
+}
+
+// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field.
+func BonusAmountGTE(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldBonusAmount, v))
+}
+
+// BonusAmountLT applies the LT predicate on the "bonus_amount" field.
+func BonusAmountLT(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldBonusAmount, v))
+}
+
+// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field.
+func BonusAmountLTE(v float64) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldBonusAmount, v))
+}
+
+// MaxUsesEQ applies the EQ predicate on the "max_uses" field.
+func MaxUsesEQ(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
+}
+
+// MaxUsesNEQ applies the NEQ predicate on the "max_uses" field.
+func MaxUsesNEQ(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldMaxUses, v))
+}
+
+// MaxUsesIn applies the In predicate on the "max_uses" field.
+func MaxUsesIn(vs ...int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldMaxUses, vs...))
+}
+
+// MaxUsesNotIn applies the NotIn predicate on the "max_uses" field.
+func MaxUsesNotIn(vs ...int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldMaxUses, vs...))
+}
+
+// MaxUsesGT applies the GT predicate on the "max_uses" field.
+func MaxUsesGT(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldMaxUses, v))
+}
+
+// MaxUsesGTE applies the GTE predicate on the "max_uses" field.
+func MaxUsesGTE(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldMaxUses, v))
+}
+
+// MaxUsesLT applies the LT predicate on the "max_uses" field.
+func MaxUsesLT(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldMaxUses, v))
+}
+
+// MaxUsesLTE applies the LTE predicate on the "max_uses" field.
+func MaxUsesLTE(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldMaxUses, v))
+}
+
+// UsedCountEQ applies the EQ predicate on the "used_count" field.
+func UsedCountEQ(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
+}
+
+// UsedCountNEQ applies the NEQ predicate on the "used_count" field.
+func UsedCountNEQ(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldUsedCount, v))
+}
+
+// UsedCountIn applies the In predicate on the "used_count" field.
+func UsedCountIn(vs ...int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldUsedCount, vs...))
+}
+
+// UsedCountNotIn applies the NotIn predicate on the "used_count" field.
+func UsedCountNotIn(vs ...int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldUsedCount, vs...))
+}
+
+// UsedCountGT applies the GT predicate on the "used_count" field.
+func UsedCountGT(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldUsedCount, v))
+}
+
+// UsedCountGTE applies the GTE predicate on the "used_count" field.
+func UsedCountGTE(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldUsedCount, v))
+}
+
+// UsedCountLT applies the LT predicate on the "used_count" field.
+func UsedCountLT(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldUsedCount, v))
+}
+
+// UsedCountLTE applies the LTE predicate on the "used_count" field.
+func UsedCountLTE(v int) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldUsedCount, v))
+}
+
+// StatusEQ applies the EQ predicate on the "status" field.
+func StatusEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
+}
+
+// StatusNEQ applies the NEQ predicate on the "status" field.
+func StatusNEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldStatus, v))
+}
+
+// StatusIn applies the In predicate on the "status" field.
+func StatusIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldStatus, vs...))
+}
+
+// StatusNotIn applies the NotIn predicate on the "status" field.
+func StatusNotIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldStatus, vs...))
+}
+
+// StatusGT applies the GT predicate on the "status" field.
+func StatusGT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldStatus, v))
+}
+
+// StatusGTE applies the GTE predicate on the "status" field.
+func StatusGTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldStatus, v))
+}
+
+// StatusLT applies the LT predicate on the "status" field.
+func StatusLT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldStatus, v))
+}
+
+// StatusLTE applies the LTE predicate on the "status" field.
+func StatusLTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldStatus, v))
+}
+
+// StatusContains applies the Contains predicate on the "status" field.
+func StatusContains(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContains(FieldStatus, v))
+}
+
+// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
+func StatusHasPrefix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasPrefix(FieldStatus, v))
+}
+
+// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
+func StatusHasSuffix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasSuffix(FieldStatus, v))
+}
+
+// StatusEqualFold applies the EqualFold predicate on the "status" field.
+func StatusEqualFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEqualFold(FieldStatus, v))
+}
+
+// StatusContainsFold applies the ContainsFold predicate on the "status" field.
+func StatusContainsFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContainsFold(FieldStatus, v))
+}
+
+// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
+func ExpiresAtEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
+}
+
+// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
+func ExpiresAtNEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldExpiresAt, v))
+}
+
+// ExpiresAtIn applies the In predicate on the "expires_at" field.
+func ExpiresAtIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldExpiresAt, vs...))
+}
+
+// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
+func ExpiresAtNotIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldExpiresAt, vs...))
+}
+
+// ExpiresAtGT applies the GT predicate on the "expires_at" field.
+func ExpiresAtGT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldExpiresAt, v))
+}
+
+// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
+func ExpiresAtGTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldExpiresAt, v))
+}
+
+// ExpiresAtLT applies the LT predicate on the "expires_at" field.
+func ExpiresAtLT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldExpiresAt, v))
+}
+
+// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
+func ExpiresAtLTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldExpiresAt, v))
+}
+
+// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field.
+func ExpiresAtIsNil() predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIsNull(FieldExpiresAt))
+}
+
+// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field.
+func ExpiresAtNotNil() predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotNull(FieldExpiresAt))
+}
+
+// NotesEQ applies the EQ predicate on the "notes" field.
+func NotesEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
+}
+
+// NotesNEQ applies the NEQ predicate on the "notes" field.
+func NotesNEQ(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldNotes, v))
+}
+
+// NotesIn applies the In predicate on the "notes" field.
+func NotesIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldNotes, vs...))
+}
+
+// NotesNotIn applies the NotIn predicate on the "notes" field.
+func NotesNotIn(vs ...string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldNotes, vs...))
+}
+
+// NotesGT applies the GT predicate on the "notes" field.
+func NotesGT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldNotes, v))
+}
+
+// NotesGTE applies the GTE predicate on the "notes" field.
+func NotesGTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldNotes, v))
+}
+
+// NotesLT applies the LT predicate on the "notes" field.
+func NotesLT(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldNotes, v))
+}
+
+// NotesLTE applies the LTE predicate on the "notes" field.
+func NotesLTE(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldNotes, v))
+}
+
+// NotesContains applies the Contains predicate on the "notes" field.
+func NotesContains(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContains(FieldNotes, v))
+}
+
+// NotesHasPrefix applies the HasPrefix predicate on the "notes" field.
+func NotesHasPrefix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasPrefix(FieldNotes, v))
+}
+
+// NotesHasSuffix applies the HasSuffix predicate on the "notes" field.
+func NotesHasSuffix(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldHasSuffix(FieldNotes, v))
+}
+
+// NotesIsNil applies the IsNil predicate on the "notes" field.
+func NotesIsNil() predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIsNull(FieldNotes))
+}
+
+// NotesNotNil applies the NotNil predicate on the "notes" field.
+func NotesNotNil() predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotNull(FieldNotes))
+}
+
+// NotesEqualFold applies the EqualFold predicate on the "notes" field.
+func NotesEqualFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEqualFold(FieldNotes, v))
+}
+
+// NotesContainsFold applies the ContainsFold predicate on the "notes" field.
+func NotesContainsFold(v string) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldContainsFold(FieldNotes, v))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
+func UpdatedAtEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
+func UpdatedAtNEQ(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNEQ(FieldUpdatedAt, v))
+}
+
+// UpdatedAtIn applies the In predicate on the "updated_at" field.
+func UpdatedAtIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
+func UpdatedAtNotIn(vs ...time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldNotIn(FieldUpdatedAt, vs...))
+}
+
+// UpdatedAtGT applies the GT predicate on the "updated_at" field.
+func UpdatedAtGT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
+func UpdatedAtGTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldGTE(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLT applies the LT predicate on the "updated_at" field.
+func UpdatedAtLT(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLT(FieldUpdatedAt, v))
+}
+
+// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
+func UpdatedAtLTE(v time.Time) predicate.PromoCode {
+	return predicate.PromoCode(sql.FieldLTE(FieldUpdatedAt, v))
+}
+
+// HasUsageRecords applies the HasEdge predicate on the "usage_records" edge.
+func HasUsageRecords() predicate.PromoCode {
+	return predicate.PromoCode(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUsageRecordsWith applies the HasEdge predicate on the "usage_records" edge with a given conditions (other predicates).
+func HasUsageRecordsWith(preds ...predicate.PromoCodeUsage) predicate.PromoCode {
+	return predicate.PromoCode(func(s *sql.Selector) {
+		step := newUsageRecordsStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.PromoCode) predicate.PromoCode {
+	return predicate.PromoCode(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.PromoCode) predicate.PromoCode {
+	return predicate.PromoCode(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.PromoCode) predicate.PromoCode {
+	return predicate.PromoCode(sql.NotPredicates(p))
+}
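All of the predicates above compose with And/Or/Not. A sketch of the redemption-eligibility filter they make possible (illustrative; client, ctx, and userInput are stand-ins):

eligible := promocode.And(
	promocode.StatusEQ("active"),
	promocode.Or(
		promocode.ExpiresAtIsNil(),        // never expires
		promocode.ExpiresAtGT(time.Now()), // or not yet expired
	),
)
pc, err := client.PromoCode.Query().
	Where(eligible, promocode.CodeEqualFold(userInput)).
	Only(ctx)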
diff --git a/backend/ent/promocode_create.go b/backend/ent/promocode_create.go
new file mode 100644
index 00000000..4fd2c39c
--- /dev/null
+++ b/backend/ent/promocode_create.go
@@ -0,0 +1,1081 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/promocode"
+	"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
+)
+
+// PromoCodeCreate is the builder for creating a PromoCode entity.
+type PromoCodeCreate struct {
+	config
+	mutation *PromoCodeMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetCode sets the "code" field.
+func (_c *PromoCodeCreate) SetCode(v string) *PromoCodeCreate {
+	_c.mutation.SetCode(v)
+	return _c
+}
+
+// SetBonusAmount sets the "bonus_amount" field.
+func (_c *PromoCodeCreate) SetBonusAmount(v float64) *PromoCodeCreate {
+	_c.mutation.SetBonusAmount(v)
+	return _c
+}
+
+// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableBonusAmount(v *float64) *PromoCodeCreate {
+	if v != nil {
+		_c.SetBonusAmount(*v)
+	}
+	return _c
+}
+
+// SetMaxUses sets the "max_uses" field.
+func (_c *PromoCodeCreate) SetMaxUses(v int) *PromoCodeCreate {
+	_c.mutation.SetMaxUses(v)
+	return _c
+}
+
+// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableMaxUses(v *int) *PromoCodeCreate {
+	if v != nil {
+		_c.SetMaxUses(*v)
+	}
+	return _c
+}
+
+// SetUsedCount sets the "used_count" field.
+func (_c *PromoCodeCreate) SetUsedCount(v int) *PromoCodeCreate {
+	_c.mutation.SetUsedCount(v)
+	return _c
+}
+
+// SetNillableUsedCount sets the "used_count" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableUsedCount(v *int) *PromoCodeCreate {
+	if v != nil {
+		_c.SetUsedCount(*v)
+	}
+	return _c
+}
+
+// SetStatus sets the "status" field.
+func (_c *PromoCodeCreate) SetStatus(v string) *PromoCodeCreate {
+	_c.mutation.SetStatus(v)
+	return _c
+}
+
+// SetNillableStatus sets the "status" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableStatus(v *string) *PromoCodeCreate {
+	if v != nil {
+		_c.SetStatus(*v)
+	}
+	return _c
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (_c *PromoCodeCreate) SetExpiresAt(v time.Time) *PromoCodeCreate {
+	_c.mutation.SetExpiresAt(v)
+	return _c
+}
+
+// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableExpiresAt(v *time.Time) *PromoCodeCreate {
+	if v != nil {
+		_c.SetExpiresAt(*v)
+	}
+	return _c
+}
+
+// SetNotes sets the "notes" field.
+func (_c *PromoCodeCreate) SetNotes(v string) *PromoCodeCreate {
+	_c.mutation.SetNotes(v)
+	return _c
+}
+
+// SetNillableNotes sets the "notes" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableNotes(v *string) *PromoCodeCreate {
+	if v != nil {
+		_c.SetNotes(*v)
+	}
+	return _c
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *PromoCodeCreate) SetCreatedAt(v time.Time) *PromoCodeCreate {
+	_c.mutation.SetCreatedAt(v)
+	return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableCreatedAt(v *time.Time) *PromoCodeCreate {
+	if v != nil {
+		_c.SetCreatedAt(*v)
+	}
+	return _c
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_c *PromoCodeCreate) SetUpdatedAt(v time.Time) *PromoCodeCreate {
+	_c.mutation.SetUpdatedAt(v)
+	return _c
+}
+
+// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
+func (_c *PromoCodeCreate) SetNillableUpdatedAt(v *time.Time) *PromoCodeCreate {
+	if v != nil {
+		_c.SetUpdatedAt(*v)
+	}
+	return _c
+}
+
+// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs.
+func (_c *PromoCodeCreate) AddUsageRecordIDs(ids ...int64) *PromoCodeCreate {
+	_c.mutation.AddUsageRecordIDs(ids...)
+	return _c
+}
+
+// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity.
+func (_c *PromoCodeCreate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeCreate {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _c.AddUsageRecordIDs(ids...)
+}
+
+// Mutation returns the PromoCodeMutation object of the builder.
+func (_c *PromoCodeCreate) Mutation() *PromoCodeMutation {
+	return _c.mutation
+}
+
+// Save creates the PromoCode in the database.
+func (_c *PromoCodeCreate) Save(ctx context.Context) (*PromoCode, error) {
+	_c.defaults()
+	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
+}
+
+// SaveX calls Save and panics if Save returns an error.
+func (_c *PromoCodeCreate) SaveX(ctx context.Context) *PromoCode {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *PromoCodeCreate) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *PromoCodeCreate) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (_c *PromoCodeCreate) defaults() {
+	if _, ok := _c.mutation.BonusAmount(); !ok {
+		v := promocode.DefaultBonusAmount
+		_c.mutation.SetBonusAmount(v)
+	}
+	if _, ok := _c.mutation.MaxUses(); !ok {
+		v := promocode.DefaultMaxUses
+		_c.mutation.SetMaxUses(v)
+	}
+	if _, ok := _c.mutation.UsedCount(); !ok {
+		v := promocode.DefaultUsedCount
+		_c.mutation.SetUsedCount(v)
+	}
+	if _, ok := _c.mutation.Status(); !ok {
+		v := promocode.DefaultStatus
+		_c.mutation.SetStatus(v)
+	}
+	if _, ok := _c.mutation.CreatedAt(); !ok {
+		v := promocode.DefaultCreatedAt()
+		_c.mutation.SetCreatedAt(v)
+	}
+	if _, ok := _c.mutation.UpdatedAt(); !ok {
+		v := promocode.DefaultUpdatedAt()
+		_c.mutation.SetUpdatedAt(v)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_c *PromoCodeCreate) check() error {
+	if _, ok := _c.mutation.Code(); !ok {
+		return &ValidationError{Name: "code", err: errors.New(`ent: missing required field "PromoCode.code"`)}
+	}
+	if v, ok := _c.mutation.Code(); ok {
+		if err := promocode.CodeValidator(v); err != nil {
+			return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)}
+		}
+	}
+	if _, ok := _c.mutation.BonusAmount(); !ok {
+		return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCode.bonus_amount"`)}
+	}
+	if _, ok := _c.mutation.MaxUses(); !ok {
+		return &ValidationError{Name: "max_uses", err: errors.New(`ent: missing required field "PromoCode.max_uses"`)}
+	}
+	if _, ok := _c.mutation.UsedCount(); !ok {
+		return &ValidationError{Name: "used_count", err: errors.New(`ent: missing required field "PromoCode.used_count"`)}
+	}
+	if _, ok := _c.mutation.Status(); !ok {
+		return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "PromoCode.status"`)}
+	}
+	if v, ok := _c.mutation.Status(); ok {
+		if err := promocode.StatusValidator(v); err != nil {
+			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)}
+		}
+	}
+	if _, ok := _c.mutation.CreatedAt(); !ok {
+		return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "PromoCode.created_at"`)}
+	}
+	if _, ok := _c.mutation.UpdatedAt(); !ok {
+		return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "PromoCode.updated_at"`)}
+	}
+	return nil
+}
+
+func (_c *PromoCodeCreate) sqlSave(ctx context.Context) (*PromoCode, error) {
+	if err := _c.check(); err != nil {
+		return nil, err
+	}
+	_node, _spec := _c.createSpec()
+	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
+		if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int64(id)
+	_c.mutation.id = &_node.ID
+	_c.mutation.done = true
+	return _node, nil
+}
+
+func (_c *PromoCodeCreate) createSpec() (*PromoCode, *sqlgraph.CreateSpec) {
+	var (
+		_node = &PromoCode{config: _c.config}
+		_spec = sqlgraph.NewCreateSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
+	)
+	_spec.OnConflict = _c.conflict
+	if value, ok := _c.mutation.Code(); ok {
+		_spec.SetField(promocode.FieldCode, field.TypeString, value)
+		_node.Code = value
+	}
+	if value, ok := _c.mutation.BonusAmount(); ok {
+		_spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value)
+		_node.BonusAmount = value
+	}
+	if value, ok := _c.mutation.MaxUses(); ok {
+		_spec.SetField(promocode.FieldMaxUses, field.TypeInt, value)
+		_node.MaxUses = value
+	}
+	if value, ok := _c.mutation.UsedCount(); ok {
+		_spec.SetField(promocode.FieldUsedCount, field.TypeInt, value)
+		_node.UsedCount = value
+	}
+	if value, ok := _c.mutation.Status(); ok {
+		_spec.SetField(promocode.FieldStatus, field.TypeString, value)
+		_node.Status = value
+	}
+	if value, ok := _c.mutation.ExpiresAt(); ok {
+		_spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value)
+		_node.ExpiresAt = &value
+	}
+	if value, ok := _c.mutation.Notes(); ok {
+		_spec.SetField(promocode.FieldNotes, field.TypeString, value)
+		_node.Notes = &value
+	}
+	if value, ok := _c.mutation.CreatedAt(); ok {
+		_spec.SetField(promocode.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := _c.mutation.UpdatedAt(); ok {
+		_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if nodes := _c.mutation.UsageRecordsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   promocode.UsageRecordsTable,
+			Columns: []string{promocode.UsageRecordsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.PromoCode.Create().
+//		SetCode(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.PromoCodeUpsert) {
+//			SetCode(v+v).
+//		}).
+//		Exec(ctx)
+func (_c *PromoCodeCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUpsertOne {
+	_c.conflict = opts
+	return &PromoCodeUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.PromoCode.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *PromoCodeCreate) OnConflictColumns(columns ...string) *PromoCodeUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &PromoCodeUpsertOne{
+		create: _c,
+	}
+}
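The conflict options above are what an idempotent seed or import path would use. A hedged sketch (illustrative; the code value is invented, and UpdateNewValues/Exec are the generated upsert helpers defined further down in this file):

err := client.PromoCode.Create().
	SetCode("WELCOME2026").
	SetBonusAmount(5).
	OnConflictColumns(promocode.FieldCode). // "code" is the unique target
	UpdateNewValues().
	Exec(ctx)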
+func (u *PromoCodeUpsert) SetCode(v string) *PromoCodeUpsert { + u.Set(promocode.FieldCode, v) + return u +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateCode() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldCode) + return u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUpsert) SetBonusAmount(v float64) *PromoCodeUpsert { + u.Set(promocode.FieldBonusAmount, v) + return u +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateBonusAmount() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldBonusAmount) + return u +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUpsert) AddBonusAmount(v float64) *PromoCodeUpsert { + u.Add(promocode.FieldBonusAmount, v) + return u +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsert) SetMaxUses(v int) *PromoCodeUpsert { + u.Set(promocode.FieldMaxUses, v) + return u +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateMaxUses() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldMaxUses) + return u +} + +// AddMaxUses adds v to the "max_uses" field. +func (u *PromoCodeUpsert) AddMaxUses(v int) *PromoCodeUpsert { + u.Add(promocode.FieldMaxUses, v) + return u +} + +// SetUsedCount sets the "used_count" field. +func (u *PromoCodeUpsert) SetUsedCount(v int) *PromoCodeUpsert { + u.Set(promocode.FieldUsedCount, v) + return u +} + +// UpdateUsedCount sets the "used_count" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateUsedCount() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldUsedCount) + return u +} + +// AddUsedCount adds v to the "used_count" field. +func (u *PromoCodeUpsert) AddUsedCount(v int) *PromoCodeUpsert { + u.Add(promocode.FieldUsedCount, v) + return u +} + +// SetStatus sets the "status" field. +func (u *PromoCodeUpsert) SetStatus(v string) *PromoCodeUpsert { + u.Set(promocode.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateStatus() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldStatus) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *PromoCodeUpsert) SetExpiresAt(v time.Time) *PromoCodeUpsert { + u.Set(promocode.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateExpiresAt() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldExpiresAt) + return u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *PromoCodeUpsert) ClearExpiresAt() *PromoCodeUpsert { + u.SetNull(promocode.FieldExpiresAt) + return u +} + +// SetNotes sets the "notes" field. +func (u *PromoCodeUpsert) SetNotes(v string) *PromoCodeUpsert { + u.Set(promocode.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateNotes() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *PromoCodeUpsert) ClearNotes() *PromoCodeUpsert { + u.SetNull(promocode.FieldNotes) + return u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (u *PromoCodeUpsert) SetUpdatedAt(v time.Time) *PromoCodeUpsert { + u.Set(promocode.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateUpdatedAt() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PromoCodeUpsertOne) UpdateNewValues() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(promocode.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUpsertOne) Ignore() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUpsertOne) DoNothing() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeCreate.OnConflict +// documentation for more info. +func (u *PromoCodeUpsertOne) Update(set func(*PromoCodeUpsert)) *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *PromoCodeUpsertOne) SetCode(v string) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateCode() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateCode() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUpsertOne) SetBonusAmount(v float64) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUpsertOne) AddBonusAmount(v float64) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateBonusAmount() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsertOne) SetMaxUses(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetMaxUses(v) + }) +} + +// AddMaxUses adds v to the "max_uses" field. +func (u *PromoCodeUpsertOne) AddMaxUses(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.AddMaxUses(v) + }) +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. 
+func (u *PromoCodeUpsertOne) UpdateMaxUses() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateMaxUses()
+	})
+}
+
+// SetUsedCount sets the "used_count" field.
+func (u *PromoCodeUpsertOne) SetUsedCount(v int) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.SetUsedCount(v)
+	})
+}
+
+// AddUsedCount adds v to the "used_count" field.
+func (u *PromoCodeUpsertOne) AddUsedCount(v int) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.AddUsedCount(v)
+	})
+}
+
+// UpdateUsedCount sets the "used_count" field to the value that was provided on create.
+func (u *PromoCodeUpsertOne) UpdateUsedCount() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateUsedCount()
+	})
+}
+
+// SetStatus sets the "status" field.
+func (u *PromoCodeUpsertOne) SetStatus(v string) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.SetStatus(v)
+	})
+}
+
+// UpdateStatus sets the "status" field to the value that was provided on create.
+func (u *PromoCodeUpsertOne) UpdateStatus() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateStatus()
+	})
+}
+
+// SetExpiresAt sets the "expires_at" field.
+func (u *PromoCodeUpsertOne) SetExpiresAt(v time.Time) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.SetExpiresAt(v)
+	})
+}
+
+// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create.
+func (u *PromoCodeUpsertOne) UpdateExpiresAt() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateExpiresAt()
+	})
+}
+
+// ClearExpiresAt clears the value of the "expires_at" field.
+func (u *PromoCodeUpsertOne) ClearExpiresAt() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.ClearExpiresAt()
+	})
+}
+
+// SetNotes sets the "notes" field.
+func (u *PromoCodeUpsertOne) SetNotes(v string) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.SetNotes(v)
+	})
+}
+
+// UpdateNotes sets the "notes" field to the value that was provided on create.
+func (u *PromoCodeUpsertOne) UpdateNotes() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateNotes()
+	})
+}
+
+// ClearNotes clears the value of the "notes" field.
+func (u *PromoCodeUpsertOne) ClearNotes() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.ClearNotes()
+	})
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *PromoCodeUpsertOne) SetUpdatedAt(v time.Time) *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.SetUpdatedAt(v)
+	})
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *PromoCodeUpsertOne) UpdateUpdatedAt() *PromoCodeUpsertOne {
+	return u.Update(func(s *PromoCodeUpsert) {
+		s.UpdateUpdatedAt()
+	})
+}
+
+// Exec executes the query.
+func (u *PromoCodeUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for PromoCodeCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *PromoCodeUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
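+// For example, a sketch that creates-or-updates by "code" and reads the row
+// ID back (assumes a unique index on the "code" column):
+//
+//	id, err := client.PromoCode.Create().
+//		SetCode("WELCOME10").
+//		OnConflictColumns(promocode.FieldCode).
+//		UpdateNewValues().
+//		ID(ctx)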
+func (u *PromoCodeUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *PromoCodeUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// PromoCodeCreateBulk is the builder for creating many PromoCode entities in bulk.
+type PromoCodeCreateBulk struct {
+	config
+	err      error
+	builders []*PromoCodeCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the PromoCode entities in the database.
+func (_c *PromoCodeCreateBulk) Save(ctx context.Context) ([]*PromoCode, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*PromoCode, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*PromoCodeMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *PromoCodeCreateBulk) SaveX(ctx context.Context) []*PromoCode {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *PromoCodeCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *PromoCodeCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.PromoCode.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.PromoCodeUpsert) {
+//			u.SetCode(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *PromoCodeCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUpsertBulk {
+	_c.conflict = opts
+	return &PromoCodeUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target.
Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUpsertBulk{ + create: _c, + } +} + +// PromoCodeUpsertBulk is the builder for "upsert"-ing +// a bulk of PromoCode nodes. +type PromoCodeUpsertBulk struct { + create *PromoCodeCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PromoCodeUpsertBulk) UpdateNewValues() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(promocode.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUpsertBulk) Ignore() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUpsertBulk) DoNothing() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeCreateBulk.OnConflict +// documentation for more info. +func (u *PromoCodeUpsertBulk) Update(set func(*PromoCodeUpsert)) *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *PromoCodeUpsertBulk) SetCode(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateCode() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateCode() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUpsertBulk) SetBonusAmount(v float64) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUpsertBulk) AddBonusAmount(v float64) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateBonusAmount() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsertBulk) SetMaxUses(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetMaxUses(v) + }) +} + +// AddMaxUses adds v to the "max_uses" field. 
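+// In a bulk upsert, the Add variant increments the stored value instead of
+// overwriting it; a sketch (same unique-index assumption as above):
+//
+//	err := client.PromoCode.CreateBulk(builders...).
+//		OnConflictColumns(promocode.FieldCode).
+//		Update(func(u *ent.PromoCodeUpsert) {
+//			u.AddMaxUses(10)
+//		}).
+//		Exec(ctx)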
+func (u *PromoCodeUpsertBulk) AddMaxUses(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddMaxUses(v) + }) +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateMaxUses() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateMaxUses() + }) +} + +// SetUsedCount sets the "used_count" field. +func (u *PromoCodeUpsertBulk) SetUsedCount(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUsedCount(v) + }) +} + +// AddUsedCount adds v to the "used_count" field. +func (u *PromoCodeUpsertBulk) AddUsedCount(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddUsedCount(v) + }) +} + +// UpdateUsedCount sets the "used_count" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateUsedCount() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUsedCount() + }) +} + +// SetStatus sets the "status" field. +func (u *PromoCodeUpsertBulk) SetStatus(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateStatus() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *PromoCodeUpsertBulk) SetExpiresAt(v time.Time) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateExpiresAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateExpiresAt() + }) +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *PromoCodeUpsertBulk) ClearExpiresAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearExpiresAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *PromoCodeUpsertBulk) SetNotes(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateNotes() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *PromoCodeUpsertBulk) ClearNotes() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearNotes() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *PromoCodeUpsertBulk) SetUpdatedAt(v time.Time) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateUpdatedAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. 
Set it on the PromoCodeCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocode_delete.go b/backend/ent/promocode_delete.go new file mode 100644 index 00000000..7e4fa3a6 --- /dev/null +++ b/backend/ent/promocode_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" +) + +// PromoCodeDelete is the builder for deleting a PromoCode entity. +type PromoCodeDelete struct { + config + hooks []Hook + mutation *PromoCodeMutation +} + +// Where appends a list predicates to the PromoCodeDelete builder. +func (_d *PromoCodeDelete) Where(ps ...predicate.PromoCode) *PromoCodeDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *PromoCodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *PromoCodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// PromoCodeDeleteOne is the builder for deleting a single PromoCode entity. +type PromoCodeDeleteOne struct { + _d *PromoCodeDelete +} + +// Where appends a list predicates to the PromoCodeDelete builder. +func (_d *PromoCodeDeleteOne) Where(ps ...predicate.PromoCode) *PromoCodeDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *PromoCodeDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{promocode.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocode_query.go b/backend/ent/promocode_query.go new file mode 100644 index 00000000..2156b0f0 --- /dev/null +++ b/backend/ent/promocode_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeQuery is the builder for querying PromoCode entities. +type PromoCodeQuery struct { + config + ctx *QueryContext + order []promocode.OrderOption + inters []Interceptor + predicates []predicate.PromoCode + withUsageRecords *PromoCodeUsageQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PromoCodeQuery builder. +func (_q *PromoCodeQuery) Where(ps ...predicate.PromoCode) *PromoCodeQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *PromoCodeQuery) Limit(limit int) *PromoCodeQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *PromoCodeQuery) Offset(offset int) *PromoCodeQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *PromoCodeQuery) Unique(unique bool) *PromoCodeQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *PromoCodeQuery) Order(o ...promocode.OrderOption) *PromoCodeQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUsageRecords chains the current query on the "usage_records" edge. +func (_q *PromoCodeQuery) QueryUsageRecords() *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocode.Table, promocode.FieldID, selector), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PromoCode entity from the query. +// Returns a *NotFoundError when no PromoCode was found. +func (_q *PromoCodeQuery) First(ctx context.Context) (*PromoCode, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{promocode.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *PromoCodeQuery) FirstX(ctx context.Context) *PromoCode { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PromoCode ID from the query. +// Returns a *NotFoundError when no PromoCode ID was found. 
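+// For example, a sketch (the status predicate is illustrative and assumes the
+// generated promocode.StatusEQ helper):
+//
+//	id, err := client.PromoCode.Query().
+//		Where(promocode.StatusEQ("active")).
+//		FirstID(ctx)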
+func (_q *PromoCodeQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{promocode.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *PromoCodeQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PromoCode entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PromoCode entity is found. +// Returns a *NotFoundError when no PromoCode entities are found. +func (_q *PromoCodeQuery) Only(ctx context.Context) (*PromoCode, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{promocode.Label} + default: + return nil, &NotSingularError{promocode.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *PromoCodeQuery) OnlyX(ctx context.Context) *PromoCode { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PromoCode ID in the query. +// Returns a *NotSingularError when more than one PromoCode ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *PromoCodeQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{promocode.Label} + default: + err = &NotSingularError{promocode.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *PromoCodeQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PromoCodes. +func (_q *PromoCodeQuery) All(ctx context.Context) ([]*PromoCode, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PromoCode, *PromoCodeQuery]() + return withInterceptors[[]*PromoCode](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *PromoCodeQuery) AllX(ctx context.Context) []*PromoCode { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PromoCode IDs. +func (_q *PromoCodeQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(promocode.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *PromoCodeQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
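+// For example, a sketch (assumes the generated promocode.UsedCountGT helper):
+//
+//	n, err := client.PromoCode.Query().
+//		Where(promocode.UsedCountGT(0)).
+//		Count(ctx)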
+func (_q *PromoCodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*PromoCodeQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *PromoCodeQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *PromoCodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *PromoCodeQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PromoCodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *PromoCodeQuery) Clone() *PromoCodeQuery { + if _q == nil { + return nil + } + return &PromoCodeQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]promocode.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.PromoCode{}, _q.predicates...), + withUsageRecords: _q.withUsageRecords.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUsageRecords tells the query-builder to eager-load the nodes that are connected to +// the "usage_records" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *PromoCodeQuery) WithUsageRecords(opts ...func(*PromoCodeUsageQuery)) *PromoCodeQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageRecords = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PromoCode.Query(). +// GroupBy(promocode.FieldCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *PromoCodeQuery) GroupBy(field string, fields ...string) *PromoCodeGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &PromoCodeGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = promocode.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// } +// +// client.PromoCode.Query(). +// Select(promocode.FieldCode). +// Scan(ctx, &v) +func (_q *PromoCodeQuery) Select(fields ...string) *PromoCodeSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &PromoCodeSelect{PromoCodeQuery: _q} + sbuild.label = promocode.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PromoCodeSelect configured with the given aggregations. 
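+// For example, a sketch that sums a single column into an int:
+//
+//	total, err := client.PromoCode.Query().
+//		Aggregate(ent.Sum(promocode.FieldUsedCount)).
+//		Int(ctx)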
+func (_q *PromoCodeQuery) Aggregate(fns ...AggregateFunc) *PromoCodeSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *PromoCodeQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !promocode.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *PromoCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCode, error) { + var ( + nodes = []*PromoCode{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withUsageRecords != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PromoCode).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PromoCode{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUsageRecords; query != nil { + if err := _q.loadUsageRecords(ctx, query, nodes, + func(n *PromoCode) { n.Edges.UsageRecords = []*PromoCodeUsage{} }, + func(n *PromoCode, e *PromoCodeUsage) { n.Edges.UsageRecords = append(n.Edges.UsageRecords, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *PromoCodeQuery) loadUsageRecords(ctx context.Context, query *PromoCodeUsageQuery, nodes []*PromoCode, init func(*PromoCode), assign func(*PromoCode, *PromoCodeUsage)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*PromoCode) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(promocodeusage.FieldPromoCodeID) + } + query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(promocode.UsageRecordsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.PromoCodeID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "promo_code_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *PromoCodeQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *PromoCodeQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; 
len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID) + for i := range fields { + if fields[i] != promocode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *PromoCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(promocode.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = promocode.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *PromoCodeQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *PromoCodeQuery) ForShare(opts ...sql.LockOption) *PromoCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// PromoCodeGroupBy is the group-by builder for PromoCode entities. +type PromoCodeGroupBy struct { + selector + build *PromoCodeQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *PromoCodeGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
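+// For example, a sketch following the GroupBy example above (the struct tags
+// must match the selected columns):
+//
+//	var v []struct {
+//		Status string `json:"status"`
+//		Count  int    `json:"count"`
+//	}
+//	err := client.PromoCode.Query().
+//		GroupBy(promocode.FieldStatus).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)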
+func (_g *PromoCodeGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeQuery, *PromoCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *PromoCodeGroupBy) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PromoCodeSelect is the builder for selecting fields of PromoCode entities. +type PromoCodeSelect struct { + *PromoCodeQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *PromoCodeSelect) Aggregate(fns ...AggregateFunc) *PromoCodeSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *PromoCodeSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeQuery, *PromoCodeSelect](ctx, _s.PromoCodeQuery, _s, _s.inters, v) +} + +func (_s *PromoCodeSelect) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/promocode_update.go b/backend/ent/promocode_update.go new file mode 100644 index 00000000..1a7481c8 --- /dev/null +++ b/backend/ent/promocode_update.go @@ -0,0 +1,745 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeUpdate is the builder for updating PromoCode entities. +type PromoCodeUpdate struct { + config + hooks []Hook + mutation *PromoCodeMutation +} + +// Where appends a list predicates to the PromoCodeUpdate builder. +func (_u *PromoCodeUpdate) Where(ps ...predicate.PromoCode) *PromoCodeUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetCode sets the "code" field. 
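+// Combined with Where, the update applies to every matching row and Save
+// reports how many rows were affected; a sketch (illustrative values, assumes
+// the generated promocode.StatusEQ helper):
+//
+//	n, err := client.PromoCode.Update().
+//		Where(promocode.StatusEQ("expired")).
+//		SetStatus("disabled").
+//		Save(ctx)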
+func (_u *PromoCodeUpdate) SetCode(v string) *PromoCodeUpdate { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableCode(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUpdate) SetBonusAmount(v float64) *PromoCodeUpdate { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUpdate { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUpdate) AddBonusAmount(v float64) *PromoCodeUpdate { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetMaxUses sets the "max_uses" field. +func (_u *PromoCodeUpdate) SetMaxUses(v int) *PromoCodeUpdate { + _u.mutation.ResetMaxUses() + _u.mutation.SetMaxUses(v) + return _u +} + +// SetNillableMaxUses sets the "max_uses" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableMaxUses(v *int) *PromoCodeUpdate { + if v != nil { + _u.SetMaxUses(*v) + } + return _u +} + +// AddMaxUses adds value to the "max_uses" field. +func (_u *PromoCodeUpdate) AddMaxUses(v int) *PromoCodeUpdate { + _u.mutation.AddMaxUses(v) + return _u +} + +// SetUsedCount sets the "used_count" field. +func (_u *PromoCodeUpdate) SetUsedCount(v int) *PromoCodeUpdate { + _u.mutation.ResetUsedCount() + _u.mutation.SetUsedCount(v) + return _u +} + +// SetNillableUsedCount sets the "used_count" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableUsedCount(v *int) *PromoCodeUpdate { + if v != nil { + _u.SetUsedCount(*v) + } + return _u +} + +// AddUsedCount adds value to the "used_count" field. +func (_u *PromoCodeUpdate) AddUsedCount(v int) *PromoCodeUpdate { + _u.mutation.AddUsedCount(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *PromoCodeUpdate) SetStatus(v string) *PromoCodeUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableStatus(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *PromoCodeUpdate) SetExpiresAt(v time.Time) *PromoCodeUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *PromoCodeUpdate) ClearExpiresAt() *PromoCodeUpdate { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *PromoCodeUpdate) SetNotes(v string) *PromoCodeUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableNotes(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *PromoCodeUpdate) ClearNotes() *PromoCodeUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *PromoCodeUpdate) SetUpdatedAt(v time.Time) *PromoCodeUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (_u *PromoCodeUpdate) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdate { + _u.mutation.AddUsageRecordIDs(ids...) + return _u +} + +// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageRecordIDs(ids...) +} + +// Mutation returns the PromoCodeMutation object of the builder. +func (_u *PromoCodeUpdate) Mutation() *PromoCodeMutation { + return _u.mutation +} + +// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdate) ClearUsageRecords() *PromoCodeUpdate { + _u.mutation.ClearUsageRecords() + return _u +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs. +func (_u *PromoCodeUpdate) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdate { + _u.mutation.RemoveUsageRecordIDs(ids...) + return _u +} + +// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities. +func (_u *PromoCodeUpdate) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageRecordIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *PromoCodeUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *PromoCodeUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *PromoCodeUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := promocode.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *PromoCodeUpdate) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := promocode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := promocode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)} + } + } + return nil +} + +func (_u *PromoCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(promocode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MaxUses(); ok { + _spec.SetField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedMaxUses(); ok { + _spec.AddField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.UsedCount(); ok { + _spec.SetField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedUsedCount(); ok { + _spec.AddField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(promocode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(promocode.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(promocode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(promocode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: 
[]string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// PromoCodeUpdateOne is the builder for updating a single PromoCode entity. +type PromoCodeUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PromoCodeMutation +} + +// SetCode sets the "code" field. +func (_u *PromoCodeUpdateOne) SetCode(v string) *PromoCodeUpdateOne { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableCode(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUpdateOne) SetBonusAmount(v float64) *PromoCodeUpdateOne { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUpdateOne { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUpdateOne) AddBonusAmount(v float64) *PromoCodeUpdateOne { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetMaxUses sets the "max_uses" field. +func (_u *PromoCodeUpdateOne) SetMaxUses(v int) *PromoCodeUpdateOne { + _u.mutation.ResetMaxUses() + _u.mutation.SetMaxUses(v) + return _u +} + +// SetNillableMaxUses sets the "max_uses" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableMaxUses(v *int) *PromoCodeUpdateOne { + if v != nil { + _u.SetMaxUses(*v) + } + return _u +} + +// AddMaxUses adds value to the "max_uses" field. +func (_u *PromoCodeUpdateOne) AddMaxUses(v int) *PromoCodeUpdateOne { + _u.mutation.AddMaxUses(v) + return _u +} + +// SetUsedCount sets the "used_count" field. +func (_u *PromoCodeUpdateOne) SetUsedCount(v int) *PromoCodeUpdateOne { + _u.mutation.ResetUsedCount() + _u.mutation.SetUsedCount(v) + return _u +} + +// SetNillableUsedCount sets the "used_count" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableUsedCount(v *int) *PromoCodeUpdateOne { + if v != nil { + _u.SetUsedCount(*v) + } + return _u +} + +// AddUsedCount adds value to the "used_count" field. +func (_u *PromoCodeUpdateOne) AddUsedCount(v int) *PromoCodeUpdateOne { + _u.mutation.AddUsedCount(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *PromoCodeUpdateOne) SetStatus(v string) *PromoCodeUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableStatus(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. 
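+// For example, a sketch (assumes the generated client.PromoCode.UpdateOneID
+// constructor; the duration is illustrative):
+//
+//	pc, err := client.PromoCode.UpdateOneID(id).
+//		SetExpiresAt(time.Now().Add(24 * time.Hour)).
+//		AddUsedCount(1).
+//		Save(ctx)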
+func (_u *PromoCodeUpdateOne) SetExpiresAt(v time.Time) *PromoCodeUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *PromoCodeUpdateOne) ClearExpiresAt() *PromoCodeUpdateOne { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *PromoCodeUpdateOne) SetNotes(v string) *PromoCodeUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableNotes(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *PromoCodeUpdateOne) ClearNotes() *PromoCodeUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *PromoCodeUpdateOne) SetUpdatedAt(v time.Time) *PromoCodeUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (_u *PromoCodeUpdateOne) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne { + _u.mutation.AddUsageRecordIDs(ids...) + return _u +} + +// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdateOne) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageRecordIDs(ids...) +} + +// Mutation returns the PromoCodeMutation object of the builder. +func (_u *PromoCodeUpdateOne) Mutation() *PromoCodeMutation { + return _u.mutation +} + +// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdateOne) ClearUsageRecords() *PromoCodeUpdateOne { + _u.mutation.ClearUsageRecords() + return _u +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs. +func (_u *PromoCodeUpdateOne) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne { + _u.mutation.RemoveUsageRecordIDs(ids...) + return _u +} + +// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities. +func (_u *PromoCodeUpdateOne) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageRecordIDs(ids...) +} + +// Where appends a list predicates to the PromoCodeUpdate builder. +func (_u *PromoCodeUpdateOne) Where(ps ...predicate.PromoCode) *PromoCodeUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *PromoCodeUpdateOne) Select(field string, fields ...string) *PromoCodeUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated PromoCode entity. +func (_u *PromoCodeUpdateOne) Save(ctx context.Context) (*PromoCode, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *PromoCodeUpdateOne) SaveX(ctx context.Context) *PromoCode { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *PromoCodeUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *PromoCodeUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := promocode.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *PromoCodeUpdateOne) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := promocode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := promocode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)} + } + } + return nil +} + +func (_u *PromoCodeUpdateOne) sqlSave(ctx context.Context) (_node *PromoCode, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCode.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID) + for _, f := range fields { + if !promocode.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != promocode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(promocode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MaxUses(); ok { + _spec.SetField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedMaxUses(); ok { + _spec.AddField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.UsedCount(); ok { + _spec.SetField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedUsedCount(); ok { + _spec.AddField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(promocode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(promocode.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + 
_spec.SetField(promocode.FieldNotes, field.TypeString, value)
+	}
+	if _u.mutation.NotesCleared() {
+		_spec.ClearField(promocode.FieldNotes, field.TypeString)
+	}
+	if value, ok := _u.mutation.UpdatedAt(); ok {
+		_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
+	}
+	if _u.mutation.UsageRecordsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   promocode.UsageRecordsTable,
+			Columns: []string{promocode.UsageRecordsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
+			},
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   promocode.UsageRecordsTable,
+			Columns: []string{promocode.UsageRecordsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
+	}
+	if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   promocode.UsageRecordsTable,
+			Columns: []string{promocode.UsageRecordsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges.Add = append(_spec.Edges.Add, edge)
+	}
+	_node = &PromoCode{config: _u.config}
+	_spec.Assign = _node.assignValues
+	_spec.ScanValues = _node.scanValues
+	if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
+		if _, ok := err.(*sqlgraph.NotFoundError); ok {
+			err = &NotFoundError{promocode.Label}
+		} else if sqlgraph.IsConstraintError(err) {
+			err = &ConstraintError{msg: err.Error(), wrap: err}
+		}
+		return nil, err
+	}
+	_u.mutation.done = true
+	return _node, nil
+}
diff --git a/backend/ent/promocodeusage.go b/backend/ent/promocodeusage.go
new file mode 100644
index 00000000..1ba3a8bf
--- /dev/null
+++ b/backend/ent/promocodeusage.go
@@ -0,0 +1,187 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/Wei-Shaw/sub2api/ent/promocode"
+	"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+)
+
+// PromoCodeUsage is the model entity for the PromoCodeUsage schema.
+type PromoCodeUsage struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int64 `json:"id,omitempty"`
+	// Promo code ID
+	PromoCodeID int64 `json:"promo_code_id,omitempty"`
+	// ID of the user who used the code
+	UserID int64 `json:"user_id,omitempty"`
+	// Actual bonus amount granted
+	BonusAmount float64 `json:"bonus_amount,omitempty"`
+	// Time the code was used
+	UsedAt time.Time `json:"used_at,omitempty"`
+	// Edges holds the relations/edges for other nodes in the graph.
+	// The values are being populated by the PromoCodeUsageQuery when eager-loading is set.
+	Edges PromoCodeUsageEdges `json:"edges"`
+	selectValues sql.SelectValues
+}
+
+// PromoCodeUsageEdges holds the relations/edges for other nodes in the graph.
+type PromoCodeUsageEdges struct {
+	// PromoCode holds the value of the promo_code edge.
+	PromoCode *PromoCode `json:"promo_code,omitempty"`
+	// User holds the value of the user edge.
+ User *User `json:"user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// PromoCodeOrErr returns the PromoCode value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PromoCodeUsageEdges) PromoCodeOrErr() (*PromoCode, error) { + if e.PromoCode != nil { + return e.PromoCode, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: promocode.Label} + } + return nil, &NotLoadedError{edge: "promo_code"} +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PromoCodeUsageEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*PromoCodeUsage) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case promocodeusage.FieldBonusAmount: + values[i] = new(sql.NullFloat64) + case promocodeusage.FieldID, promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID: + values[i] = new(sql.NullInt64) + case promocodeusage.FieldUsedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PromoCodeUsage fields. +func (_m *PromoCodeUsage) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case promocodeusage.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case promocodeusage.FieldPromoCodeID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field promo_code_id", values[i]) + } else if value.Valid { + _m.PromoCodeID = value.Int64 + } + case promocodeusage.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case promocodeusage.FieldBonusAmount: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field bonus_amount", values[i]) + } else if value.Valid { + _m.BonusAmount = value.Float64 + } + case promocodeusage.FieldUsedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field used_at", values[i]) + } else if value.Valid { + _m.UsedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the PromoCodeUsage. +// This includes values selected through modifiers, order, etc. +func (_m *PromoCodeUsage) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryPromoCode queries the "promo_code" edge of the PromoCodeUsage entity. 
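+
+// Example (sketch, not generated): the *OrErr accessors above distinguish an
+// edge that was never requested (NotLoadedError) from one that was requested
+// but missing (NotFoundError). "usage" is assumed to be a *PromoCodeUsage
+// fetched with eager-loading enabled:
+//
+//	if pc, err := usage.Edges.PromoCodeOrErr(); err == nil {
+//		fmt.Println(pc.Code)
+//	}
+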
+func (_m *PromoCodeUsage) QueryPromoCode() *PromoCodeQuery { + return NewPromoCodeUsageClient(_m.config).QueryPromoCode(_m) +} + +// QueryUser queries the "user" edge of the PromoCodeUsage entity. +func (_m *PromoCodeUsage) QueryUser() *UserQuery { + return NewPromoCodeUsageClient(_m.config).QueryUser(_m) +} + +// Update returns a builder for updating this PromoCodeUsage. +// Note that you need to call PromoCodeUsage.Unwrap() before calling this method if this PromoCodeUsage +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *PromoCodeUsage) Update() *PromoCodeUsageUpdateOne { + return NewPromoCodeUsageClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the PromoCodeUsage entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *PromoCodeUsage) Unwrap() *PromoCodeUsage { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: PromoCodeUsage is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *PromoCodeUsage) String() string { + var builder strings.Builder + builder.WriteString("PromoCodeUsage(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("promo_code_id=") + builder.WriteString(fmt.Sprintf("%v", _m.PromoCodeID)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("bonus_amount=") + builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount)) + builder.WriteString(", ") + builder.WriteString("used_at=") + builder.WriteString(_m.UsedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// PromoCodeUsages is a parsable slice of PromoCodeUsage. +type PromoCodeUsages []*PromoCodeUsage diff --git a/backend/ent/promocodeusage/promocodeusage.go b/backend/ent/promocodeusage/promocodeusage.go new file mode 100644 index 00000000..f4e05970 --- /dev/null +++ b/backend/ent/promocodeusage/promocodeusage.go @@ -0,0 +1,125 @@ +// Code generated by ent, DO NOT EDIT. + +package promocodeusage + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the promocodeusage type in the database. + Label = "promo_code_usage" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldPromoCodeID holds the string denoting the promo_code_id field in the database. + FieldPromoCodeID = "promo_code_id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldBonusAmount holds the string denoting the bonus_amount field in the database. + FieldBonusAmount = "bonus_amount" + // FieldUsedAt holds the string denoting the used_at field in the database. + FieldUsedAt = "used_at" + // EdgePromoCode holds the string denoting the promo_code edge name in mutations. + EdgePromoCode = "promo_code" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // Table holds the table name of the promocodeusage in the database. + Table = "promo_code_usages" + // PromoCodeTable is the table that holds the promo_code relation/edge. + PromoCodeTable = "promo_code_usages" + // PromoCodeInverseTable is the table name for the PromoCode entity. 
+ // It exists in this package in order to avoid circular dependency with the "promocode" package. + PromoCodeInverseTable = "promo_codes" + // PromoCodeColumn is the table column denoting the promo_code relation/edge. + PromoCodeColumn = "promo_code_id" + // UserTable is the table that holds the user relation/edge. + UserTable = "promo_code_usages" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" +) + +// Columns holds all SQL columns for promocodeusage fields. +var Columns = []string{ + FieldID, + FieldPromoCodeID, + FieldUserID, + FieldBonusAmount, + FieldUsedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultUsedAt holds the default value on creation for the "used_at" field. + DefaultUsedAt func() time.Time +) + +// OrderOption defines the ordering options for the PromoCodeUsage queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByPromoCodeID orders the results by the promo_code_id field. +func ByPromoCodeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPromoCodeID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByBonusAmount orders the results by the bonus_amount field. +func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBonusAmount, opts...).ToFunc() +} + +// ByUsedAt orders the results by the used_at field. +func ByUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedAt, opts...).ToFunc() +} + +// ByPromoCodeField orders the results by promo_code field. +func ByPromoCodeField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPromoCodeStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} +func newPromoCodeStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PromoCodeInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn), + ) +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} diff --git a/backend/ent/promocodeusage/where.go b/backend/ent/promocodeusage/where.go new file mode 100644 index 00000000..fe657fd4 --- /dev/null +++ b/backend/ent/promocodeusage/where.go @@ -0,0 +1,257 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package promocodeusage + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldID, id)) +} + +// PromoCodeID applies equality check predicate on the "promo_code_id" field. It's identical to PromoCodeIDEQ. +func PromoCodeID(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v)) +} + +// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ. +func BonusAmount(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v)) +} + +// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ. +func UsedAt(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v)) +} + +// PromoCodeIDEQ applies the EQ predicate on the "promo_code_id" field. +func PromoCodeIDEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v)) +} + +// PromoCodeIDNEQ applies the NEQ predicate on the "promo_code_id" field. +func PromoCodeIDNEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldPromoCodeID, v)) +} + +// PromoCodeIDIn applies the In predicate on the "promo_code_id" field. +func PromoCodeIDIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldPromoCodeID, vs...)) +} + +// PromoCodeIDNotIn applies the NotIn predicate on the "promo_code_id" field. +func PromoCodeIDNotIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldPromoCodeID, vs...)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. 
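+
+// Example (sketch, not generated): the predicates in this package compose
+// inside a query's Where clause, and multiple predicates are ANDed
+// implicitly. "client" and "ctx" are assumed placeholders:
+//
+//	rows, err := client.PromoCodeUsage.Query().
+//		Where(
+//			promocodeusage.UserID(42),
+//			promocodeusage.UsedAtGTE(time.Now().AddDate(0, 0, -7)),
+//		).
+//		All(ctx)
+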
+func UserIDEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUserID, vs...)) +} + +// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field. +func BonusAmountEQ(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v)) +} + +// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field. +func BonusAmountNEQ(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldBonusAmount, v)) +} + +// BonusAmountIn applies the In predicate on the "bonus_amount" field. +func BonusAmountIn(vs ...float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldBonusAmount, vs...)) +} + +// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field. +func BonusAmountNotIn(vs ...float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldBonusAmount, vs...)) +} + +// BonusAmountGT applies the GT predicate on the "bonus_amount" field. +func BonusAmountGT(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldBonusAmount, v)) +} + +// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field. +func BonusAmountGTE(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldBonusAmount, v)) +} + +// BonusAmountLT applies the LT predicate on the "bonus_amount" field. +func BonusAmountLT(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldBonusAmount, v)) +} + +// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field. +func BonusAmountLTE(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldBonusAmount, v)) +} + +// UsedAtEQ applies the EQ predicate on the "used_at" field. +func UsedAtEQ(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v)) +} + +// UsedAtNEQ applies the NEQ predicate on the "used_at" field. +func UsedAtNEQ(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUsedAt, v)) +} + +// UsedAtIn applies the In predicate on the "used_at" field. +func UsedAtIn(vs ...time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldUsedAt, vs...)) +} + +// UsedAtNotIn applies the NotIn predicate on the "used_at" field. +func UsedAtNotIn(vs ...time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUsedAt, vs...)) +} + +// UsedAtGT applies the GT predicate on the "used_at" field. +func UsedAtGT(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldUsedAt, v)) +} + +// UsedAtGTE applies the GTE predicate on the "used_at" field. 
+func UsedAtGTE(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldUsedAt, v)) +} + +// UsedAtLT applies the LT predicate on the "used_at" field. +func UsedAtLT(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldUsedAt, v)) +} + +// UsedAtLTE applies the LTE predicate on the "used_at" field. +func UsedAtLTE(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldUsedAt, v)) +} + +// HasPromoCode applies the HasEdge predicate on the "promo_code" edge. +func HasPromoCode() predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPromoCodeWith applies the HasEdge predicate on the "promo_code" edge with a given conditions (other predicates). +func HasPromoCodeWith(preds ...predicate.PromoCode) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := newPromoCodeStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.NotPredicates(p)) +} diff --git a/backend/ent/promocodeusage_create.go b/backend/ent/promocodeusage_create.go new file mode 100644 index 00000000..79d9c768 --- /dev/null +++ b/backend/ent/promocodeusage_create.go @@ -0,0 +1,696 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageCreate is the builder for creating a PromoCodeUsage entity. +type PromoCodeUsageCreate struct { + config + mutation *PromoCodeUsageMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetPromoCodeID sets the "promo_code_id" field. 
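+
+// Example (sketch, not generated): HasPromoCodeWith / HasUserWith from the
+// where.go above filter through an edge without loading it. The code literal
+// is a hypothetical placeholder, and promocode.CodeEQ is assumed from the
+// sibling promocode where.go:
+//
+//	n, err := client.PromoCodeUsage.Query().
+//		Where(promocodeusage.HasPromoCodeWith(promocode.CodeEQ("WELCOME10"))).
+//		Count(ctx)
+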
+func (_c *PromoCodeUsageCreate) SetPromoCodeID(v int64) *PromoCodeUsageCreate { + _c.mutation.SetPromoCodeID(v) + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *PromoCodeUsageCreate) SetUserID(v int64) *PromoCodeUsageCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_c *PromoCodeUsageCreate) SetBonusAmount(v float64) *PromoCodeUsageCreate { + _c.mutation.SetBonusAmount(v) + return _c +} + +// SetUsedAt sets the "used_at" field. +func (_c *PromoCodeUsageCreate) SetUsedAt(v time.Time) *PromoCodeUsageCreate { + _c.mutation.SetUsedAt(v) + return _c +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_c *PromoCodeUsageCreate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageCreate { + if v != nil { + _c.SetUsedAt(*v) + } + return _c +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. +func (_c *PromoCodeUsageCreate) SetPromoCode(v *PromoCode) *PromoCodeUsageCreate { + return _c.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_c *PromoCodeUsageCreate) SetUser(v *User) *PromoCodeUsageCreate { + return _c.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. +func (_c *PromoCodeUsageCreate) Mutation() *PromoCodeUsageMutation { + return _c.mutation +} + +// Save creates the PromoCodeUsage in the database. +func (_c *PromoCodeUsageCreate) Save(ctx context.Context) (*PromoCodeUsage, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *PromoCodeUsageCreate) SaveX(ctx context.Context) *PromoCodeUsage { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *PromoCodeUsageCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeUsageCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *PromoCodeUsageCreate) defaults() { + if _, ok := _c.mutation.UsedAt(); !ok { + v := promocodeusage.DefaultUsedAt() + _c.mutation.SetUsedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
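+
+// Example (sketch, not generated): creating a usage record. used_at falls
+// back to DefaultUsedAt via defaults() when not set explicitly, and the two
+// required edges can be wired by ID or by entity. "pc" and "u" are assumed
+// to be previously loaded entities:
+//
+//	rec, err := client.PromoCodeUsage.Create().
+//		SetPromoCode(pc).
+//		SetUser(u).
+//		SetBonusAmount(10).
+//		Save(ctx)
+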
+func (_c *PromoCodeUsageCreate) check() error { + if _, ok := _c.mutation.PromoCodeID(); !ok { + return &ValidationError{Name: "promo_code_id", err: errors.New(`ent: missing required field "PromoCodeUsage.promo_code_id"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "PromoCodeUsage.user_id"`)} + } + if _, ok := _c.mutation.BonusAmount(); !ok { + return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCodeUsage.bonus_amount"`)} + } + if _, ok := _c.mutation.UsedAt(); !ok { + return &ValidationError{Name: "used_at", err: errors.New(`ent: missing required field "PromoCodeUsage.used_at"`)} + } + if len(_c.mutation.PromoCodeIDs()) == 0 { + return &ValidationError{Name: "promo_code", err: errors.New(`ent: missing required edge "PromoCodeUsage.promo_code"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "PromoCodeUsage.user"`)} + } + return nil +} + +func (_c *PromoCodeUsageCreate) sqlSave(ctx context.Context) (*PromoCodeUsage, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *PromoCodeUsageCreate) createSpec() (*PromoCodeUsage, *sqlgraph.CreateSpec) { + var ( + _node = &PromoCodeUsage{config: _c.config} + _spec = sqlgraph.NewCreateSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + _node.BonusAmount = value + } + if value, ok := _c.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + _node.UsedAt = value + } + if nodes := _c.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.PromoCodeID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCodeUsage.Create(). +// SetPromoCodeID(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). 
+// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUsageUpsert) { +// SetPromoCodeID(v+v). +// }). +// Exec(ctx) +func (_c *PromoCodeUsageCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertOne { + _c.conflict = opts + return &PromoCodeUsageUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeUsageCreate) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUsageUpsertOne{ + create: _c, + } +} + +type ( + // PromoCodeUsageUpsertOne is the builder for "upsert"-ing + // one PromoCodeUsage node. + PromoCodeUsageUpsertOne struct { + create *PromoCodeUsageCreate + } + + // PromoCodeUsageUpsert is the "OnConflict" setter. + PromoCodeUsageUpsert struct { + *sql.UpdateSet + } +) + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsert) SetPromoCodeID(v int64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldPromoCodeID, v) + return u +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdatePromoCodeID() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldPromoCodeID) + return u +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsert) SetUserID(v int64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateUserID() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldUserID) + return u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsert) SetBonusAmount(v float64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldBonusAmount, v) + return u +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateBonusAmount() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldBonusAmount) + return u +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUsageUpsert) AddBonusAmount(v float64) *PromoCodeUsageUpsert { + u.Add(promocodeusage.FieldBonusAmount, v) + return u +} + +// SetUsedAt sets the "used_at" field. +func (u *PromoCodeUsageUpsert) SetUsedAt(v time.Time) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldUsedAt, v) + return u +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateUsedAt() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldUsedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PromoCodeUsageUpsertOne) UpdateNewValues() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ResolveWithIgnore()). 
+// Exec(ctx) +func (u *PromoCodeUsageUpsertOne) Ignore() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUsageUpsertOne) DoNothing() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreate.OnConflict +// documentation for more info. +func (u *PromoCodeUsageUpsertOne) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUsageUpsert{UpdateSet: update}) + })) + return u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsertOne) SetPromoCodeID(v int64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetPromoCodeID(v) + }) +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdatePromoCodeID() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdatePromoCodeID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsertOne) SetUserID(v int64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateUserID() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUserID() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsertOne) SetBonusAmount(v float64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUsageUpsertOne) AddBonusAmount(v float64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateBonusAmount() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *PromoCodeUsageUpsertOne) SetUsedAt(v time.Time) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateUsedAt() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUsedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUsageUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeUsageCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUsageUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
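+
+// Example (sketch, not generated): a typical upsert round-trip with the
+// builder above. The conflict target assumes a unique index over
+// (promo_code_id, user_id), which this file does not show; adjust the
+// columns to the real constraint:
+//
+//	id, err := client.PromoCodeUsage.Create().
+//		SetPromoCodeID(codeID).
+//		SetUserID(userID).
+//		SetBonusAmount(10).
+//		OnConflictColumns(promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID).
+//		UpdateNewValues().
+//		ID(ctx)
+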
+func (u *PromoCodeUsageUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *PromoCodeUsageUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// PromoCodeUsageCreateBulk is the builder for creating many PromoCodeUsage entities in bulk. +type PromoCodeUsageCreateBulk struct { + config + err error + builders []*PromoCodeUsageCreate + conflict []sql.ConflictOption +} + +// Save creates the PromoCodeUsage entities in the database. +func (_c *PromoCodeUsageCreateBulk) Save(ctx context.Context) ([]*PromoCodeUsage, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*PromoCodeUsage, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PromoCodeUsageMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *PromoCodeUsageCreateBulk) SaveX(ctx context.Context) []*PromoCodeUsage { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *PromoCodeUsageCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeUsageCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCodeUsage.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUsageUpsert) { +// SetPromoCodeID(v+v). +// }). 
+// Exec(ctx) +func (_c *PromoCodeUsageCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertBulk { + _c.conflict = opts + return &PromoCodeUsageUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeUsageCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUsageUpsertBulk{ + create: _c, + } +} + +// PromoCodeUsageUpsertBulk is the builder for "upsert"-ing +// a bulk of PromoCodeUsage nodes. +type PromoCodeUsageUpsertBulk struct { + create *PromoCodeUsageCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PromoCodeUsageUpsertBulk) UpdateNewValues() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUsageUpsertBulk) Ignore() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUsageUpsertBulk) DoNothing() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreateBulk.OnConflict +// documentation for more info. +func (u *PromoCodeUsageUpsertBulk) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUsageUpsert{UpdateSet: update}) + })) + return u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsertBulk) SetPromoCodeID(v int64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetPromoCodeID(v) + }) +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdatePromoCodeID() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdatePromoCodeID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsertBulk) SetUserID(v int64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdateUserID() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUserID() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsertBulk) SetBonusAmount(v float64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. 
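+
+// Example (sketch, not generated): bulk insertion with conflict handling.
+// Per the Exec check below, OnConflict must be set on the bulk builder, not
+// on the individual creates. "records" is a hypothetical input slice:
+//
+//	builders := make([]*ent.PromoCodeUsageCreate, len(records))
+//	for i, r := range records {
+//		builders[i] = client.PromoCodeUsage.Create().
+//			SetPromoCodeID(r.CodeID).
+//			SetUserID(r.UserID).
+//			SetBonusAmount(r.Bonus)
+//	}
+//	err := client.PromoCodeUsage.CreateBulk(builders...).
+//		OnConflictColumns(promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID).
+//		Ignore().
+//		Exec(ctx)
+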
+func (u *PromoCodeUsageUpsertBulk) AddBonusAmount(v float64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdateBonusAmount() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *PromoCodeUsageUpsertBulk) SetUsedAt(v time.Time) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdateUsedAt() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUsedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUsageUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PromoCodeUsageCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeUsageCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUsageUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocodeusage_delete.go b/backend/ent/promocodeusage_delete.go new file mode 100644 index 00000000..bd3fa5e1 --- /dev/null +++ b/backend/ent/promocodeusage_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeUsageDelete is the builder for deleting a PromoCodeUsage entity. +type PromoCodeUsageDelete struct { + config + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// Where appends a list predicates to the PromoCodeUsageDelete builder. +func (_d *PromoCodeUsageDelete) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *PromoCodeUsageDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeUsageDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *PromoCodeUsageDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// PromoCodeUsageDeleteOne is the builder for deleting a single PromoCodeUsage entity. 
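+
+// Example (sketch, not generated): predicate-scoped deletion with the
+// PromoCodeUsageDelete builder above; Exec reports how many rows were
+// removed. "codeID" is an assumed placeholder:
+//
+//	n, err := client.PromoCodeUsage.Delete().
+//		Where(promocodeusage.PromoCodeID(codeID)).
+//		Exec(ctx)
+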
+type PromoCodeUsageDeleteOne struct { + _d *PromoCodeUsageDelete +} + +// Where appends a list predicates to the PromoCodeUsageDelete builder. +func (_d *PromoCodeUsageDeleteOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *PromoCodeUsageDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{promocodeusage.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeUsageDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocodeusage_query.go b/backend/ent/promocodeusage_query.go new file mode 100644 index 00000000..95b02a16 --- /dev/null +++ b/backend/ent/promocodeusage_query.go @@ -0,0 +1,718 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageQuery is the builder for querying PromoCodeUsage entities. +type PromoCodeUsageQuery struct { + config + ctx *QueryContext + order []promocodeusage.OrderOption + inters []Interceptor + predicates []predicate.PromoCodeUsage + withPromoCode *PromoCodeQuery + withUser *UserQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PromoCodeUsageQuery builder. +func (_q *PromoCodeUsageQuery) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *PromoCodeUsageQuery) Limit(limit int) *PromoCodeUsageQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *PromoCodeUsageQuery) Offset(offset int) *PromoCodeUsageQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *PromoCodeUsageQuery) Unique(unique bool) *PromoCodeUsageQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *PromoCodeUsageQuery) Order(o ...promocodeusage.OrderOption) *PromoCodeUsageQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryPromoCode chains the current query on the "promo_code" edge. 
+func (_q *PromoCodeUsageQuery) QueryPromoCode() *PromoCodeQuery { + query := (&PromoCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector), + sqlgraph.To(promocode.Table, promocode.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUser chains the current query on the "user" edge. +func (_q *PromoCodeUsageQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PromoCodeUsage entity from the query. +// Returns a *NotFoundError when no PromoCodeUsage was found. +func (_q *PromoCodeUsageQuery) First(ctx context.Context) (*PromoCodeUsage, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{promocodeusage.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) FirstX(ctx context.Context) *PromoCodeUsage { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PromoCodeUsage ID from the query. +// Returns a *NotFoundError when no PromoCodeUsage ID was found. +func (_q *PromoCodeUsageQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{promocodeusage.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PromoCodeUsage entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PromoCodeUsage entity is found. +// Returns a *NotFoundError when no PromoCodeUsage entities are found. +func (_q *PromoCodeUsageQuery) Only(ctx context.Context) (*PromoCodeUsage, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{promocodeusage.Label} + default: + return nil, &NotSingularError{promocodeusage.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
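+// A usage sketch ("client", "ctx", "codeID" and "userID" are assumed):
+//
+//	// Only/OnlyX enforce exactly one match: more than one row yields a
+//	// *NotSingularError, zero rows a *NotFoundError.
+//	usage := client.PromoCodeUsage.Query().
+//		Where(promocodeusage.PromoCodeID(codeID)).
+//		Where(promocodeusage.UserID(userID)).
+//		OnlyX(ctx)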
+func (_q *PromoCodeUsageQuery) OnlyX(ctx context.Context) *PromoCodeUsage { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PromoCodeUsage ID in the query. +// Returns a *NotSingularError when more than one PromoCodeUsage ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *PromoCodeUsageQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{promocodeusage.Label} + default: + err = &NotSingularError{promocodeusage.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PromoCodeUsages. +func (_q *PromoCodeUsageQuery) All(ctx context.Context) ([]*PromoCodeUsage, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PromoCodeUsage, *PromoCodeUsageQuery]() + return withInterceptors[[]*PromoCodeUsage](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) AllX(ctx context.Context) []*PromoCodeUsage { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PromoCodeUsage IDs. +func (_q *PromoCodeUsageQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(promocodeusage.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *PromoCodeUsageQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*PromoCodeUsageQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *PromoCodeUsageQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PromoCodeUsageQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
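+// A usage sketch ("client", "ctx" and "since" are assumed):
+//
+//	// Build a base query once, then branch it without mutating the original.
+//	base := client.PromoCodeUsage.Query().Where(promocodeusage.BonusAmountGT(0))
+//	recent, err := base.Clone().Where(promocodeusage.UsedAtGT(since)).All(ctx)
+//	total, countErr := base.Count(ctx)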
+func (_q *PromoCodeUsageQuery) Clone() *PromoCodeUsageQuery { + if _q == nil { + return nil + } + return &PromoCodeUsageQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]promocodeusage.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.PromoCodeUsage{}, _q.predicates...), + withPromoCode: _q.withPromoCode.Clone(), + withUser: _q.withUser.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithPromoCode tells the query-builder to eager-load the nodes that are connected to +// the "promo_code" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *PromoCodeUsageQuery) WithPromoCode(opts ...func(*PromoCodeQuery)) *PromoCodeUsageQuery { + query := (&PromoCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withPromoCode = query + return _q +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *PromoCodeUsageQuery) WithUser(opts ...func(*UserQuery)) *PromoCodeUsageQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// PromoCodeID int64 `json:"promo_code_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PromoCodeUsage.Query(). +// GroupBy(promocodeusage.FieldPromoCodeID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *PromoCodeUsageQuery) GroupBy(field string, fields ...string) *PromoCodeUsageGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &PromoCodeUsageGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = promocodeusage.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// PromoCodeID int64 `json:"promo_code_id,omitempty"` +// } +// +// client.PromoCodeUsage.Query(). +// Select(promocodeusage.FieldPromoCodeID). +// Scan(ctx, &v) +func (_q *PromoCodeUsageQuery) Select(fields ...string) *PromoCodeUsageSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &PromoCodeUsageSelect{PromoCodeUsageQuery: _q} + sbuild.label = promocodeusage.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PromoCodeUsageSelect configured with the given aggregations. +func (_q *PromoCodeUsageQuery) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *PromoCodeUsageQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !promocodeusage.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *PromoCodeUsageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCodeUsage, error) { + var ( + nodes = []*PromoCodeUsage{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withPromoCode != nil, + _q.withUser != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PromoCodeUsage).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PromoCodeUsage{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withPromoCode; query != nil { + if err := _q.loadPromoCode(ctx, query, nodes, nil, + func(n *PromoCodeUsage, e *PromoCode) { n.Edges.PromoCode = e }); err != nil { + return nil, err + } + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *PromoCodeUsage, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *PromoCodeUsageQuery) loadPromoCode(ctx context.Context, query *PromoCodeQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *PromoCode)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*PromoCodeUsage) + for i := range nodes { + fk := nodes[i].PromoCodeID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(promocode.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "promo_code_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *PromoCodeUsageQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*PromoCodeUsage) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *PromoCodeUsageQuery) sqlCount(ctx 
context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *PromoCodeUsageQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID) + for i := range fields { + if fields[i] != promocodeusage.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withPromoCode != nil { + _spec.Node.AddColumnOnce(promocodeusage.FieldPromoCodeID) + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(promocodeusage.FieldUserID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *PromoCodeUsageQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(promocodeusage.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = promocodeusage.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *PromoCodeUsageQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeUsageQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *PromoCodeUsageQuery) ForShare(opts ...sql.LockOption) *PromoCodeUsageQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) 
+ }) + return _q +} + +// PromoCodeUsageGroupBy is the group-by builder for PromoCodeUsage entities. +type PromoCodeUsageGroupBy struct { + selector + build *PromoCodeUsageQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *PromoCodeUsageGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeUsageGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *PromoCodeUsageGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *PromoCodeUsageGroupBy) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PromoCodeUsageSelect is the builder for selecting fields of PromoCodeUsage entities. +type PromoCodeUsageSelect struct { + *PromoCodeUsageQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *PromoCodeUsageSelect) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *PromoCodeUsageSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageSelect](ctx, _s.PromoCodeUsageQuery, _s, _s.inters, v) +} + +func (_s *PromoCodeUsageSelect) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/promocodeusage_update.go b/backend/ent/promocodeusage_update.go new file mode 100644 index 00000000..d91a1f10 --- /dev/null +++ b/backend/ent/promocodeusage_update.go @@ -0,0 +1,510 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageUpdate is the builder for updating PromoCodeUsage entities. +type PromoCodeUsageUpdate struct { + config + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// Where appends a list predicates to the PromoCodeUsageUpdate builder. +func (_u *PromoCodeUsageUpdate) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (_u *PromoCodeUsageUpdate) SetPromoCodeID(v int64) *PromoCodeUsageUpdate { + _u.mutation.SetPromoCodeID(v) + return _u +} + +// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetPromoCodeID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *PromoCodeUsageUpdate) SetUserID(v int64) *PromoCodeUsageUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableUserID(v *int64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUsageUpdate) SetBonusAmount(v float64) *PromoCodeUsageUpdate { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUsageUpdate) AddBonusAmount(v float64) *PromoCodeUsageUpdate { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *PromoCodeUsageUpdate) SetUsedAt(v time.Time) *PromoCodeUsageUpdate { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdate { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdate) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdate { + return _u.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdate) SetUser(v *User) *PromoCodeUsageUpdate { + return _u.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. +func (_u *PromoCodeUsageUpdate) Mutation() *PromoCodeUsageMutation { + return _u.mutation +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdate) ClearPromoCode() *PromoCodeUsageUpdate { + _u.mutation.ClearPromoCode() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdate) ClearUser() *PromoCodeUsageUpdate { + _u.mutation.ClearUser() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
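+// A usage sketch ("client", "ctx" and "userID" are assumed):
+//
+//	// Bulk-update every usage of one user: bump the bonus and stamp the time.
+//	n, err := client.PromoCodeUsage.Update().
+//		Where(promocodeusage.UserID(userID)).
+//		AddBonusAmount(1.5).
+//		SetUsedAt(time.Now()).
+//		Save(ctx)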
+func (_u *PromoCodeUsageUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUsageUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *PromoCodeUsageUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUsageUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *PromoCodeUsageUpdate) check() error { + if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`) + } + return nil +} + +func (_u *PromoCodeUsageUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.PromoCodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocodeusage.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// PromoCodeUsageUpdateOne is the builder for updating a single PromoCodeUsage entity. +type PromoCodeUsageUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (_u *PromoCodeUsageUpdateOne) SetPromoCodeID(v int64) *PromoCodeUsageUpdateOne { + _u.mutation.SetPromoCodeID(v) + return _u +} + +// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetPromoCodeID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *PromoCodeUsageUpdateOne) SetUserID(v int64) *PromoCodeUsageUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableUserID(v *int64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUsageUpdateOne) SetBonusAmount(v float64) *PromoCodeUsageUpdateOne { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUsageUpdateOne) AddBonusAmount(v float64) *PromoCodeUsageUpdateOne { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *PromoCodeUsageUpdateOne) SetUsedAt(v time.Time) *PromoCodeUsageUpdateOne { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdateOne) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdateOne { + return _u.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdateOne) SetUser(v *User) *PromoCodeUsageUpdateOne { + return _u.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. +func (_u *PromoCodeUsageUpdateOne) Mutation() *PromoCodeUsageMutation { + return _u.mutation +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdateOne) ClearPromoCode() *PromoCodeUsageUpdateOne { + _u.mutation.ClearPromoCode() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdateOne) ClearUser() *PromoCodeUsageUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// Where appends a list predicates to the PromoCodeUsageUpdate builder. 
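+// A usage sketch ("client", "ctx", "usageID" and "newAmount" are assumed):
+//
+//	// UpdateOneID targets a single row; SaveX panics instead of
+//	// returning an error.
+//	usage := client.PromoCodeUsage.UpdateOneID(usageID).
+//		SetBonusAmount(newAmount).
+//		SaveX(ctx)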
+func (_u *PromoCodeUsageUpdateOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *PromoCodeUsageUpdateOne) Select(field string, fields ...string) *PromoCodeUsageUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated PromoCodeUsage entity. +func (_u *PromoCodeUsageUpdateOne) Save(ctx context.Context) (*PromoCodeUsage, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUsageUpdateOne) SaveX(ctx context.Context) *PromoCodeUsage { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *PromoCodeUsageUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUsageUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *PromoCodeUsageUpdateOne) check() error { + if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`) + } + return nil +} + +func (_u *PromoCodeUsageUpdateOne) sqlSave(ctx context.Context) (_node *PromoCodeUsage, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCodeUsage.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID) + for _, f := range fields { + if !promocodeusage.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != promocodeusage.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.PromoCodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if 
nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PromoCodeUsage{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocodeusage.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/proxy.go b/backend/ent/proxy.go new file mode 100644 index 00000000..5228b73e --- /dev/null +++ b/backend/ent/proxy.go @@ -0,0 +1,240 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// Proxy is the model entity for the Proxy schema. +type Proxy struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Protocol holds the value of the "protocol" field. + Protocol string `json:"protocol,omitempty"` + // Host holds the value of the "host" field. + Host string `json:"host,omitempty"` + // Port holds the value of the "port" field. + Port int `json:"port,omitempty"` + // Username holds the value of the "username" field. + Username *string `json:"username,omitempty"` + // Password holds the value of the "password" field. + Password *string `json:"password,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the ProxyQuery when eager-loading is set. + Edges ProxyEdges `json:"edges"` + selectValues sql.SelectValues +} + +// ProxyEdges holds the relations/edges for other nodes in the graph. 
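+// A usage sketch for eager-loading this edge ("client" and "ctx" are assumed):
+//
+//	// WithAccounts populates Edges.Accounts in one extra query, so
+//	// AccountsOrErr does not return a *NotLoadedError afterwards.
+//	proxies, err := client.Proxy.Query().
+//		WithAccounts().
+//		All(ctx)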
+type ProxyEdges struct { + // Accounts holds the value of the accounts edge. + Accounts []*Account `json:"accounts,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AccountsOrErr returns the Accounts value or an error if the edge +// was not loaded in eager-loading. +func (e ProxyEdges) AccountsOrErr() ([]*Account, error) { + if e.loadedTypes[0] { + return e.Accounts, nil + } + return nil, &NotLoadedError{edge: "accounts"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Proxy) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case proxy.FieldID, proxy.FieldPort: + values[i] = new(sql.NullInt64) + case proxy.FieldName, proxy.FieldProtocol, proxy.FieldHost, proxy.FieldUsername, proxy.FieldPassword, proxy.FieldStatus: + values[i] = new(sql.NullString) + case proxy.FieldCreatedAt, proxy.FieldUpdatedAt, proxy.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Proxy fields. +func (_m *Proxy) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case proxy.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case proxy.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case proxy.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case proxy.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case proxy.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case proxy.FieldProtocol: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field protocol", values[i]) + } else if value.Valid { + _m.Protocol = value.String + } + case proxy.FieldHost: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field host", values[i]) + } else if value.Valid { + _m.Host = value.String + } + case proxy.FieldPort: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field port", values[i]) + } else if value.Valid { + _m.Port = int(value.Int64) + } + case proxy.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + _m.Username = new(string) + *_m.Username = value.String + } + case proxy.FieldPassword: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field 
password", values[i]) + } else if value.Valid { + _m.Password = new(string) + *_m.Password = value.String + } + case proxy.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Proxy. +// This includes values selected through modifiers, order, etc. +func (_m *Proxy) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAccounts queries the "accounts" edge of the Proxy entity. +func (_m *Proxy) QueryAccounts() *AccountQuery { + return NewProxyClient(_m.config).QueryAccounts(_m) +} + +// Update returns a builder for updating this Proxy. +// Note that you need to call Proxy.Unwrap() before calling this method if this Proxy +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Proxy) Update() *ProxyUpdateOne { + return NewProxyClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Proxy entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Proxy) Unwrap() *Proxy { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Proxy is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Proxy) String() string { + var builder strings.Builder + builder.WriteString("Proxy(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("protocol=") + builder.WriteString(_m.Protocol) + builder.WriteString(", ") + builder.WriteString("host=") + builder.WriteString(_m.Host) + builder.WriteString(", ") + builder.WriteString("port=") + builder.WriteString(fmt.Sprintf("%v", _m.Port)) + builder.WriteString(", ") + if v := _m.Username; v != nil { + builder.WriteString("username=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.Password; v != nil { + builder.WriteString("password=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteByte(')') + return builder.String() +} + +// Proxies is a parsable slice of Proxy. +type Proxies []*Proxy diff --git a/backend/ent/proxy/proxy.go b/backend/ent/proxy/proxy.go new file mode 100644 index 00000000..db7abcda --- /dev/null +++ b/backend/ent/proxy/proxy.go @@ -0,0 +1,183 @@ +// Code generated by ent, DO NOT EDIT. + +package proxy + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the proxy type in the database. + Label = "proxy" + // FieldID holds the string denoting the id field in the database. 
+ FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldProtocol holds the string denoting the protocol field in the database. + FieldProtocol = "protocol" + // FieldHost holds the string denoting the host field in the database. + FieldHost = "host" + // FieldPort holds the string denoting the port field in the database. + FieldPort = "port" + // FieldUsername holds the string denoting the username field in the database. + FieldUsername = "username" + // FieldPassword holds the string denoting the password field in the database. + FieldPassword = "password" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // EdgeAccounts holds the string denoting the accounts edge name in mutations. + EdgeAccounts = "accounts" + // Table holds the table name of the proxy in the database. + Table = "proxies" + // AccountsTable is the table that holds the accounts relation/edge. + AccountsTable = "accounts" + // AccountsInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. + AccountsInverseTable = "accounts" + // AccountsColumn is the table column denoting the accounts relation/edge. + AccountsColumn = "proxy_id" +) + +// Columns holds all SQL columns for proxy fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldName, + FieldProtocol, + FieldHost, + FieldPort, + FieldUsername, + FieldPassword, + FieldStatus, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save. + ProtocolValidator func(string) error + // HostValidator is a validator for the "host" field. It is called by the builders before save. + HostValidator func(string) error + // UsernameValidator is a validator for the "username" field. It is called by the builders before save. + UsernameValidator func(string) error + // PasswordValidator is a validator for the "password" field. It is called by the builders before save. 
+ PasswordValidator func(string) error + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error +) + +// OrderOption defines the ordering options for the Proxy queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByProtocol orders the results by the protocol field. +func ByProtocol(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProtocol, opts...).ToFunc() +} + +// ByHost orders the results by the host field. +func ByHost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHost, opts...).ToFunc() +} + +// ByPort orders the results by the port field. +func ByPort(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPort, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByPassword orders the results by the password field. +func ByPassword(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPassword, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByAccountsCount orders the results by accounts count. +func ByAccountsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAccountsStep(), opts...) + } +} + +// ByAccounts orders the results by accounts terms. +func ByAccounts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAccountsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AccountsTable, AccountsColumn), + ) +} diff --git a/backend/ent/proxy/where.go b/backend/ent/proxy/where.go new file mode 100644 index 00000000..0a31ad7e --- /dev/null +++ b/backend/ent/proxy/where.go @@ -0,0 +1,724 @@ +// Code generated by ent, DO NOT EDIT. + +package proxy + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. 
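+// A usage sketch ("client" and "ctx" are assumed; "active" is an assumed
+// status value, not one defined by this package):
+//
+//	// Predicates compose: multiple arguments to Where are ANDed together.
+//	active, err := client.Proxy.Query().
+//		Where(proxy.StatusEQ("active"), proxy.DeletedAtIsNil()).
+//		All(ctx)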
+func ID(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldName, v)) +} + +// Protocol applies equality check predicate on the "protocol" field. It's identical to ProtocolEQ. +func Protocol(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldProtocol, v)) +} + +// Host applies equality check predicate on the "host" field. It's identical to HostEQ. +func Host(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldHost, v)) +} + +// Port applies equality check predicate on the "port" field. It's identical to PortEQ. +func Port(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPort, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. +func Username(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldUsername, v)) +} + +// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ. +func Password(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPassword, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldStatus, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. 
+func DeletedAtGT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.Proxy { + return predicate.Proxy(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.Proxy { + return predicate.Proxy(sql.FieldNotNull(FieldDeletedAt)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldName, v)) +} + +// ProtocolEQ applies the EQ predicate on the "protocol" field. +func ProtocolEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldProtocol, v)) +} + +// ProtocolNEQ applies the NEQ predicate on the "protocol" field. 
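+//
+// For example, selecting every proxy that is not plain HTTP ("client"/"ctx"
+// assumed; "http" is a sample value, not one enforced by this package):
+//
+//	nonHTTP, err := client.Proxy.Query().
+//		Where(proxy.ProtocolNEQ("http")).
+//		All(ctx)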
+func ProtocolNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldProtocol, v)) +} + +// ProtocolIn applies the In predicate on the "protocol" field. +func ProtocolIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldProtocol, vs...)) +} + +// ProtocolNotIn applies the NotIn predicate on the "protocol" field. +func ProtocolNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldProtocol, vs...)) +} + +// ProtocolGT applies the GT predicate on the "protocol" field. +func ProtocolGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldProtocol, v)) +} + +// ProtocolGTE applies the GTE predicate on the "protocol" field. +func ProtocolGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldProtocol, v)) +} + +// ProtocolLT applies the LT predicate on the "protocol" field. +func ProtocolLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldProtocol, v)) +} + +// ProtocolLTE applies the LTE predicate on the "protocol" field. +func ProtocolLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldProtocol, v)) +} + +// ProtocolContains applies the Contains predicate on the "protocol" field. +func ProtocolContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldProtocol, v)) +} + +// ProtocolHasPrefix applies the HasPrefix predicate on the "protocol" field. +func ProtocolHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldProtocol, v)) +} + +// ProtocolHasSuffix applies the HasSuffix predicate on the "protocol" field. +func ProtocolHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldProtocol, v)) +} + +// ProtocolEqualFold applies the EqualFold predicate on the "protocol" field. +func ProtocolEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldProtocol, v)) +} + +// ProtocolContainsFold applies the ContainsFold predicate on the "protocol" field. +func ProtocolContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldProtocol, v)) +} + +// HostEQ applies the EQ predicate on the "host" field. +func HostEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldHost, v)) +} + +// HostNEQ applies the NEQ predicate on the "host" field. +func HostNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldHost, v)) +} + +// HostIn applies the In predicate on the "host" field. +func HostIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldHost, vs...)) +} + +// HostNotIn applies the NotIn predicate on the "host" field. +func HostNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldHost, vs...)) +} + +// HostGT applies the GT predicate on the "host" field. +func HostGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldHost, v)) +} + +// HostGTE applies the GTE predicate on the "host" field. +func HostGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldHost, v)) +} + +// HostLT applies the LT predicate on the "host" field. +func HostLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldHost, v)) +} + +// HostLTE applies the LTE predicate on the "host" field. +func HostLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldHost, v)) +} + +// HostContains applies the Contains predicate on the "host" field. 
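+//
+// Contains compiles to a substring match (LIKE '%v%'). A sketch with assumed
+// "client"/"ctx" and a hypothetical ".internal" host-naming convention:
+//
+//	internal, err := client.Proxy.Query().
+//		Where(proxy.HostContains(".internal")).
+//		All(ctx)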
+func HostContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldHost, v)) +} + +// HostHasPrefix applies the HasPrefix predicate on the "host" field. +func HostHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldHost, v)) +} + +// HostHasSuffix applies the HasSuffix predicate on the "host" field. +func HostHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldHost, v)) +} + +// HostEqualFold applies the EqualFold predicate on the "host" field. +func HostEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldHost, v)) +} + +// HostContainsFold applies the ContainsFold predicate on the "host" field. +func HostContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldHost, v)) +} + +// PortEQ applies the EQ predicate on the "port" field. +func PortEQ(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPort, v)) +} + +// PortNEQ applies the NEQ predicate on the "port" field. +func PortNEQ(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldPort, v)) +} + +// PortIn applies the In predicate on the "port" field. +func PortIn(vs ...int) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldPort, vs...)) +} + +// PortNotIn applies the NotIn predicate on the "port" field. +func PortNotIn(vs ...int) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldPort, vs...)) +} + +// PortGT applies the GT predicate on the "port" field. +func PortGT(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldPort, v)) +} + +// PortGTE applies the GTE predicate on the "port" field. +func PortGTE(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldPort, v)) +} + +// PortLT applies the LT predicate on the "port" field. +func PortLT(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldPort, v)) +} + +// PortLTE applies the LTE predicate on the "port" field. +func PortLTE(v int) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldPort, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. +func UsernameEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. 
+func UsernameLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameIsNil applies the IsNil predicate on the "username" field. +func UsernameIsNil() predicate.Proxy { + return predicate.Proxy(sql.FieldIsNull(FieldUsername)) +} + +// UsernameNotNil applies the NotNil predicate on the "username" field. +func UsernameNotNil() predicate.Proxy { + return predicate.Proxy(sql.FieldNotNull(FieldUsername)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. +func UsernameEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldUsername, v)) +} + +// PasswordEQ applies the EQ predicate on the "password" field. +func PasswordEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldPassword, v)) +} + +// PasswordNEQ applies the NEQ predicate on the "password" field. +func PasswordNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldPassword, v)) +} + +// PasswordIn applies the In predicate on the "password" field. +func PasswordIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldPassword, vs...)) +} + +// PasswordNotIn applies the NotIn predicate on the "password" field. +func PasswordNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldPassword, vs...)) +} + +// PasswordGT applies the GT predicate on the "password" field. +func PasswordGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldPassword, v)) +} + +// PasswordGTE applies the GTE predicate on the "password" field. +func PasswordGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldPassword, v)) +} + +// PasswordLT applies the LT predicate on the "password" field. +func PasswordLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldPassword, v)) +} + +// PasswordLTE applies the LTE predicate on the "password" field. +func PasswordLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldPassword, v)) +} + +// PasswordContains applies the Contains predicate on the "password" field. +func PasswordContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldPassword, v)) +} + +// PasswordHasPrefix applies the HasPrefix predicate on the "password" field. +func PasswordHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldPassword, v)) +} + +// PasswordHasSuffix applies the HasSuffix predicate on the "password" field. +func PasswordHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldPassword, v)) +} + +// PasswordIsNil applies the IsNil predicate on the "password" field. 
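+//
+// IsNil matches SQL NULL, i.e. proxies presumably stored without credentials
+// in this schema ("client"/"ctx" assumed):
+//
+//	open, err := client.Proxy.Query().
+//		Where(proxy.UsernameIsNil(), proxy.PasswordIsNil()).
+//		All(ctx)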
+func PasswordIsNil() predicate.Proxy { + return predicate.Proxy(sql.FieldIsNull(FieldPassword)) +} + +// PasswordNotNil applies the NotNil predicate on the "password" field. +func PasswordNotNil() predicate.Proxy { + return predicate.Proxy(sql.FieldNotNull(FieldPassword)) +} + +// PasswordEqualFold applies the EqualFold predicate on the "password" field. +func PasswordEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldPassword, v)) +} + +// PasswordContainsFold applies the ContainsFold predicate on the "password" field. +func PasswordContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldPassword, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.Proxy { + return predicate.Proxy(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.Proxy { + return predicate.Proxy(sql.FieldContainsFold(FieldStatus, v)) +} + +// HasAccounts applies the HasEdge predicate on the "accounts" edge. +func HasAccounts() predicate.Proxy { + return predicate.Proxy(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AccountsTable, AccountsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountsWith applies the HasEdge predicate on the "accounts" edge with a given conditions (other predicates). 
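+//
+// The nested predicates come from the generated account package
+// (github.com/Wei-Shaw/sub2api/ent/account); the IDs below are placeholders,
+// and "client"/"ctx" are assumed:
+//
+//	used, err := client.Proxy.Query().
+//		Where(proxy.HasAccountsWith(account.IDIn(1, 2, 3))).
+//		All(ctx)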
+func HasAccountsWith(preds ...predicate.Account) predicate.Proxy { + return predicate.Proxy(func(s *sql.Selector) { + step := newAccountsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Proxy) predicate.Proxy { + return predicate.Proxy(sql.NotPredicates(p)) +} diff --git a/backend/ent/proxy_create.go b/backend/ent/proxy_create.go new file mode 100644 index 00000000..9687aaa2 --- /dev/null +++ b/backend/ent/proxy_create.go @@ -0,0 +1,1112 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyCreate is the builder for creating a Proxy entity. +type ProxyCreate struct { + config + mutation *ProxyMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *ProxyCreate) SetCreatedAt(v time.Time) *ProxyCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableCreatedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *ProxyCreate) SetUpdatedAt(v time.Time) *ProxyCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableUpdatedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *ProxyCreate) SetDeletedAt(v time.Time) *ProxyCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableDeletedAt(v *time.Time) *ProxyCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *ProxyCreate) SetName(v string) *ProxyCreate { + _c.mutation.SetName(v) + return _c +} + +// SetProtocol sets the "protocol" field. +func (_c *ProxyCreate) SetProtocol(v string) *ProxyCreate { + _c.mutation.SetProtocol(v) + return _c +} + +// SetHost sets the "host" field. +func (_c *ProxyCreate) SetHost(v string) *ProxyCreate { + _c.mutation.SetHost(v) + return _c +} + +// SetPort sets the "port" field. +func (_c *ProxyCreate) SetPort(v int) *ProxyCreate { + _c.mutation.SetPort(v) + return _c +} + +// SetUsername sets the "username" field. +func (_c *ProxyCreate) SetUsername(v string) *ProxyCreate { + _c.mutation.SetUsername(v) + return _c +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableUsername(v *string) *ProxyCreate { + if v != nil { + _c.SetUsername(*v) + } + return _c +} + +// SetPassword sets the "password" field. 
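+//
+// Shown below as part of a full create chain; all values are samples and
+// "client"/"ctx" are assumed:
+//
+//	p, err := client.Proxy.Create().
+//		SetName("edge-1").
+//		SetProtocol("socks5").
+//		SetHost("10.0.0.1").
+//		SetPort(1080).
+//		SetUsername("user").
+//		SetPassword("secret").
+//		Save(ctx)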
+func (_c *ProxyCreate) SetPassword(v string) *ProxyCreate { + _c.mutation.SetPassword(v) + return _c +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_c *ProxyCreate) SetNillablePassword(v *string) *ProxyCreate { + if v != nil { + _c.SetPassword(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *ProxyCreate) SetStatus(v string) *ProxyCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *ProxyCreate) SetNillableStatus(v *string) *ProxyCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_c *ProxyCreate) AddAccountIDs(ids ...int64) *ProxyCreate { + _c.mutation.AddAccountIDs(ids...) + return _c +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_c *ProxyCreate) AddAccounts(v ...*Account) *ProxyCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAccountIDs(ids...) +} + +// Mutation returns the ProxyMutation object of the builder. +func (_c *ProxyCreate) Mutation() *ProxyMutation { + return _c.mutation +} + +// Save creates the Proxy in the database. +func (_c *ProxyCreate) Save(ctx context.Context) (*Proxy, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *ProxyCreate) SaveX(ctx context.Context) *Proxy { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ProxyCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ProxyCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *ProxyCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if proxy.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := proxy.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if proxy.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := proxy.DefaultStatus + _c.mutation.SetStatus(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
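+//
+// Failures here surface from Save/Exec as *ValidationError values, so callers
+// can branch on them. A sketch ("client"/"ctx" assumed; the empty name is
+// only illustrative of a value a validator might reject):
+//
+//	_, err := client.Proxy.Create().
+//		SetName("").SetProtocol("socks5").SetHost("h").SetPort(1080).
+//		Save(ctx)
+//	if ent.IsValidationError(err) {
+//		// handle invalid input
+//	}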
+func (_c *ProxyCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Proxy.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Proxy.updated_at"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Proxy.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if _, ok := _c.mutation.Protocol(); !ok { + return &ValidationError{Name: "protocol", err: errors.New(`ent: missing required field "Proxy.protocol"`)} + } + if v, ok := _c.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if _, ok := _c.mutation.Host(); !ok { + return &ValidationError{Name: "host", err: errors.New(`ent: missing required field "Proxy.host"`)} + } + if v, ok := _c.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if _, ok := _c.mutation.Port(); !ok { + return &ValidationError{Name: "port", err: errors.New(`ent: missing required field "Proxy.port"`)} + } + if v, ok := _c.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _c.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Proxy.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_c *ProxyCreate) sqlSave(ctx context.Context) (*Proxy, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *ProxyCreate) createSpec() (*Proxy, *sqlgraph.CreateSpec) { + var ( + _node = &Proxy{config: _c.config} + _spec = sqlgraph.NewCreateSpec(proxy.Table, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(proxy.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, 
field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + _node.Protocol = value + } + if value, ok := _c.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + _node.Host = value + } + if value, ok := _c.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + _node.Port = value + } + if value, ok := _c.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + _node.Username = &value + } + if value, ok := _c.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + _node.Password = &value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + _node.Status = value + } + if nodes := _c.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Proxy.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ProxyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ProxyCreate) OnConflict(opts ...sql.ConflictOption) *ProxyUpsertOne { + _c.conflict = opts + return &ProxyUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ProxyCreate) OnConflictColumns(columns ...string) *ProxyUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ProxyUpsertOne{ + create: _c, + } +} + +type ( + // ProxyUpsertOne is the builder for "upsert"-ing + // one Proxy node. + ProxyUpsertOne struct { + create *ProxyCreate + } + + // ProxyUpsert is the "OnConflict" setter. + ProxyUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *ProxyUpsert) SetUpdatedAt(v time.Time) *ProxyUpsert { + u.Set(proxy.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateUpdatedAt() *ProxyUpsert { + u.SetExcluded(proxy.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsert) SetDeletedAt(v time.Time) *ProxyUpsert { + u.Set(proxy.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateDeletedAt() *ProxyUpsert { + u.SetExcluded(proxy.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
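+//
+// Inside an upsert this can "revive" a soft-deleted row. A sketch, assuming
+// "client"/"ctx" and a unique index on "name" so the conflict target below is
+// valid:
+//
+//	err := client.Proxy.Create().
+//		SetName("edge-1").
+//		SetProtocol("socks5").
+//		SetHost("10.0.0.1").
+//		SetPort(1080).
+//		OnConflictColumns(proxy.FieldName).
+//		Update(func(u *ent.ProxyUpsert) {
+//			u.ClearDeletedAt()
+//		}).
+//		Exec(ctx)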
+func (u *ProxyUpsert) ClearDeletedAt() *ProxyUpsert { + u.SetNull(proxy.FieldDeletedAt) + return u +} + +// SetName sets the "name" field. +func (u *ProxyUpsert) SetName(v string) *ProxyUpsert { + u.Set(proxy.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateName() *ProxyUpsert { + u.SetExcluded(proxy.FieldName) + return u +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsert) SetProtocol(v string) *ProxyUpsert { + u.Set(proxy.FieldProtocol, v) + return u +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateProtocol() *ProxyUpsert { + u.SetExcluded(proxy.FieldProtocol) + return u +} + +// SetHost sets the "host" field. +func (u *ProxyUpsert) SetHost(v string) *ProxyUpsert { + u.Set(proxy.FieldHost, v) + return u +} + +// UpdateHost sets the "host" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateHost() *ProxyUpsert { + u.SetExcluded(proxy.FieldHost) + return u +} + +// SetPort sets the "port" field. +func (u *ProxyUpsert) SetPort(v int) *ProxyUpsert { + u.Set(proxy.FieldPort, v) + return u +} + +// UpdatePort sets the "port" field to the value that was provided on create. +func (u *ProxyUpsert) UpdatePort() *ProxyUpsert { + u.SetExcluded(proxy.FieldPort) + return u +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsert) AddPort(v int) *ProxyUpsert { + u.Add(proxy.FieldPort, v) + return u +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsert) SetUsername(v string) *ProxyUpsert { + u.Set(proxy.FieldUsername, v) + return u +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateUsername() *ProxyUpsert { + u.SetExcluded(proxy.FieldUsername) + return u +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsert) ClearUsername() *ProxyUpsert { + u.SetNull(proxy.FieldUsername) + return u +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsert) SetPassword(v string) *ProxyUpsert { + u.Set(proxy.FieldPassword, v) + return u +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsert) UpdatePassword() *ProxyUpsert { + u.SetExcluded(proxy.FieldPassword) + return u +} + +// ClearPassword clears the value of the "password" field. +func (u *ProxyUpsert) ClearPassword() *ProxyUpsert { + u.SetNull(proxy.FieldPassword) + return u +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsert) SetStatus(v string) *ProxyUpsert { + u.Set(proxy.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsert) UpdateStatus() *ProxyUpsert { + u.SetExcluded(proxy.FieldStatus) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ProxyUpsertOne) UpdateNewValues() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(proxy.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. 
+// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ProxyUpsertOne) Ignore() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ProxyUpsertOne) DoNothing() *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ProxyCreate.OnConflict +// documentation for more info. +func (u *ProxyUpsertOne) Update(set func(*ProxyUpsert)) *ProxyUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ProxyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ProxyUpsertOne) SetUpdatedAt(v time.Time) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateUpdatedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsertOne) SetDeletedAt(v time.Time) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateDeletedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ProxyUpsertOne) ClearDeletedAt() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *ProxyUpsertOne) SetName(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateName() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateName() + }) +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsertOne) SetProtocol(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetProtocol(v) + }) +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateProtocol() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateProtocol() + }) +} + +// SetHost sets the "host" field. +func (u *ProxyUpsertOne) SetHost(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetHost(v) + }) +} + +// UpdateHost sets the "host" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateHost() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateHost() + }) +} + +// SetPort sets the "port" field. +func (u *ProxyUpsertOne) SetPort(v int) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetPort(v) + }) +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsertOne) AddPort(v int) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.AddPort(v) + }) +} + +// UpdatePort sets the "port" field to the value that was provided on create. 
+func (u *ProxyUpsertOne) UpdatePort() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePort() + }) +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsertOne) SetUsername(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateUsername() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUsername() + }) +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsertOne) ClearUsername() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearUsername() + }) +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsertOne) SetPassword(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetPassword(v) + }) +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdatePassword() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePassword() + }) +} + +// ClearPassword clears the value of the "password" field. +func (u *ProxyUpsertOne) ClearPassword() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.ClearPassword() + }) +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsertOne) SetStatus(v string) *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsertOne) UpdateStatus() *ProxyUpsertOne { + return u.Update(func(s *ProxyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ProxyUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ProxyCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ProxyUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ProxyUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ProxyUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ProxyCreateBulk is the builder for creating many Proxy entities in bulk. +type ProxyCreateBulk struct { + config + err error + builders []*ProxyCreate + conflict []sql.ConflictOption +} + +// Save creates the Proxy entities in the database. 
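+//
+// Bulk creation batches all builders into a single INSERT statement. A sketch
+// with assumed "client"/"ctx" and sample values:
+//
+//	proxies, err := client.Proxy.CreateBulk(
+//		client.Proxy.Create().SetName("a").SetProtocol("http").SetHost("h1").SetPort(8080),
+//		client.Proxy.Create().SetName("b").SetProtocol("socks5").SetHost("h2").SetPort(1080),
+//	).Save(ctx)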
+func (_c *ProxyCreateBulk) Save(ctx context.Context) ([]*Proxy, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Proxy, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ProxyMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *ProxyCreateBulk) SaveX(ctx context.Context) []*Proxy { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *ProxyCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *ProxyCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Proxy.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ProxyUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *ProxyCreateBulk) OnConflict(opts ...sql.ConflictOption) *ProxyUpsertBulk { + _c.conflict = opts + return &ProxyUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *ProxyCreateBulk) OnConflictColumns(columns ...string) *ProxyUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &ProxyUpsertBulk{ + create: _c, + } +} + +// ProxyUpsertBulk is the builder for "upsert"-ing +// a bulk of Proxy nodes. +type ProxyUpsertBulk struct { + create *ProxyCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Proxy.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ProxyUpsertBulk) UpdateNewValues() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(proxy.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Proxy.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ProxyUpsertBulk) Ignore() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ProxyUpsertBulk) DoNothing() *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ProxyCreateBulk.OnConflict +// documentation for more info. +func (u *ProxyUpsertBulk) Update(set func(*ProxyUpsert)) *ProxyUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ProxyUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ProxyUpsertBulk) SetUpdatedAt(v time.Time) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateUpdatedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *ProxyUpsertBulk) SetDeletedAt(v time.Time) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateDeletedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *ProxyUpsertBulk) ClearDeletedAt() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearDeletedAt() + }) +} + +// SetName sets the "name" field. +func (u *ProxyUpsertBulk) SetName(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateName() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateName() + }) +} + +// SetProtocol sets the "protocol" field. +func (u *ProxyUpsertBulk) SetProtocol(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetProtocol(v) + }) +} + +// UpdateProtocol sets the "protocol" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateProtocol() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateProtocol() + }) +} + +// SetHost sets the "host" field. +func (u *ProxyUpsertBulk) SetHost(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetHost(v) + }) +} + +// UpdateHost sets the "host" field to the value that was provided on create. 
+func (u *ProxyUpsertBulk) UpdateHost() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateHost() + }) +} + +// SetPort sets the "port" field. +func (u *ProxyUpsertBulk) SetPort(v int) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetPort(v) + }) +} + +// AddPort adds v to the "port" field. +func (u *ProxyUpsertBulk) AddPort(v int) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.AddPort(v) + }) +} + +// UpdatePort sets the "port" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdatePort() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePort() + }) +} + +// SetUsername sets the "username" field. +func (u *ProxyUpsertBulk) SetUsername(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateUsername() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateUsername() + }) +} + +// ClearUsername clears the value of the "username" field. +func (u *ProxyUpsertBulk) ClearUsername() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearUsername() + }) +} + +// SetPassword sets the "password" field. +func (u *ProxyUpsertBulk) SetPassword(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetPassword(v) + }) +} + +// UpdatePassword sets the "password" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdatePassword() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdatePassword() + }) +} + +// ClearPassword clears the value of the "password" field. +func (u *ProxyUpsertBulk) ClearPassword() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.ClearPassword() + }) +} + +// SetStatus sets the "status" field. +func (u *ProxyUpsertBulk) SetStatus(v string) *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *ProxyUpsertBulk) UpdateStatus() *ProxyUpsertBulk { + return u.Update(func(s *ProxyUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *ProxyUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ProxyCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ProxyCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ProxyUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/proxy_delete.go b/backend/ent/proxy_delete.go new file mode 100644 index 00000000..eeeea58b --- /dev/null +++ b/backend/ent/proxy_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyDelete is the builder for deleting a Proxy entity. +type ProxyDelete struct { + config + hooks []Hook + mutation *ProxyMutation +} + +// Where appends a list predicates to the ProxyDelete builder. 
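+//
+// A sketch ("client"/"ctx" assumed; "disabled" is a sample status value).
+// Note that the soft-delete mixin in this schema may rewrite deletes into
+// "deleted_at" updates via hooks rather than issuing hard DELETEs:
+//
+//	n, err := client.Proxy.Delete().
+//		Where(proxy.StatusEQ("disabled")).
+//		Exec(ctx)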
+func (_d *ProxyDelete) Where(ps ...predicate.Proxy) *ProxyDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *ProxyDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ProxyDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *ProxyDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(proxy.Table, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// ProxyDeleteOne is the builder for deleting a single Proxy entity. +type ProxyDeleteOne struct { + _d *ProxyDelete +} + +// Where appends a list predicates to the ProxyDelete builder. +func (_d *ProxyDeleteOne) Where(ps ...predicate.Proxy) *ProxyDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *ProxyDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{proxy.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *ProxyDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/proxy_query.go b/backend/ent/proxy_query.go new file mode 100644 index 00000000..b817d139 --- /dev/null +++ b/backend/ent/proxy_query.go @@ -0,0 +1,646 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyQuery is the builder for querying Proxy entities. +type ProxyQuery struct { + config + ctx *QueryContext + order []proxy.OrderOption + inters []Interceptor + predicates []predicate.Proxy + withAccounts *AccountQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ProxyQuery builder. +func (_q *ProxyQuery) Where(ps ...predicate.Proxy) *ProxyQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *ProxyQuery) Limit(limit int) *ProxyQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *ProxyQuery) Offset(offset int) *ProxyQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *ProxyQuery) Unique(unique bool) *ProxyQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. 
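+//
+// A sketch, assuming "client"/"ctx" and the generated proxy.ByCreatedAt order
+// option for the "created_at" column:
+//
+//	newest, err := client.Proxy.Query().
+//		Order(proxy.ByCreatedAt(sql.OrderDesc())).
+//		Limit(10).
+//		All(ctx)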
+func (_q *ProxyQuery) Order(o ...proxy.OrderOption) *ProxyQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAccounts chains the current query on the "accounts" edge. +func (_q *ProxyQuery) QueryAccounts() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(proxy.Table, proxy.FieldID, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, proxy.AccountsTable, proxy.AccountsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Proxy entity from the query. +// Returns a *NotFoundError when no Proxy was found. +func (_q *ProxyQuery) First(ctx context.Context) (*Proxy, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{proxy.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *ProxyQuery) FirstX(ctx context.Context) *Proxy { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Proxy ID from the query. +// Returns a *NotFoundError when no Proxy ID was found. +func (_q *ProxyQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{proxy.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *ProxyQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Proxy entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Proxy entity is found. +// Returns a *NotFoundError when no Proxy entities are found. +func (_q *ProxyQuery) Only(ctx context.Context) (*Proxy, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{proxy.Label} + default: + return nil, &NotSingularError{proxy.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *ProxyQuery) OnlyX(ctx context.Context) *Proxy { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Proxy ID in the query. +// Returns a *NotSingularError when more than one Proxy ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *ProxyQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{proxy.Label} + default: + err = &NotSingularError{proxy.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
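+//
+// Because it panics on error, OnlyIDX suits code paths where a missing or
+// duplicate row is a programming bug. A sketch ("client"/"ctx" assumed,
+// "edge-1" a sample name):
+//
+//	id := client.Proxy.Query().
+//		Where(proxy.NameEQ("edge-1")).
+//		OnlyIDX(ctx)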
+func (_q *ProxyQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Proxies. +func (_q *ProxyQuery) All(ctx context.Context) ([]*Proxy, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Proxy, *ProxyQuery]() + return withInterceptors[[]*Proxy](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *ProxyQuery) AllX(ctx context.Context) []*Proxy { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Proxy IDs. +func (_q *ProxyQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(proxy.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *ProxyQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *ProxyQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*ProxyQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *ProxyQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *ProxyQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *ProxyQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ProxyQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *ProxyQuery) Clone() *ProxyQuery { + if _q == nil { + return nil + } + return &ProxyQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]proxy.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Proxy{}, _q.predicates...), + withAccounts: _q.withAccounts.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAccounts tells the query-builder to eager-load the nodes that are connected to +// the "accounts" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *ProxyQuery) WithAccounts(opts ...func(*AccountQuery)) *ProxyQuery { + query := (&AccountClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAccounts = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Proxy.Query(). +// GroupBy(proxy.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *ProxyQuery) GroupBy(field string, fields ...string) *ProxyGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &ProxyGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = proxy.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.Proxy.Query(). +// Select(proxy.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *ProxyQuery) Select(fields ...string) *ProxySelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &ProxySelect{ProxyQuery: _q} + sbuild.label = proxy.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ProxySelect configured with the given aggregations. +func (_q *ProxyQuery) Aggregate(fns ...AggregateFunc) *ProxySelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *ProxyQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !proxy.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *ProxyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proxy, error) { + var ( + nodes = []*Proxy{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withAccounts != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Proxy).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Proxy{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAccounts; query != nil { + if err := _q.loadAccounts(ctx, query, nodes, + func(n *Proxy) { n.Edges.Accounts = []*Account{} }, + func(n *Proxy, e *Account) { n.Edges.Accounts = append(n.Edges.Accounts, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *ProxyQuery) loadAccounts(ctx context.Context, query *AccountQuery, nodes []*Proxy, init func(*Proxy), assign func(*Proxy, *Account)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*Proxy) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(account.FieldProxyID) + } + query.Where(predicate.Account(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(proxy.AccountsColumn), 
fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.ProxyID + if fk == nil { + return fmt.Errorf(`foreign-key "proxy_id" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "proxy_id" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *ProxyQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, proxy.FieldID) + for i := range fields { + if fields[i] != proxy.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(proxy.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = proxy.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *ProxyQuery) ForUpdate(opts ...sql.LockOption) *ProxyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *ProxyQuery) ForShare(opts ...sql.LockOption) *ProxyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// ProxyGroupBy is the group-by builder for Proxy entities. +type ProxyGroupBy struct { + selector + build *ProxyQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *ProxyGroupBy) Aggregate(fns ...AggregateFunc) *ProxyGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *ProxyGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ProxyQuery, *ProxyGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *ProxyGroupBy) sqlScan(ctx context.Context, root *ProxyQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ProxySelect is the builder for selecting fields of Proxy entities. +type ProxySelect struct { + *ProxyQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *ProxySelect) Aggregate(fns ...AggregateFunc) *ProxySelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *ProxySelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ProxyQuery, *ProxySelect](ctx, _s.ProxyQuery, _s, _s.inters, v) +} + +func (_s *ProxySelect) sqlScan(ctx context.Context, root *ProxyQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/proxy_update.go b/backend/ent/proxy_update.go new file mode 100644 index 00000000..d487857f --- /dev/null +++ b/backend/ent/proxy_update.go @@ -0,0 +1,809 @@ +// Code generated by ent, DO NOT EDIT. 
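+
+// Usage sketch (illustrative, not generated): a minimal example of the
+// ProxyQuery builder defined in proxy_query.go above. It assumes an
+// initialized *ent.Client named "client" and a context "ctx", and relies on
+// the generated string predicates in the proxy package (e.g. proxy.StatusEQ);
+// the "active" status value is hypothetical.
+//
+//	// Fetch up to 10 active proxies, eager-loading their accounts edge.
+//	proxies, err := client.Proxy.Query().
+//		Where(proxy.StatusEQ("active")).
+//		WithAccounts().
+//		Limit(10).
+//		All(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for _, p := range proxies {
+//		_ = p.Edges.Accounts // populated by WithAccounts
+//	}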
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/proxy" +) + +// ProxyUpdate is the builder for updating Proxy entities. +type ProxyUpdate struct { + config + hooks []Hook + mutation *ProxyMutation +} + +// Where appends a list predicates to the ProxyUpdate builder. +func (_u *ProxyUpdate) Where(ps ...predicate.Proxy) *ProxyUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ProxyUpdate) SetUpdatedAt(v time.Time) *ProxyUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ProxyUpdate) SetDeletedAt(v time.Time) *ProxyUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableDeletedAt(v *time.Time) *ProxyUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ProxyUpdate) ClearDeletedAt() *ProxyUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *ProxyUpdate) SetName(v string) *ProxyUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableName(v *string) *ProxyUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProtocol sets the "protocol" field. +func (_u *ProxyUpdate) SetProtocol(v string) *ProxyUpdate { + _u.mutation.SetProtocol(v) + return _u +} + +// SetNillableProtocol sets the "protocol" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableProtocol(v *string) *ProxyUpdate { + if v != nil { + _u.SetProtocol(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *ProxyUpdate) SetHost(v string) *ProxyUpdate { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableHost(v *string) *ProxyUpdate { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// SetPort sets the "port" field. +func (_u *ProxyUpdate) SetPort(v int) *ProxyUpdate { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillablePort(v *int) *ProxyUpdate { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. +func (_u *ProxyUpdate) AddPort(v int) *ProxyUpdate { + _u.mutation.AddPort(v) + return _u +} + +// SetUsername sets the "username" field. +func (_u *ProxyUpdate) SetUsername(v string) *ProxyUpdate { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableUsername(v *string) *ProxyUpdate { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *ProxyUpdate) ClearUsername() *ProxyUpdate { + _u.mutation.ClearUsername() + return _u +} + +// SetPassword sets the "password" field. 
+func (_u *ProxyUpdate) SetPassword(v string) *ProxyUpdate { + _u.mutation.SetPassword(v) + return _u +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillablePassword(v *string) *ProxyUpdate { + if v != nil { + _u.SetPassword(*v) + } + return _u +} + +// ClearPassword clears the value of the "password" field. +func (_u *ProxyUpdate) ClearPassword() *ProxyUpdate { + _u.mutation.ClearPassword() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ProxyUpdate) SetStatus(v string) *ProxyUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ProxyUpdate) SetNillableStatus(v *string) *ProxyUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_u *ProxyUpdate) AddAccountIDs(ids ...int64) *ProxyUpdate { + _u.mutation.AddAccountIDs(ids...) + return _u +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_u *ProxyUpdate) AddAccounts(v ...*Account) *ProxyUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAccountIDs(ids...) +} + +// Mutation returns the ProxyMutation object of the builder. +func (_u *ProxyUpdate) Mutation() *ProxyMutation { + return _u.mutation +} + +// ClearAccounts clears all "accounts" edges to the Account entity. +func (_u *ProxyUpdate) ClearAccounts() *ProxyUpdate { + _u.mutation.ClearAccounts() + return _u +} + +// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs. +func (_u *ProxyUpdate) RemoveAccountIDs(ids ...int64) *ProxyUpdate { + _u.mutation.RemoveAccountIDs(ids...) + return _u +} + +// RemoveAccounts removes "accounts" edges to Account entities. +func (_u *ProxyUpdate) RemoveAccounts(v ...*Account) *ProxyUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAccountIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *ProxyUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ProxyUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *ProxyUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ProxyUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ProxyUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if proxy.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *ProxyUpdate) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if v, ok := _u.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if v, ok := _u.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _u.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_u *ProxyUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(proxy.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + _spec.AddField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(proxy.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + } + if _u.mutation.PasswordCleared() { + _spec.ClearField(proxy.FieldPassword, field.TypeString) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + } + if _u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
_u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{proxy.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// ProxyUpdateOne is the builder for updating a single Proxy entity. +type ProxyUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ProxyMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *ProxyUpdateOne) SetUpdatedAt(v time.Time) *ProxyUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *ProxyUpdateOne) SetDeletedAt(v time.Time) *ProxyUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableDeletedAt(v *time.Time) *ProxyUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *ProxyUpdateOne) ClearDeletedAt() *ProxyUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetName sets the "name" field. +func (_u *ProxyUpdateOne) SetName(v string) *ProxyUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableName(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetProtocol sets the "protocol" field. +func (_u *ProxyUpdateOne) SetProtocol(v string) *ProxyUpdateOne { + _u.mutation.SetProtocol(v) + return _u +} + +// SetNillableProtocol sets the "protocol" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableProtocol(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetProtocol(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *ProxyUpdateOne) SetHost(v string) *ProxyUpdateOne { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableHost(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// SetPort sets the "port" field. +func (_u *ProxyUpdateOne) SetPort(v int) *ProxyUpdateOne { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. 
+func (_u *ProxyUpdateOne) SetNillablePort(v *int) *ProxyUpdateOne { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. +func (_u *ProxyUpdateOne) AddPort(v int) *ProxyUpdateOne { + _u.mutation.AddPort(v) + return _u +} + +// SetUsername sets the "username" field. +func (_u *ProxyUpdateOne) SetUsername(v string) *ProxyUpdateOne { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableUsername(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *ProxyUpdateOne) ClearUsername() *ProxyUpdateOne { + _u.mutation.ClearUsername() + return _u +} + +// SetPassword sets the "password" field. +func (_u *ProxyUpdateOne) SetPassword(v string) *ProxyUpdateOne { + _u.mutation.SetPassword(v) + return _u +} + +// SetNillablePassword sets the "password" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillablePassword(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetPassword(*v) + } + return _u +} + +// ClearPassword clears the value of the "password" field. +func (_u *ProxyUpdateOne) ClearPassword() *ProxyUpdateOne { + _u.mutation.ClearPassword() + return _u +} + +// SetStatus sets the "status" field. +func (_u *ProxyUpdateOne) SetStatus(v string) *ProxyUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *ProxyUpdateOne) SetNillableStatus(v *string) *ProxyUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// AddAccountIDs adds the "accounts" edge to the Account entity by IDs. +func (_u *ProxyUpdateOne) AddAccountIDs(ids ...int64) *ProxyUpdateOne { + _u.mutation.AddAccountIDs(ids...) + return _u +} + +// AddAccounts adds the "accounts" edges to the Account entity. +func (_u *ProxyUpdateOne) AddAccounts(v ...*Account) *ProxyUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAccountIDs(ids...) +} + +// Mutation returns the ProxyMutation object of the builder. +func (_u *ProxyUpdateOne) Mutation() *ProxyMutation { + return _u.mutation +} + +// ClearAccounts clears all "accounts" edges to the Account entity. +func (_u *ProxyUpdateOne) ClearAccounts() *ProxyUpdateOne { + _u.mutation.ClearAccounts() + return _u +} + +// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs. +func (_u *ProxyUpdateOne) RemoveAccountIDs(ids ...int64) *ProxyUpdateOne { + _u.mutation.RemoveAccountIDs(ids...) + return _u +} + +// RemoveAccounts removes "accounts" edges to Account entities. +func (_u *ProxyUpdateOne) RemoveAccounts(v ...*Account) *ProxyUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAccountIDs(ids...) +} + +// Where appends a list predicates to the ProxyUpdate builder. +func (_u *ProxyUpdateOne) Where(ps ...predicate.Proxy) *ProxyUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *ProxyUpdateOne) Select(field string, fields ...string) *ProxyUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Proxy entity. 
+func (_u *ProxyUpdateOne) Save(ctx context.Context) (*Proxy, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *ProxyUpdateOne) SaveX(ctx context.Context) *Proxy { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *ProxyUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *ProxyUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *ProxyUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if proxy.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := proxy.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *ProxyUpdateOne) check() error { + if v, ok := _u.mutation.Name(); ok { + if err := proxy.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)} + } + } + if v, ok := _u.mutation.Protocol(); ok { + if err := proxy.ProtocolValidator(v); err != nil { + return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)} + } + } + if v, ok := _u.mutation.Host(); ok { + if err := proxy.HostValidator(v); err != nil { + return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := proxy.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)} + } + } + if v, ok := _u.mutation.Password(); ok { + if err := proxy.PasswordValidator(v); err != nil { + return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := proxy.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)} + } + } + return nil +} + +func (_u *ProxyUpdateOne) sqlSave(ctx context.Context) (_node *Proxy, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Proxy.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, proxy.FieldID) + for _, f := range fields { + if !proxy.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != proxy.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + 
if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(proxy.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(proxy.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Protocol(); ok { + _spec.SetField(proxy.FieldProtocol, field.TypeString, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(proxy.FieldHost, field.TypeString, value) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + _spec.AddField(proxy.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(proxy.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(proxy.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.Password(); ok { + _spec.SetField(proxy.FieldPassword, field.TypeString, value) + } + if _u.mutation.PasswordCleared() { + _spec.ClearField(proxy.FieldPassword, field.TypeString) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(proxy.FieldStatus, field.TypeString, value) + } + if _u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: proxy.AccountsTable, + Columns: []string{proxy.AccountsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Proxy{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{proxy.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/redeemcode.go b/backend/ent/redeemcode.go new file mode 100644 index 00000000..24cd4231 --- /dev/null +++ b/backend/ent/redeemcode.go @@ -0,0 +1,267 @@ +// Code generated by ent, DO NOT EDIT. 
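+
+// Usage sketch (illustrative, not generated): a minimal example of the
+// ProxyUpdate bulk builder defined in proxy_update.go above. It assumes an
+// initialized *ent.Client named "client" and a context "ctx", and relies on
+// the generated proxy.HostEQ predicate; the host and status values are
+// hypothetical. Save returns the number of rows affected; for a single
+// entity, ProxyUpdateOne.Save returns the updated *Proxy instead.
+//
+//	// Disable every proxy on a given host.
+//	n, err := client.Proxy.Update().
+//		Where(proxy.HostEQ("203.0.113.7")).
+//		SetStatus("disabled").
+//		Save(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	_ = n // number of proxies disabled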
+ +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCode is the model entity for the RedeemCode schema. +type RedeemCode struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // Code holds the value of the "code" field. + Code string `json:"code,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Value holds the value of the "value" field. + Value float64 `json:"value,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // UsedBy holds the value of the "used_by" field. + UsedBy *int64 `json:"used_by,omitempty"` + // UsedAt holds the value of the "used_at" field. + UsedAt *time.Time `json:"used_at,omitempty"` + // Notes holds the value of the "notes" field. + Notes *string `json:"notes,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID *int64 `json:"group_id,omitempty"` + // ValidityDays holds the value of the "validity_days" field. + ValidityDays int `json:"validity_days,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the RedeemCodeQuery when eager-loading is set. + Edges RedeemCodeEdges `json:"edges"` + selectValues sql.SelectValues +} + +// RedeemCodeEdges holds the relations/edges for other nodes in the graph. +type RedeemCodeEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e RedeemCodeEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e RedeemCodeEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*RedeemCode) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case redeemcode.FieldValue: + values[i] = new(sql.NullFloat64) + case redeemcode.FieldID, redeemcode.FieldUsedBy, redeemcode.FieldGroupID, redeemcode.FieldValidityDays: + values[i] = new(sql.NullInt64) + case redeemcode.FieldCode, redeemcode.FieldType, redeemcode.FieldStatus, redeemcode.FieldNotes: + values[i] = new(sql.NullString) + case redeemcode.FieldUsedAt, redeemcode.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the RedeemCode fields. +func (_m *RedeemCode) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case redeemcode.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case redeemcode.FieldCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code", values[i]) + } else if value.Valid { + _m.Code = value.String + } + case redeemcode.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + _m.Type = value.String + } + case redeemcode.FieldValue: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.Float64 + } + case redeemcode.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case redeemcode.FieldUsedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field used_by", values[i]) + } else if value.Valid { + _m.UsedBy = new(int64) + *_m.UsedBy = value.Int64 + } + case redeemcode.FieldUsedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field used_at", values[i]) + } else if value.Valid { + _m.UsedAt = new(time.Time) + *_m.UsedAt = value.Time + } + case redeemcode.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = new(string) + *_m.Notes = value.String + } + case redeemcode.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case redeemcode.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = new(int64) + *_m.GroupID = value.Int64 + } + case redeemcode.FieldValidityDays: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field validity_days", values[i]) + } else if value.Valid { + _m.ValidityDays = int(value.Int64) + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was 
dynamically selected and assigned to the RedeemCode. +// This includes values selected through modifiers, order, etc. +func (_m *RedeemCode) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the RedeemCode entity. +func (_m *RedeemCode) QueryUser() *UserQuery { + return NewRedeemCodeClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the RedeemCode entity. +func (_m *RedeemCode) QueryGroup() *GroupQuery { + return NewRedeemCodeClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this RedeemCode. +// Note that you need to call RedeemCode.Unwrap() before calling this method if this RedeemCode +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *RedeemCode) Update() *RedeemCodeUpdateOne { + return NewRedeemCodeClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the RedeemCode entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *RedeemCode) Unwrap() *RedeemCode { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: RedeemCode is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *RedeemCode) String() string { + var builder strings.Builder + builder.WriteString("RedeemCode(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("code=") + builder.WriteString(_m.Code) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(_m.Type) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(fmt.Sprintf("%v", _m.Value)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.UsedBy; v != nil { + builder.WriteString("used_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.UsedAt; v != nil { + builder.WriteString("used_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.Notes; v != nil { + builder.WriteString("notes=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.GroupID; v != nil { + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("validity_days=") + builder.WriteString(fmt.Sprintf("%v", _m.ValidityDays)) + builder.WriteByte(')') + return builder.String() +} + +// RedeemCodes is a parsable slice of RedeemCode. +type RedeemCodes []*RedeemCode diff --git a/backend/ent/redeemcode/redeemcode.go b/backend/ent/redeemcode/redeemcode.go new file mode 100644 index 00000000..b010476c --- /dev/null +++ b/backend/ent/redeemcode/redeemcode.go @@ -0,0 +1,187 @@ +// Code generated by ent, DO NOT EDIT. + +package redeemcode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the redeemcode type in the database. + Label = "redeem_code" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCode holds the string denoting the code field in the database. 
+ FieldCode = "code" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldUsedBy holds the string denoting the used_by field in the database. + FieldUsedBy = "used_by" + // FieldUsedAt holds the string denoting the used_at field in the database. + FieldUsedAt = "used_at" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldValidityDays holds the string denoting the validity_days field in the database. + FieldValidityDays = "validity_days" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // Table holds the table name of the redeemcode in the database. + Table = "redeem_codes" + // UserTable is the table that holds the user relation/edge. + UserTable = "redeem_codes" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "used_by" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "redeem_codes" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for redeemcode fields. +var Columns = []string{ + FieldID, + FieldCode, + FieldType, + FieldValue, + FieldStatus, + FieldUsedBy, + FieldUsedAt, + FieldNotes, + FieldCreatedAt, + FieldGroupID, + FieldValidityDays, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // CodeValidator is a validator for the "code" field. It is called by the builders before save. + CodeValidator func(string) error + // DefaultType holds the default value on creation for the "type" field. + DefaultType string + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // DefaultValue holds the default value on creation for the "value" field. + DefaultValue float64 + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultValidityDays holds the default value on creation for the "validity_days" field. + DefaultValidityDays int +) + +// OrderOption defines the ordering options for the RedeemCode queries. 
+type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCode orders the results by the code field. +func ByCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCode, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUsedBy orders the results by the used_by field. +func ByUsedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedBy, opts...).ToFunc() +} + +// ByUsedAt orders the results by the used_at field. +func ByUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedAt, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByValidityDays orders the results by the validity_days field. +func ByValidityDays(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValidityDays, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/redeemcode/where.go b/backend/ent/redeemcode/where.go new file mode 100644 index 00000000..1fdedba5 --- /dev/null +++ b/backend/ent/redeemcode/where.go @@ -0,0 +1,667 @@ +// Code generated by ent, DO NOT EDIT. + +package redeemcode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldID, id)) +} + +// Code applies equality check predicate on the "code" field. It's identical to CodeEQ. +func Code(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCode, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. +func Type(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldType, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValue, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v)) +} + +// UsedBy applies equality check predicate on the "used_by" field. It's identical to UsedByEQ. +func UsedBy(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v)) +} + +// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ. +func UsedAt(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v)) +} + +// ValidityDays applies equality check predicate on the "validity_days" field. It's identical to ValidityDaysEQ. +func ValidityDays(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v)) +} + +// CodeEQ applies the EQ predicate on the "code" field. +func CodeEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCode, v)) +} + +// CodeNEQ applies the NEQ predicate on the "code" field. 
+func CodeNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldCode, v)) +} + +// CodeIn applies the In predicate on the "code" field. +func CodeIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldCode, vs...)) +} + +// CodeNotIn applies the NotIn predicate on the "code" field. +func CodeNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldCode, vs...)) +} + +// CodeGT applies the GT predicate on the "code" field. +func CodeGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldCode, v)) +} + +// CodeGTE applies the GTE predicate on the "code" field. +func CodeGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldCode, v)) +} + +// CodeLT applies the LT predicate on the "code" field. +func CodeLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldCode, v)) +} + +// CodeLTE applies the LTE predicate on the "code" field. +func CodeLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldCode, v)) +} + +// CodeContains applies the Contains predicate on the "code" field. +func CodeContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldCode, v)) +} + +// CodeHasPrefix applies the HasPrefix predicate on the "code" field. +func CodeHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldCode, v)) +} + +// CodeHasSuffix applies the HasSuffix predicate on the "code" field. +func CodeHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldCode, v)) +} + +// CodeEqualFold applies the EqualFold predicate on the "code" field. +func CodeEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldCode, v)) +} + +// CodeContainsFold applies the ContainsFold predicate on the "code" field. +func CodeContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldCode, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. 
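+//
+// The fold variants above are useful for user-supplied input; an
+// illustrative case-insensitive lookup ("input", "client", and "ctx" are
+// assumed to exist in the caller):
+//
+//	rc, err := client.RedeemCode.Query().
+//		Where(redeemcode.CodeEqualFold(input)).
+//		First(ctx)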
+func TypeContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. +func TypeHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldType, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v float64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldValue, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. 
+func StatusLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldStatus, v)) +} + +// UsedByEQ applies the EQ predicate on the "used_by" field. +func UsedByEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v)) +} + +// UsedByNEQ applies the NEQ predicate on the "used_by" field. +func UsedByNEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldUsedBy, v)) +} + +// UsedByIn applies the In predicate on the "used_by" field. +func UsedByIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldUsedBy, vs...)) +} + +// UsedByNotIn applies the NotIn predicate on the "used_by" field. +func UsedByNotIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldUsedBy, vs...)) +} + +// UsedByIsNil applies the IsNil predicate on the "used_by" field. +func UsedByIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldUsedBy)) +} + +// UsedByNotNil applies the NotNil predicate on the "used_by" field. +func UsedByNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldUsedBy)) +} + +// UsedAtEQ applies the EQ predicate on the "used_at" field. +func UsedAtEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v)) +} + +// UsedAtNEQ applies the NEQ predicate on the "used_at" field. +func UsedAtNEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldUsedAt, v)) +} + +// UsedAtIn applies the In predicate on the "used_at" field. +func UsedAtIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldUsedAt, vs...)) +} + +// UsedAtNotIn applies the NotIn predicate on the "used_at" field. +func UsedAtNotIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldUsedAt, vs...)) +} + +// UsedAtGT applies the GT predicate on the "used_at" field. +func UsedAtGT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldUsedAt, v)) +} + +// UsedAtGTE applies the GTE predicate on the "used_at" field. +func UsedAtGTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldUsedAt, v)) +} + +// UsedAtLT applies the LT predicate on the "used_at" field. 
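+//
+// Predicates passed to Where are ANDed together; e.g. an illustrative query
+// for codes that were never redeemed (the "unused" status string is an
+// assumption, not taken from the schema):
+//
+//	unused, err := client.RedeemCode.Query().
+//		Where(redeemcode.StatusEQ("unused"), redeemcode.UsedByIsNil()).
+//		All(ctx)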
+func UsedAtLT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldUsedAt, v)) +} + +// UsedAtLTE applies the LTE predicate on the "used_at" field. +func UsedAtLTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldUsedAt, v)) +} + +// UsedAtIsNil applies the IsNil predicate on the "used_at" field. +func UsedAtIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldUsedAt)) +} + +// UsedAtNotNil applies the NotNil predicate on the "used_at" field. +func UsedAtNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldUsedAt)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. +func NotesNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldContainsFold(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
+func CreatedAtEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldCreatedAt, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDIsNil applies the IsNil predicate on the "group_id" field. +func GroupIDIsNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIsNull(FieldGroupID)) +} + +// GroupIDNotNil applies the NotNil predicate on the "group_id" field. +func GroupIDNotNil() predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotNull(FieldGroupID)) +} + +// ValidityDaysEQ applies the EQ predicate on the "validity_days" field. +func ValidityDaysEQ(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v)) +} + +// ValidityDaysNEQ applies the NEQ predicate on the "validity_days" field. +func ValidityDaysNEQ(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNEQ(FieldValidityDays, v)) +} + +// ValidityDaysIn applies the In predicate on the "validity_days" field. +func ValidityDaysIn(vs ...int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldIn(FieldValidityDays, vs...)) +} + +// ValidityDaysNotIn applies the NotIn predicate on the "validity_days" field. +func ValidityDaysNotIn(vs ...int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldNotIn(FieldValidityDays, vs...)) +} + +// ValidityDaysGT applies the GT predicate on the "validity_days" field. 
+func ValidityDaysGT(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGT(FieldValidityDays, v)) +} + +// ValidityDaysGTE applies the GTE predicate on the "validity_days" field. +func ValidityDaysGTE(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldGTE(FieldValidityDays, v)) +} + +// ValidityDaysLT applies the LT predicate on the "validity_days" field. +func ValidityDaysLT(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLT(FieldValidityDays, v)) +} + +// ValidityDaysLTE applies the LTE predicate on the "validity_days" field. +func ValidityDaysLTE(v int) predicate.RedeemCode { + return predicate.RedeemCode(sql.FieldLTE(FieldValidityDays, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.RedeemCode { + return predicate.RedeemCode(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.RedeemCode) predicate.RedeemCode { + return predicate.RedeemCode(sql.NotPredicates(p)) +} diff --git a/backend/ent/redeemcode_create.go b/backend/ent/redeemcode_create.go new file mode 100644 index 00000000..efdcee40 --- /dev/null +++ b/backend/ent/redeemcode_create.go @@ -0,0 +1,1177 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeCreate is the builder for creating a RedeemCode entity. +type RedeemCodeCreate struct { + config + mutation *RedeemCodeMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCode sets the "code" field. 
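+//
+// A minimal creation sketch; the field values are illustrative and "client"
+// and "ctx" are assumed:
+//
+//	rc, err := client.RedeemCode.Create().
+//		SetCode("WELCOME-2026").
+//		SetType("balance").
+//		SetValue(10).
+//		Save(ctx)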
+func (_c *RedeemCodeCreate) SetCode(v string) *RedeemCodeCreate { + _c.mutation.SetCode(v) + return _c +} + +// SetType sets the "type" field. +func (_c *RedeemCodeCreate) SetType(v string) *RedeemCodeCreate { + _c.mutation.SetType(v) + return _c +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableType(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetType(*v) + } + return _c +} + +// SetValue sets the "value" field. +func (_c *RedeemCodeCreate) SetValue(v float64) *RedeemCodeCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableValue(v *float64) *RedeemCodeCreate { + if v != nil { + _c.SetValue(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *RedeemCodeCreate) SetStatus(v string) *RedeemCodeCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableStatus(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetUsedBy sets the "used_by" field. +func (_c *RedeemCodeCreate) SetUsedBy(v int64) *RedeemCodeCreate { + _c.mutation.SetUsedBy(v) + return _c +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUsedBy(v *int64) *RedeemCodeCreate { + if v != nil { + _c.SetUsedBy(*v) + } + return _c +} + +// SetUsedAt sets the "used_at" field. +func (_c *RedeemCodeCreate) SetUsedAt(v time.Time) *RedeemCodeCreate { + _c.mutation.SetUsedAt(v) + return _c +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUsedAt(v *time.Time) *RedeemCodeCreate { + if v != nil { + _c.SetUsedAt(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *RedeemCodeCreate) SetNotes(v string) *RedeemCodeCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableNotes(v *string) *RedeemCodeCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *RedeemCodeCreate) SetCreatedAt(v time.Time) *RedeemCodeCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableCreatedAt(v *time.Time) *RedeemCodeCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *RedeemCodeCreate) SetGroupID(v int64) *RedeemCodeCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableGroupID(v *int64) *RedeemCodeCreate { + if v != nil { + _c.SetGroupID(*v) + } + return _c +} + +// SetValidityDays sets the "validity_days" field. +func (_c *RedeemCodeCreate) SetValidityDays(v int) *RedeemCodeCreate { + _c.mutation.SetValidityDays(v) + return _c +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableValidityDays(v *int) *RedeemCodeCreate { + if v != nil { + _c.SetValidityDays(*v) + } + return _c +} + +// SetUserID sets the "user" edge to the User entity by ID. 
+func (_c *RedeemCodeCreate) SetUserID(id int64) *RedeemCodeCreate { + _c.mutation.SetUserID(id) + return _c +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_c *RedeemCodeCreate) SetNillableUserID(id *int64) *RedeemCodeCreate { + if id != nil { + _c = _c.SetUserID(*id) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *RedeemCodeCreate) SetUser(v *User) *RedeemCodeCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *RedeemCodeCreate) SetGroup(v *Group) *RedeemCodeCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_c *RedeemCodeCreate) Mutation() *RedeemCodeMutation { + return _c.mutation +} + +// Save creates the RedeemCode in the database. +func (_c *RedeemCodeCreate) Save(ctx context.Context) (*RedeemCode, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *RedeemCodeCreate) SaveX(ctx context.Context) *RedeemCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *RedeemCodeCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *RedeemCodeCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *RedeemCodeCreate) defaults() { + if _, ok := _c.mutation.GetType(); !ok { + v := redeemcode.DefaultType + _c.mutation.SetType(v) + } + if _, ok := _c.mutation.Value(); !ok { + v := redeemcode.DefaultValue + _c.mutation.SetValue(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := redeemcode.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := redeemcode.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.ValidityDays(); !ok { + v := redeemcode.DefaultValidityDays + _c.mutation.SetValidityDays(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *RedeemCodeCreate) check() error { + if _, ok := _c.mutation.Code(); !ok { + return &ValidationError{Name: "code", err: errors.New(`ent: missing required field "RedeemCode.code"`)} + } + if v, ok := _c.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if _, ok := _c.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "RedeemCode.type"`)} + } + if v, ok := _c.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "RedeemCode.value"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "RedeemCode.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "RedeemCode.created_at"`)} + } + if _, ok := _c.mutation.ValidityDays(); !ok { + return &ValidationError{Name: "validity_days", err: errors.New(`ent: missing required field "RedeemCode.validity_days"`)} + } + return nil +} + +func (_c *RedeemCodeCreate) sqlSave(ctx context.Context) (*RedeemCode, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *RedeemCodeCreate) createSpec() (*RedeemCode, *sqlgraph.CreateSpec) { + var ( + _node = &RedeemCode{config: _c.config} + _spec = sqlgraph.NewCreateSpec(redeemcode.Table, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + _node.Code = value + } + if value, ok := _c.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + _node.Type = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value) + _node.Value = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + _node.UsedAt = &value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(redeemcode.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + 
_node.ValidityDays = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UsedBy = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.RedeemCode.Create(). +// SetCode(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.RedeemCodeUpsert) { +// SetCode(v+v). +// }). +// Exec(ctx) +func (_c *RedeemCodeCreate) OnConflict(opts ...sql.ConflictOption) *RedeemCodeUpsertOne { + _c.conflict = opts + return &RedeemCodeUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *RedeemCodeCreate) OnConflictColumns(columns ...string) *RedeemCodeUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &RedeemCodeUpsertOne{ + create: _c, + } +} + +type ( + // RedeemCodeUpsertOne is the builder for "upsert"-ing + // one RedeemCode node. + RedeemCodeUpsertOne struct { + create *RedeemCodeCreate + } + + // RedeemCodeUpsert is the "OnConflict" setter. + RedeemCodeUpsert struct { + *sql.UpdateSet + } +) + +// SetCode sets the "code" field. +func (u *RedeemCodeUpsert) SetCode(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldCode, v) + return u +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateCode() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldCode) + return u +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsert) SetType(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldType, v) + return u +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateType() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldType) + return u +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsert) SetValue(v float64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldValue, v) + return u +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateValue() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldValue) + return u +} + +// AddValue adds v to the "value" field. 
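+//
+// An illustrative upsert keyed on the code column; this assumes "code"
+// carries a unique index (plausible for redeem codes, but not verified
+// here):
+//
+//	err := client.RedeemCode.Create().
+//		SetCode("WELCOME-2026").
+//		SetValue(10).
+//		OnConflictColumns(redeemcode.FieldCode).
+//		UpdateNewValues().
+//		Exec(ctx)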
+func (u *RedeemCodeUpsert) AddValue(v float64) *RedeemCodeUpsert { + u.Add(redeemcode.FieldValue, v) + return u +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsert) SetStatus(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateStatus() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldStatus) + return u +} + +// SetUsedBy sets the "used_by" field. +func (u *RedeemCodeUpsert) SetUsedBy(v int64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldUsedBy, v) + return u +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateUsedBy() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldUsedBy) + return u +} + +// ClearUsedBy clears the value of the "used_by" field. +func (u *RedeemCodeUpsert) ClearUsedBy() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldUsedBy) + return u +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsert) SetUsedAt(v time.Time) *RedeemCodeUpsert { + u.Set(redeemcode.FieldUsedAt, v) + return u +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateUsedAt() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldUsedAt) + return u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsert) ClearUsedAt() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldUsedAt) + return u +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsert) SetNotes(v string) *RedeemCodeUpsert { + u.Set(redeemcode.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateNotes() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *RedeemCodeUpsert) ClearNotes() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldNotes) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *RedeemCodeUpsert) SetGroupID(v int64) *RedeemCodeUpsert { + u.Set(redeemcode.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateGroupID() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldGroupID) + return u +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsert) ClearGroupID() *RedeemCodeUpsert { + u.SetNull(redeemcode.FieldGroupID) + return u +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsert) SetValidityDays(v int) *RedeemCodeUpsert { + u.Set(redeemcode.FieldValidityDays, v) + return u +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. +func (u *RedeemCodeUpsert) UpdateValidityDays() *RedeemCodeUpsert { + u.SetExcluded(redeemcode.FieldValidityDays) + return u +} + +// AddValidityDays adds v to the "validity_days" field. +func (u *RedeemCodeUpsert) AddValidityDays(v int) *RedeemCodeUpsert { + u.Add(redeemcode.FieldValidityDays, v) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *RedeemCodeUpsertOne) UpdateNewValues() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(redeemcode.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *RedeemCodeUpsertOne) Ignore() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *RedeemCodeUpsertOne) DoNothing() *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the RedeemCodeCreate.OnConflict +// documentation for more info. +func (u *RedeemCodeUpsertOne) Update(set func(*RedeemCodeUpsert)) *RedeemCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&RedeemCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *RedeemCodeUpsertOne) SetCode(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateCode() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateCode() + }) +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsertOne) SetType(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateType() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateType() + }) +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsertOne) SetValue(v float64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValue(v) + }) +} + +// AddValue adds v to the "value" field. +func (u *RedeemCodeUpsertOne) AddValue(v float64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateValue() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValue() + }) +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsertOne) SetStatus(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateStatus() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetUsedBy sets the "used_by" field. +func (u *RedeemCodeUpsertOne) SetUsedBy(v int64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedBy(v) + }) +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. 
+func (u *RedeemCodeUpsertOne) UpdateUsedBy() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedBy() + }) +} + +// ClearUsedBy clears the value of the "used_by" field. +func (u *RedeemCodeUpsertOne) ClearUsedBy() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedBy() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsertOne) SetUsedAt(v time.Time) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateUsedAt() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedAt() + }) +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsertOne) ClearUsedAt() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsertOne) SetNotes(v string) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateNotes() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *RedeemCodeUpsertOne) ClearNotes() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearNotes() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *RedeemCodeUpsertOne) SetGroupID(v int64) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateGroupID() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsertOne) ClearGroupID() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearGroupID() + }) +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsertOne) SetValidityDays(v int) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValidityDays(v) + }) +} + +// AddValidityDays adds v to the "validity_days" field. +func (u *RedeemCodeUpsertOne) AddValidityDays(v int) *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValidityDays(v) + }) +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. +func (u *RedeemCodeUpsertOne) UpdateValidityDays() *RedeemCodeUpsertOne { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValidityDays() + }) +} + +// Exec executes the query. +func (u *RedeemCodeUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for RedeemCodeCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *RedeemCodeUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
+func (u *RedeemCodeUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *RedeemCodeUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// RedeemCodeCreateBulk is the builder for creating many RedeemCode entities in bulk. +type RedeemCodeCreateBulk struct { + config + err error + builders []*RedeemCodeCreate + conflict []sql.ConflictOption +} + +// Save creates the RedeemCode entities in the database. +func (_c *RedeemCodeCreateBulk) Save(ctx context.Context) ([]*RedeemCode, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*RedeemCode, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*RedeemCodeMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *RedeemCodeCreateBulk) SaveX(ctx context.Context) []*RedeemCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *RedeemCodeCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *RedeemCodeCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.RedeemCode.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.RedeemCodeUpsert) { +// SetCode(v+v). +// }). 
+// Exec(ctx) +func (_c *RedeemCodeCreateBulk) OnConflict(opts ...sql.ConflictOption) *RedeemCodeUpsertBulk { + _c.conflict = opts + return &RedeemCodeUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *RedeemCodeCreateBulk) OnConflictColumns(columns ...string) *RedeemCodeUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &RedeemCodeUpsertBulk{ + create: _c, + } +} + +// RedeemCodeUpsertBulk is the builder for "upsert"-ing +// a bulk of RedeemCode nodes. +type RedeemCodeUpsertBulk struct { + create *RedeemCodeCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *RedeemCodeUpsertBulk) UpdateNewValues() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(redeemcode.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.RedeemCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *RedeemCodeUpsertBulk) Ignore() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *RedeemCodeUpsertBulk) DoNothing() *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the RedeemCodeCreateBulk.OnConflict +// documentation for more info. +func (u *RedeemCodeUpsertBulk) Update(set func(*RedeemCodeUpsert)) *RedeemCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&RedeemCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *RedeemCodeUpsertBulk) SetCode(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateCode() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateCode() + }) +} + +// SetType sets the "type" field. +func (u *RedeemCodeUpsertBulk) SetType(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateType() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateType() + }) +} + +// SetValue sets the "value" field. +func (u *RedeemCodeUpsertBulk) SetValue(v float64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValue(v) + }) +} + +// AddValue adds v to the "value" field. 
+func (u *RedeemCodeUpsertBulk) AddValue(v float64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateValue() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValue() + }) +} + +// SetStatus sets the "status" field. +func (u *RedeemCodeUpsertBulk) SetStatus(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateStatus() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetUsedBy sets the "used_by" field. +func (u *RedeemCodeUpsertBulk) SetUsedBy(v int64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedBy(v) + }) +} + +// UpdateUsedBy sets the "used_by" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateUsedBy() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedBy() + }) +} + +// ClearUsedBy clears the value of the "used_by" field. +func (u *RedeemCodeUpsertBulk) ClearUsedBy() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedBy() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *RedeemCodeUpsertBulk) SetUsedAt(v time.Time) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateUsedAt() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateUsedAt() + }) +} + +// ClearUsedAt clears the value of the "used_at" field. +func (u *RedeemCodeUpsertBulk) ClearUsedAt() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearUsedAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *RedeemCodeUpsertBulk) SetNotes(v string) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateNotes() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *RedeemCodeUpsertBulk) ClearNotes() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearNotes() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *RedeemCodeUpsertBulk) SetGroupID(v int64) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateGroupID() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *RedeemCodeUpsertBulk) ClearGroupID() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.ClearGroupID() + }) +} + +// SetValidityDays sets the "validity_days" field. +func (u *RedeemCodeUpsertBulk) SetValidityDays(v int) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.SetValidityDays(v) + }) +} + +// AddValidityDays adds v to the "validity_days" field. 
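+//
+// Bulk creation follows the same pattern; a sketch that skips duplicates on
+// conflict (assumes "client", "ctx", and a []string named "codes"):
+//
+//	builders := make([]*ent.RedeemCodeCreate, len(codes))
+//	for i, c := range codes {
+//		builders[i] = client.RedeemCode.Create().SetCode(c)
+//	}
+//	err := client.RedeemCode.CreateBulk(builders...).
+//		OnConflictColumns(redeemcode.FieldCode).
+//		Ignore().
+//		Exec(ctx)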
+func (u *RedeemCodeUpsertBulk) AddValidityDays(v int) *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.AddValidityDays(v) + }) +} + +// UpdateValidityDays sets the "validity_days" field to the value that was provided on create. +func (u *RedeemCodeUpsertBulk) UpdateValidityDays() *RedeemCodeUpsertBulk { + return u.Update(func(s *RedeemCodeUpsert) { + s.UpdateValidityDays() + }) +} + +// Exec executes the query. +func (u *RedeemCodeUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the RedeemCodeCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for RedeemCodeCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *RedeemCodeUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/redeemcode_delete.go b/backend/ent/redeemcode_delete.go new file mode 100644 index 00000000..f16ef1e9 --- /dev/null +++ b/backend/ent/redeemcode_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" +) + +// RedeemCodeDelete is the builder for deleting a RedeemCode entity. +type RedeemCodeDelete struct { + config + hooks []Hook + mutation *RedeemCodeMutation +} + +// Where appends a list predicates to the RedeemCodeDelete builder. +func (_d *RedeemCodeDelete) Where(ps ...predicate.RedeemCode) *RedeemCodeDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *RedeemCodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *RedeemCodeDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *RedeemCodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(redeemcode.Table, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// RedeemCodeDeleteOne is the builder for deleting a single RedeemCode entity. +type RedeemCodeDeleteOne struct { + _d *RedeemCodeDelete +} + +// Where appends a list predicates to the RedeemCodeDelete builder. +func (_d *RedeemCodeDeleteOne) Where(ps ...predicate.RedeemCode) *RedeemCodeDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *RedeemCodeDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{redeemcode.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
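+//
+// A typical predicate-based bulk delete; the "expired" status string is
+// illustrative ("client" and "ctx" assumed):
+//
+//	n, err := client.RedeemCode.Delete().
+//		Where(redeemcode.StatusEQ("expired")).
+//		Exec(ctx)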
+func (_d *RedeemCodeDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/redeemcode_query.go b/backend/ent/redeemcode_query.go new file mode 100644 index 00000000..f5b8baef --- /dev/null +++ b/backend/ent/redeemcode_query.go @@ -0,0 +1,724 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeQuery is the builder for querying RedeemCode entities. +type RedeemCodeQuery struct { + config + ctx *QueryContext + order []redeemcode.OrderOption + inters []Interceptor + predicates []predicate.RedeemCode + withUser *UserQuery + withGroup *GroupQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the RedeemCodeQuery builder. +func (_q *RedeemCodeQuery) Where(ps ...predicate.RedeemCode) *RedeemCodeQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *RedeemCodeQuery) Limit(limit int) *RedeemCodeQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *RedeemCodeQuery) Offset(offset int) *RedeemCodeQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *RedeemCodeQuery) Unique(unique bool) *RedeemCodeQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *RedeemCodeQuery) Order(o ...redeemcode.OrderOption) *RedeemCodeQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *RedeemCodeQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.UserTable, redeemcode.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. 
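+//
+// A sketch of the traversal (the status value is an assumption):
+//
+//	groups, err := client.RedeemCode.Query().
+//		Where(redeemcode.StatusEQ("used")).
+//		QueryGroup().
+//		All(ctx)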
+func (_q *RedeemCodeQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.GroupTable, redeemcode.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first RedeemCode entity from the query. +// Returns a *NotFoundError when no RedeemCode was found. +func (_q *RedeemCodeQuery) First(ctx context.Context) (*RedeemCode, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{redeemcode.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *RedeemCodeQuery) FirstX(ctx context.Context) *RedeemCode { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first RedeemCode ID from the query. +// Returns a *NotFoundError when no RedeemCode ID was found. +func (_q *RedeemCodeQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{redeemcode.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *RedeemCodeQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single RedeemCode entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one RedeemCode entity is found. +// Returns a *NotFoundError when no RedeemCode entities are found. +func (_q *RedeemCodeQuery) Only(ctx context.Context) (*RedeemCode, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{redeemcode.Label} + default: + return nil, &NotSingularError{redeemcode.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *RedeemCodeQuery) OnlyX(ctx context.Context) *RedeemCode { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only RedeemCode ID in the query. +// Returns a *NotSingularError when more than one RedeemCode ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *RedeemCodeQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{redeemcode.Label} + default: + err = &NotSingularError{redeemcode.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
+func (_q *RedeemCodeQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of RedeemCodes. +func (_q *RedeemCodeQuery) All(ctx context.Context) ([]*RedeemCode, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*RedeemCode, *RedeemCodeQuery]() + return withInterceptors[[]*RedeemCode](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *RedeemCodeQuery) AllX(ctx context.Context) []*RedeemCode { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of RedeemCode IDs. +func (_q *RedeemCodeQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(redeemcode.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *RedeemCodeQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *RedeemCodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*RedeemCodeQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *RedeemCodeQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *RedeemCodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *RedeemCodeQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the RedeemCodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *RedeemCodeQuery) Clone() *RedeemCodeQuery { + if _q == nil { + return nil + } + return &RedeemCodeQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]redeemcode.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.RedeemCode{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. 
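+//
+// A sketch of eager-loading both edges in one call (the field selection
+// in the closure is illustrative):
+//
+//	codes, err := client.RedeemCode.Query().
+//		WithUser().
+//		WithGroup(func(q *GroupQuery) {
+//			q.Select(group.FieldName)
+//		}).
+//		All(ctx)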
+func (_q *RedeemCodeQuery) WithUser(opts ...func(*UserQuery)) *RedeemCodeQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *RedeemCodeQuery) WithGroup(opts ...func(*GroupQuery)) *RedeemCodeQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.RedeemCode.Query(). +// GroupBy(redeemcode.FieldCode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *RedeemCodeQuery) GroupBy(field string, fields ...string) *RedeemCodeGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &RedeemCodeGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = redeemcode.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// } +// +// client.RedeemCode.Query(). +// Select(redeemcode.FieldCode). +// Scan(ctx, &v) +func (_q *RedeemCodeQuery) Select(fields ...string) *RedeemCodeSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &RedeemCodeSelect{RedeemCodeQuery: _q} + sbuild.label = redeemcode.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a RedeemCodeSelect configured with the given aggregations. +func (_q *RedeemCodeQuery) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *RedeemCodeQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !redeemcode.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *RedeemCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RedeemCode, error) { + var ( + nodes = []*RedeemCode{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*RedeemCode).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &RedeemCode{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *RedeemCode, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *RedeemCode, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *RedeemCodeQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*RedeemCode, init func(*RedeemCode), assign func(*RedeemCode, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*RedeemCode) + for i := range nodes { + if nodes[i].UsedBy == nil { + continue + } + fk := *nodes[i].UsedBy + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "used_by" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *RedeemCodeQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*RedeemCode, init func(*RedeemCode), assign func(*RedeemCode, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*RedeemCode) + for i := range nodes { + if nodes[i].GroupID == nil { + continue + } + fk := *nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) { + 
_spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *RedeemCodeQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID) + for i := range fields { + if fields[i] != redeemcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(redeemcode.FieldUsedBy) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(redeemcode.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(redeemcode.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = redeemcode.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *RedeemCodeQuery) ForUpdate(opts ...sql.LockOption) *RedeemCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *RedeemCodeQuery) ForShare(opts ...sql.LockOption) *RedeemCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// RedeemCodeGroupBy is the group-by builder for RedeemCode entities. 
+type RedeemCodeGroupBy struct { + selector + build *RedeemCodeQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *RedeemCodeGroupBy) Aggregate(fns ...AggregateFunc) *RedeemCodeGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *RedeemCodeGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *RedeemCodeGroupBy) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// RedeemCodeSelect is the builder for selecting fields of RedeemCode entities. +type RedeemCodeSelect struct { + *RedeemCodeQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *RedeemCodeSelect) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *RedeemCodeSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeSelect](ctx, _s.RedeemCodeQuery, _s, _s.inters, v) +} + +func (_s *RedeemCodeSelect) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/redeemcode_update.go b/backend/ent/redeemcode_update.go new file mode 100644 index 00000000..0f05e06d --- /dev/null +++ b/backend/ent/redeemcode_update.go @@ -0,0 +1,806 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// RedeemCodeUpdate is the builder for updating RedeemCode entities. +type RedeemCodeUpdate struct { + config + hooks []Hook + mutation *RedeemCodeMutation +} + +// Where appends a list predicates to the RedeemCodeUpdate builder. +func (_u *RedeemCodeUpdate) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetCode sets the "code" field. +func (_u *RedeemCodeUpdate) SetCode(v string) *RedeemCodeUpdate { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableCode(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *RedeemCodeUpdate) SetType(v string) *RedeemCodeUpdate { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableType(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *RedeemCodeUpdate) SetValue(v float64) *RedeemCodeUpdate { + _u.mutation.ResetValue() + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableValue(v *float64) *RedeemCodeUpdate { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// AddValue adds value to the "value" field. +func (_u *RedeemCodeUpdate) AddValue(v float64) *RedeemCodeUpdate { + _u.mutation.AddValue(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *RedeemCodeUpdate) SetStatus(v string) *RedeemCodeUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableStatus(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsedBy sets the "used_by" field. +func (_u *RedeemCodeUpdate) SetUsedBy(v int64) *RedeemCodeUpdate { + _u.mutation.SetUsedBy(v) + return _u +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableUsedBy(v *int64) *RedeemCodeUpdate { + if v != nil { + _u.SetUsedBy(*v) + } + return _u +} + +// ClearUsedBy clears the value of the "used_by" field. +func (_u *RedeemCodeUpdate) ClearUsedBy() *RedeemCodeUpdate { + _u.mutation.ClearUsedBy() + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *RedeemCodeUpdate) SetUsedAt(v time.Time) *RedeemCodeUpdate { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdate { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (_u *RedeemCodeUpdate) ClearUsedAt() *RedeemCodeUpdate { + _u.mutation.ClearUsedAt() + return _u +} + +// SetNotes sets the "notes" field. 
+func (_u *RedeemCodeUpdate) SetNotes(v string) *RedeemCodeUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableNotes(v *string) *RedeemCodeUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *RedeemCodeUpdate) ClearNotes() *RedeemCodeUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *RedeemCodeUpdate) SetGroupID(v int64) *RedeemCodeUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableGroupID(v *int64) *RedeemCodeUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *RedeemCodeUpdate) ClearGroupID() *RedeemCodeUpdate { + _u.mutation.ClearGroupID() + return _u +} + +// SetValidityDays sets the "validity_days" field. +func (_u *RedeemCodeUpdate) SetValidityDays(v int) *RedeemCodeUpdate { + _u.mutation.ResetValidityDays() + _u.mutation.SetValidityDays(v) + return _u +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableValidityDays(v *int) *RedeemCodeUpdate { + if v != nil { + _u.SetValidityDays(*v) + } + return _u +} + +// AddValidityDays adds value to the "validity_days" field. +func (_u *RedeemCodeUpdate) AddValidityDays(v int) *RedeemCodeUpdate { + _u.mutation.AddValidityDays(v) + return _u +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (_u *RedeemCodeUpdate) SetUserID(id int64) *RedeemCodeUpdate { + _u.mutation.SetUserID(id) + return _u +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_u *RedeemCodeUpdate) SetNillableUserID(id *int64) *RedeemCodeUpdate { + if id != nil { + _u = _u.SetUserID(*id) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *RedeemCodeUpdate) SetUser(v *User) *RedeemCodeUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *RedeemCodeUpdate) SetGroup(v *Group) *RedeemCodeUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_u *RedeemCodeUpdate) Mutation() *RedeemCodeMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *RedeemCodeUpdate) ClearUser() *RedeemCodeUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *RedeemCodeUpdate) ClearGroup() *RedeemCodeUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *RedeemCodeUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *RedeemCodeUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *RedeemCodeUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
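+//
+// A sketch of a predicate-scoped bulk update (both status strings are
+// assumptions; the real values live in the schema package):
+//
+//	n, err := client.RedeemCode.
+//		Update().
+//		Where(redeemcode.StatusEQ("active")).
+//		SetStatus("expired").
+//		Save(ctx)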
+func (_u *RedeemCodeUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *RedeemCodeUpdate) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + return nil +} + +func (_u *RedeemCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedValue(); ok { + _spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.UsedAtCleared() { + _spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(redeemcode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedValidityDays(); ok { + _spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: 
[]string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{redeemcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// RedeemCodeUpdateOne is the builder for updating a single RedeemCode entity. +type RedeemCodeUpdateOne struct { + config + fields []string + hooks []Hook + mutation *RedeemCodeMutation +} + +// SetCode sets the "code" field. +func (_u *RedeemCodeUpdateOne) SetCode(v string) *RedeemCodeUpdateOne { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableCode(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *RedeemCodeUpdateOne) SetType(v string) *RedeemCodeUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableType(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *RedeemCodeUpdateOne) SetValue(v float64) *RedeemCodeUpdateOne { + _u.mutation.ResetValue() + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableValue(v *float64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// AddValue adds value to the "value" field. +func (_u *RedeemCodeUpdateOne) AddValue(v float64) *RedeemCodeUpdateOne { + _u.mutation.AddValue(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *RedeemCodeUpdateOne) SetStatus(v string) *RedeemCodeUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableStatus(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsedBy sets the "used_by" field. +func (_u *RedeemCodeUpdateOne) SetUsedBy(v int64) *RedeemCodeUpdateOne { + _u.mutation.SetUsedBy(v) + return _u +} + +// SetNillableUsedBy sets the "used_by" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableUsedBy(v *int64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetUsedBy(*v) + } + return _u +} + +// ClearUsedBy clears the value of the "used_by" field. +func (_u *RedeemCodeUpdateOne) ClearUsedBy() *RedeemCodeUpdateOne { + _u.mutation.ClearUsedBy() + return _u +} + +// SetUsedAt sets the "used_at" field. 
+func (_u *RedeemCodeUpdateOne) SetUsedAt(v time.Time) *RedeemCodeUpdateOne { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdateOne { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// ClearUsedAt clears the value of the "used_at" field. +func (_u *RedeemCodeUpdateOne) ClearUsedAt() *RedeemCodeUpdateOne { + _u.mutation.ClearUsedAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *RedeemCodeUpdateOne) SetNotes(v string) *RedeemCodeUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableNotes(v *string) *RedeemCodeUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *RedeemCodeUpdateOne) ClearNotes() *RedeemCodeUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *RedeemCodeUpdateOne) SetGroupID(v int64) *RedeemCodeUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableGroupID(v *int64) *RedeemCodeUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *RedeemCodeUpdateOne) ClearGroupID() *RedeemCodeUpdateOne { + _u.mutation.ClearGroupID() + return _u +} + +// SetValidityDays sets the "validity_days" field. +func (_u *RedeemCodeUpdateOne) SetValidityDays(v int) *RedeemCodeUpdateOne { + _u.mutation.ResetValidityDays() + _u.mutation.SetValidityDays(v) + return _u +} + +// SetNillableValidityDays sets the "validity_days" field if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableValidityDays(v *int) *RedeemCodeUpdateOne { + if v != nil { + _u.SetValidityDays(*v) + } + return _u +} + +// AddValidityDays adds value to the "validity_days" field. +func (_u *RedeemCodeUpdateOne) AddValidityDays(v int) *RedeemCodeUpdateOne { + _u.mutation.AddValidityDays(v) + return _u +} + +// SetUserID sets the "user" edge to the User entity by ID. +func (_u *RedeemCodeUpdateOne) SetUserID(id int64) *RedeemCodeUpdateOne { + _u.mutation.SetUserID(id) + return _u +} + +// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil. +func (_u *RedeemCodeUpdateOne) SetNillableUserID(id *int64) *RedeemCodeUpdateOne { + if id != nil { + _u = _u.SetUserID(*id) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *RedeemCodeUpdateOne) SetUser(v *User) *RedeemCodeUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *RedeemCodeUpdateOne) SetGroup(v *Group) *RedeemCodeUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the RedeemCodeMutation object of the builder. +func (_u *RedeemCodeUpdateOne) Mutation() *RedeemCodeMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *RedeemCodeUpdateOne) ClearUser() *RedeemCodeUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. 
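+//
+// Putting the single-row builder together with the ForUpdate row lock
+// from redeemcode_query.go, a redemption flow could be sketched as
+// follows (status value and variable names are assumed; error handling
+// elided):
+//
+//	tx, _ := client.Tx(ctx)
+//	rc, _ := tx.RedeemCode.Query().
+//		Where(redeemcode.CodeEQ(code)).
+//		ForUpdate().
+//		Only(ctx)
+//	rc, _ = tx.RedeemCode.UpdateOne(rc).
+//		SetStatus("used").
+//		SetUsedBy(userID).
+//		SetUsedAt(time.Now()).
+//		Save(ctx)
+//	_ = tx.Commit()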
+func (_u *RedeemCodeUpdateOne) ClearGroup() *RedeemCodeUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the RedeemCodeUpdate builder. +func (_u *RedeemCodeUpdateOne) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *RedeemCodeUpdateOne) Select(field string, fields ...string) *RedeemCodeUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated RedeemCode entity. +func (_u *RedeemCodeUpdateOne) Save(ctx context.Context) (*RedeemCode, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *RedeemCodeUpdateOne) SaveX(ctx context.Context) *RedeemCode { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *RedeemCodeUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *RedeemCodeUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *RedeemCodeUpdateOne) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := redeemcode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := redeemcode.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := redeemcode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)} + } + } + return nil +} + +func (_u *RedeemCodeUpdateOne) sqlSave(ctx context.Context) (_node *RedeemCode, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "RedeemCode.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID) + for _, f := range fields { + if !redeemcode.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != redeemcode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(redeemcode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(redeemcode.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(redeemcode.FieldValue, field.TypeFloat64, 
value) + } + if value, ok := _u.mutation.AddedValue(); ok { + _spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(redeemcode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.UsedAtCleared() { + _spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(redeemcode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(redeemcode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.ValidityDays(); ok { + _spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedValidityDays(); ok { + _spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.UserTable, + Columns: []string{redeemcode.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: redeemcode.GroupTable, + Columns: []string{redeemcode.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &RedeemCode{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{redeemcode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/runtime.go b/backend/ent/runtime.go new file mode 100644 index 00000000..ee3195e2 --- /dev/null +++ b/backend/ent/runtime.go @@ -0,0 +1,5 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +// The schema-stitching logic is generated in github.com/Wei-Shaw/sub2api/ent/runtime/runtime.go diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go new file mode 100644 index 00000000..ed13c852 --- /dev/null +++ b/backend/ent/runtime/runtime.go @@ -0,0 +1,871 @@ +// Code generated by ent, DO NOT EDIT. 
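+//
+// This package takes effect through its init function; if a builder
+// fails with the generated hint "forgotten import ent/runtime?", the
+// usual fix is a blank import of this package:
+//
+//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"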
+ +package runtime + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/accountgroup" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/schema" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + apikeyMixin := schema.APIKey{}.Mixin() + apikeyMixinHooks1 := apikeyMixin[1].Hooks() + apikey.Hooks[0] = apikeyMixinHooks1[0] + apikeyMixinInters1 := apikeyMixin[1].Interceptors() + apikey.Interceptors[0] = apikeyMixinInters1[0] + apikeyMixinFields0 := apikeyMixin[0].Fields() + _ = apikeyMixinFields0 + apikeyFields := schema.APIKey{}.Fields() + _ = apikeyFields + // apikeyDescCreatedAt is the schema descriptor for created_at field. + apikeyDescCreatedAt := apikeyMixinFields0[0].Descriptor() + // apikey.DefaultCreatedAt holds the default value on creation for the created_at field. + apikey.DefaultCreatedAt = apikeyDescCreatedAt.Default.(func() time.Time) + // apikeyDescUpdatedAt is the schema descriptor for updated_at field. + apikeyDescUpdatedAt := apikeyMixinFields0[1].Descriptor() + // apikey.DefaultUpdatedAt holds the default value on creation for the updated_at field. + apikey.DefaultUpdatedAt = apikeyDescUpdatedAt.Default.(func() time.Time) + // apikey.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + apikey.UpdateDefaultUpdatedAt = apikeyDescUpdatedAt.UpdateDefault.(func() time.Time) + // apikeyDescKey is the schema descriptor for key field. + apikeyDescKey := apikeyFields[1].Descriptor() + // apikey.KeyValidator is a validator for the "key" field. It is called by the builders before save. + apikey.KeyValidator = func() func(string) error { + validators := apikeyDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // apikeyDescName is the schema descriptor for name field. + apikeyDescName := apikeyFields[2].Descriptor() + // apikey.NameValidator is a validator for the "name" field. It is called by the builders before save. + apikey.NameValidator = func() func(string) error { + validators := apikeyDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // apikeyDescStatus is the schema descriptor for status field. + apikeyDescStatus := apikeyFields[4].Descriptor() + // apikey.DefaultStatus holds the default value on creation for the status field. 
+ apikey.DefaultStatus = apikeyDescStatus.Default.(string) + // apikey.StatusValidator is a validator for the "status" field. It is called by the builders before save. + apikey.StatusValidator = apikeyDescStatus.Validators[0].(func(string) error) + accountMixin := schema.Account{}.Mixin() + accountMixinHooks1 := accountMixin[1].Hooks() + account.Hooks[0] = accountMixinHooks1[0] + accountMixinInters1 := accountMixin[1].Interceptors() + account.Interceptors[0] = accountMixinInters1[0] + accountMixinFields0 := accountMixin[0].Fields() + _ = accountMixinFields0 + accountFields := schema.Account{}.Fields() + _ = accountFields + // accountDescCreatedAt is the schema descriptor for created_at field. + accountDescCreatedAt := accountMixinFields0[0].Descriptor() + // account.DefaultCreatedAt holds the default value on creation for the created_at field. + account.DefaultCreatedAt = accountDescCreatedAt.Default.(func() time.Time) + // accountDescUpdatedAt is the schema descriptor for updated_at field. + accountDescUpdatedAt := accountMixinFields0[1].Descriptor() + // account.DefaultUpdatedAt holds the default value on creation for the updated_at field. + account.DefaultUpdatedAt = accountDescUpdatedAt.Default.(func() time.Time) + // account.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + account.UpdateDefaultUpdatedAt = accountDescUpdatedAt.UpdateDefault.(func() time.Time) + // accountDescName is the schema descriptor for name field. + accountDescName := accountFields[0].Descriptor() + // account.NameValidator is a validator for the "name" field. It is called by the builders before save. + account.NameValidator = func() func(string) error { + validators := accountDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // accountDescPlatform is the schema descriptor for platform field. + accountDescPlatform := accountFields[2].Descriptor() + // account.PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + account.PlatformValidator = func() func(string) error { + validators := accountDescPlatform.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(platform string) error { + for _, fn := range fns { + if err := fn(platform); err != nil { + return err + } + } + return nil + } + }() + // accountDescType is the schema descriptor for type field. + accountDescType := accountFields[3].Descriptor() + // account.TypeValidator is a validator for the "type" field. It is called by the builders before save. + account.TypeValidator = func() func(string) error { + validators := accountDescType.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_type string) error { + for _, fn := range fns { + if err := fn(_type); err != nil { + return err + } + } + return nil + } + }() + // accountDescCredentials is the schema descriptor for credentials field. + accountDescCredentials := accountFields[4].Descriptor() + // account.DefaultCredentials holds the default value on creation for the credentials field. 
+ account.DefaultCredentials = accountDescCredentials.Default.(func() map[string]interface{}) + // accountDescExtra is the schema descriptor for extra field. + accountDescExtra := accountFields[5].Descriptor() + // account.DefaultExtra holds the default value on creation for the extra field. + account.DefaultExtra = accountDescExtra.Default.(func() map[string]interface{}) + // accountDescConcurrency is the schema descriptor for concurrency field. + accountDescConcurrency := accountFields[7].Descriptor() + // account.DefaultConcurrency holds the default value on creation for the concurrency field. + account.DefaultConcurrency = accountDescConcurrency.Default.(int) + // accountDescPriority is the schema descriptor for priority field. + accountDescPriority := accountFields[8].Descriptor() + // account.DefaultPriority holds the default value on creation for the priority field. + account.DefaultPriority = accountDescPriority.Default.(int) + // accountDescRateMultiplier is the schema descriptor for rate_multiplier field. + accountDescRateMultiplier := accountFields[9].Descriptor() + // account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. + account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64) + // accountDescStatus is the schema descriptor for status field. + accountDescStatus := accountFields[10].Descriptor() + // account.DefaultStatus holds the default value on creation for the status field. + account.DefaultStatus = accountDescStatus.Default.(string) + // account.StatusValidator is a validator for the "status" field. It is called by the builders before save. + account.StatusValidator = accountDescStatus.Validators[0].(func(string) error) + // accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field. + accountDescAutoPauseOnExpired := accountFields[14].Descriptor() + // account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field. + account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool) + // accountDescSchedulable is the schema descriptor for schedulable field. + accountDescSchedulable := accountFields[15].Descriptor() + // account.DefaultSchedulable holds the default value on creation for the schedulable field. + account.DefaultSchedulable = accountDescSchedulable.Default.(bool) + // accountDescSessionWindowStatus is the schema descriptor for session_window_status field. + accountDescSessionWindowStatus := accountFields[21].Descriptor() + // account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save. + account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error) + accountgroupFields := schema.AccountGroup{}.Fields() + _ = accountgroupFields + // accountgroupDescPriority is the schema descriptor for priority field. + accountgroupDescPriority := accountgroupFields[2].Descriptor() + // accountgroup.DefaultPriority holds the default value on creation for the priority field. + accountgroup.DefaultPriority = accountgroupDescPriority.Default.(int) + // accountgroupDescCreatedAt is the schema descriptor for created_at field. + accountgroupDescCreatedAt := accountgroupFields[3].Descriptor() + // accountgroup.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ accountgroup.DefaultCreatedAt = accountgroupDescCreatedAt.Default.(func() time.Time) + groupMixin := schema.Group{}.Mixin() + groupMixinHooks1 := groupMixin[1].Hooks() + group.Hooks[0] = groupMixinHooks1[0] + groupMixinInters1 := groupMixin[1].Interceptors() + group.Interceptors[0] = groupMixinInters1[0] + groupMixinFields0 := groupMixin[0].Fields() + _ = groupMixinFields0 + groupFields := schema.Group{}.Fields() + _ = groupFields + // groupDescCreatedAt is the schema descriptor for created_at field. + groupDescCreatedAt := groupMixinFields0[0].Descriptor() + // group.DefaultCreatedAt holds the default value on creation for the created_at field. + group.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time) + // groupDescUpdatedAt is the schema descriptor for updated_at field. + groupDescUpdatedAt := groupMixinFields0[1].Descriptor() + // group.DefaultUpdatedAt holds the default value on creation for the updated_at field. + group.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time) + // group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + group.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time) + // groupDescName is the schema descriptor for name field. + groupDescName := groupFields[0].Descriptor() + // group.NameValidator is a validator for the "name" field. It is called by the builders before save. + group.NameValidator = func() func(string) error { + validators := groupDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // groupDescRateMultiplier is the schema descriptor for rate_multiplier field. + groupDescRateMultiplier := groupFields[2].Descriptor() + // group.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. + group.DefaultRateMultiplier = groupDescRateMultiplier.Default.(float64) + // groupDescIsExclusive is the schema descriptor for is_exclusive field. + groupDescIsExclusive := groupFields[3].Descriptor() + // group.DefaultIsExclusive holds the default value on creation for the is_exclusive field. + group.DefaultIsExclusive = groupDescIsExclusive.Default.(bool) + // groupDescStatus is the schema descriptor for status field. + groupDescStatus := groupFields[4].Descriptor() + // group.DefaultStatus holds the default value on creation for the status field. + group.DefaultStatus = groupDescStatus.Default.(string) + // group.StatusValidator is a validator for the "status" field. It is called by the builders before save. + group.StatusValidator = groupDescStatus.Validators[0].(func(string) error) + // groupDescPlatform is the schema descriptor for platform field. + groupDescPlatform := groupFields[5].Descriptor() + // group.DefaultPlatform holds the default value on creation for the platform field. + group.DefaultPlatform = groupDescPlatform.Default.(string) + // group.PlatformValidator is a validator for the "platform" field. It is called by the builders before save. + group.PlatformValidator = groupDescPlatform.Validators[0].(func(string) error) + // groupDescSubscriptionType is the schema descriptor for subscription_type field. + groupDescSubscriptionType := groupFields[6].Descriptor() + // group.DefaultSubscriptionType holds the default value on creation for the subscription_type field. 
+ group.DefaultSubscriptionType = groupDescSubscriptionType.Default.(string) + // group.SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save. + group.SubscriptionTypeValidator = groupDescSubscriptionType.Validators[0].(func(string) error) + // groupDescDefaultValidityDays is the schema descriptor for default_validity_days field. + groupDescDefaultValidityDays := groupFields[10].Descriptor() + // group.DefaultDefaultValidityDays holds the default value on creation for the default_validity_days field. + group.DefaultDefaultValidityDays = groupDescDefaultValidityDays.Default.(int) + // groupDescClaudeCodeOnly is the schema descriptor for claude_code_only field. + groupDescClaudeCodeOnly := groupFields[14].Descriptor() + // group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field. + group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool) + promocodeFields := schema.PromoCode{}.Fields() + _ = promocodeFields + // promocodeDescCode is the schema descriptor for code field. + promocodeDescCode := promocodeFields[0].Descriptor() + // promocode.CodeValidator is a validator for the "code" field. It is called by the builders before save. + promocode.CodeValidator = func() func(string) error { + validators := promocodeDescCode.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(code string) error { + for _, fn := range fns { + if err := fn(code); err != nil { + return err + } + } + return nil + } + }() + // promocodeDescBonusAmount is the schema descriptor for bonus_amount field. + promocodeDescBonusAmount := promocodeFields[1].Descriptor() + // promocode.DefaultBonusAmount holds the default value on creation for the bonus_amount field. + promocode.DefaultBonusAmount = promocodeDescBonusAmount.Default.(float64) + // promocodeDescMaxUses is the schema descriptor for max_uses field. + promocodeDescMaxUses := promocodeFields[2].Descriptor() + // promocode.DefaultMaxUses holds the default value on creation for the max_uses field. + promocode.DefaultMaxUses = promocodeDescMaxUses.Default.(int) + // promocodeDescUsedCount is the schema descriptor for used_count field. + promocodeDescUsedCount := promocodeFields[3].Descriptor() + // promocode.DefaultUsedCount holds the default value on creation for the used_count field. + promocode.DefaultUsedCount = promocodeDescUsedCount.Default.(int) + // promocodeDescStatus is the schema descriptor for status field. + promocodeDescStatus := promocodeFields[4].Descriptor() + // promocode.DefaultStatus holds the default value on creation for the status field. + promocode.DefaultStatus = promocodeDescStatus.Default.(string) + // promocode.StatusValidator is a validator for the "status" field. It is called by the builders before save. + promocode.StatusValidator = promocodeDescStatus.Validators[0].(func(string) error) + // promocodeDescCreatedAt is the schema descriptor for created_at field. + promocodeDescCreatedAt := promocodeFields[7].Descriptor() + // promocode.DefaultCreatedAt holds the default value on creation for the created_at field. + promocode.DefaultCreatedAt = promocodeDescCreatedAt.Default.(func() time.Time) + // promocodeDescUpdatedAt is the schema descriptor for updated_at field. + promocodeDescUpdatedAt := promocodeFields[8].Descriptor() + // promocode.DefaultUpdatedAt holds the default value on creation for the updated_at field. 
+ promocode.DefaultUpdatedAt = promocodeDescUpdatedAt.Default.(func() time.Time) + // promocode.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + promocode.UpdateDefaultUpdatedAt = promocodeDescUpdatedAt.UpdateDefault.(func() time.Time) + promocodeusageFields := schema.PromoCodeUsage{}.Fields() + _ = promocodeusageFields + // promocodeusageDescUsedAt is the schema descriptor for used_at field. + promocodeusageDescUsedAt := promocodeusageFields[3].Descriptor() + // promocodeusage.DefaultUsedAt holds the default value on creation for the used_at field. + promocodeusage.DefaultUsedAt = promocodeusageDescUsedAt.Default.(func() time.Time) + proxyMixin := schema.Proxy{}.Mixin() + proxyMixinHooks1 := proxyMixin[1].Hooks() + proxy.Hooks[0] = proxyMixinHooks1[0] + proxyMixinInters1 := proxyMixin[1].Interceptors() + proxy.Interceptors[0] = proxyMixinInters1[0] + proxyMixinFields0 := proxyMixin[0].Fields() + _ = proxyMixinFields0 + proxyFields := schema.Proxy{}.Fields() + _ = proxyFields + // proxyDescCreatedAt is the schema descriptor for created_at field. + proxyDescCreatedAt := proxyMixinFields0[0].Descriptor() + // proxy.DefaultCreatedAt holds the default value on creation for the created_at field. + proxy.DefaultCreatedAt = proxyDescCreatedAt.Default.(func() time.Time) + // proxyDescUpdatedAt is the schema descriptor for updated_at field. + proxyDescUpdatedAt := proxyMixinFields0[1].Descriptor() + // proxy.DefaultUpdatedAt holds the default value on creation for the updated_at field. + proxy.DefaultUpdatedAt = proxyDescUpdatedAt.Default.(func() time.Time) + // proxy.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + proxy.UpdateDefaultUpdatedAt = proxyDescUpdatedAt.UpdateDefault.(func() time.Time) + // proxyDescName is the schema descriptor for name field. + proxyDescName := proxyFields[0].Descriptor() + // proxy.NameValidator is a validator for the "name" field. It is called by the builders before save. + proxy.NameValidator = func() func(string) error { + validators := proxyDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // proxyDescProtocol is the schema descriptor for protocol field. + proxyDescProtocol := proxyFields[1].Descriptor() + // proxy.ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save. + proxy.ProtocolValidator = func() func(string) error { + validators := proxyDescProtocol.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(protocol string) error { + for _, fn := range fns { + if err := fn(protocol); err != nil { + return err + } + } + return nil + } + }() + // proxyDescHost is the schema descriptor for host field. + proxyDescHost := proxyFields[2].Descriptor() + // proxy.HostValidator is a validator for the "host" field. It is called by the builders before save. 
+ proxy.HostValidator = func() func(string) error { + validators := proxyDescHost.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(host string) error { + for _, fn := range fns { + if err := fn(host); err != nil { + return err + } + } + return nil + } + }() + // proxyDescUsername is the schema descriptor for username field. + proxyDescUsername := proxyFields[4].Descriptor() + // proxy.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + proxy.UsernameValidator = proxyDescUsername.Validators[0].(func(string) error) + // proxyDescPassword is the schema descriptor for password field. + proxyDescPassword := proxyFields[5].Descriptor() + // proxy.PasswordValidator is a validator for the "password" field. It is called by the builders before save. + proxy.PasswordValidator = proxyDescPassword.Validators[0].(func(string) error) + // proxyDescStatus is the schema descriptor for status field. + proxyDescStatus := proxyFields[6].Descriptor() + // proxy.DefaultStatus holds the default value on creation for the status field. + proxy.DefaultStatus = proxyDescStatus.Default.(string) + // proxy.StatusValidator is a validator for the "status" field. It is called by the builders before save. + proxy.StatusValidator = proxyDescStatus.Validators[0].(func(string) error) + redeemcodeFields := schema.RedeemCode{}.Fields() + _ = redeemcodeFields + // redeemcodeDescCode is the schema descriptor for code field. + redeemcodeDescCode := redeemcodeFields[0].Descriptor() + // redeemcode.CodeValidator is a validator for the "code" field. It is called by the builders before save. + redeemcode.CodeValidator = func() func(string) error { + validators := redeemcodeDescCode.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(code string) error { + for _, fn := range fns { + if err := fn(code); err != nil { + return err + } + } + return nil + } + }() + // redeemcodeDescType is the schema descriptor for type field. + redeemcodeDescType := redeemcodeFields[1].Descriptor() + // redeemcode.DefaultType holds the default value on creation for the type field. + redeemcode.DefaultType = redeemcodeDescType.Default.(string) + // redeemcode.TypeValidator is a validator for the "type" field. It is called by the builders before save. + redeemcode.TypeValidator = redeemcodeDescType.Validators[0].(func(string) error) + // redeemcodeDescValue is the schema descriptor for value field. + redeemcodeDescValue := redeemcodeFields[2].Descriptor() + // redeemcode.DefaultValue holds the default value on creation for the value field. + redeemcode.DefaultValue = redeemcodeDescValue.Default.(float64) + // redeemcodeDescStatus is the schema descriptor for status field. + redeemcodeDescStatus := redeemcodeFields[3].Descriptor() + // redeemcode.DefaultStatus holds the default value on creation for the status field. + redeemcode.DefaultStatus = redeemcodeDescStatus.Default.(string) + // redeemcode.StatusValidator is a validator for the "status" field. It is called by the builders before save. + redeemcode.StatusValidator = redeemcodeDescStatus.Validators[0].(func(string) error) + // redeemcodeDescCreatedAt is the schema descriptor for created_at field. + redeemcodeDescCreatedAt := redeemcodeFields[7].Descriptor() + // redeemcode.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ redeemcode.DefaultCreatedAt = redeemcodeDescCreatedAt.Default.(func() time.Time) + // redeemcodeDescValidityDays is the schema descriptor for validity_days field. + redeemcodeDescValidityDays := redeemcodeFields[9].Descriptor() + // redeemcode.DefaultValidityDays holds the default value on creation for the validity_days field. + redeemcode.DefaultValidityDays = redeemcodeDescValidityDays.Default.(int) + settingFields := schema.Setting{}.Fields() + _ = settingFields + // settingDescKey is the schema descriptor for key field. + settingDescKey := settingFields[0].Descriptor() + // setting.KeyValidator is a validator for the "key" field. It is called by the builders before save. + setting.KeyValidator = func() func(string) error { + validators := settingDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // settingDescUpdatedAt is the schema descriptor for updated_at field. + settingDescUpdatedAt := settingFields[2].Descriptor() + // setting.DefaultUpdatedAt holds the default value on creation for the updated_at field. + setting.DefaultUpdatedAt = settingDescUpdatedAt.Default.(func() time.Time) + // setting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + setting.UpdateDefaultUpdatedAt = settingDescUpdatedAt.UpdateDefault.(func() time.Time) + usagelogFields := schema.UsageLog{}.Fields() + _ = usagelogFields + // usagelogDescRequestID is the schema descriptor for request_id field. + usagelogDescRequestID := usagelogFields[3].Descriptor() + // usagelog.RequestIDValidator is a validator for the "request_id" field. It is called by the builders before save. + usagelog.RequestIDValidator = func() func(string) error { + validators := usagelogDescRequestID.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(request_id string) error { + for _, fn := range fns { + if err := fn(request_id); err != nil { + return err + } + } + return nil + } + }() + // usagelogDescModel is the schema descriptor for model field. + usagelogDescModel := usagelogFields[4].Descriptor() + // usagelog.ModelValidator is a validator for the "model" field. It is called by the builders before save. + usagelog.ModelValidator = func() func(string) error { + validators := usagelogDescModel.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(model string) error { + for _, fn := range fns { + if err := fn(model); err != nil { + return err + } + } + return nil + } + }() + // usagelogDescInputTokens is the schema descriptor for input_tokens field. + usagelogDescInputTokens := usagelogFields[7].Descriptor() + // usagelog.DefaultInputTokens holds the default value on creation for the input_tokens field. + usagelog.DefaultInputTokens = usagelogDescInputTokens.Default.(int) + // usagelogDescOutputTokens is the schema descriptor for output_tokens field. + usagelogDescOutputTokens := usagelogFields[8].Descriptor() + // usagelog.DefaultOutputTokens holds the default value on creation for the output_tokens field. + usagelog.DefaultOutputTokens = usagelogDescOutputTokens.Default.(int) + // usagelogDescCacheCreationTokens is the schema descriptor for cache_creation_tokens field. 
+ usagelogDescCacheCreationTokens := usagelogFields[9].Descriptor() + // usagelog.DefaultCacheCreationTokens holds the default value on creation for the cache_creation_tokens field. + usagelog.DefaultCacheCreationTokens = usagelogDescCacheCreationTokens.Default.(int) + // usagelogDescCacheReadTokens is the schema descriptor for cache_read_tokens field. + usagelogDescCacheReadTokens := usagelogFields[10].Descriptor() + // usagelog.DefaultCacheReadTokens holds the default value on creation for the cache_read_tokens field. + usagelog.DefaultCacheReadTokens = usagelogDescCacheReadTokens.Default.(int) + // usagelogDescCacheCreation5mTokens is the schema descriptor for cache_creation_5m_tokens field. + usagelogDescCacheCreation5mTokens := usagelogFields[11].Descriptor() + // usagelog.DefaultCacheCreation5mTokens holds the default value on creation for the cache_creation_5m_tokens field. + usagelog.DefaultCacheCreation5mTokens = usagelogDescCacheCreation5mTokens.Default.(int) + // usagelogDescCacheCreation1hTokens is the schema descriptor for cache_creation_1h_tokens field. + usagelogDescCacheCreation1hTokens := usagelogFields[12].Descriptor() + // usagelog.DefaultCacheCreation1hTokens holds the default value on creation for the cache_creation_1h_tokens field. + usagelog.DefaultCacheCreation1hTokens = usagelogDescCacheCreation1hTokens.Default.(int) + // usagelogDescInputCost is the schema descriptor for input_cost field. + usagelogDescInputCost := usagelogFields[13].Descriptor() + // usagelog.DefaultInputCost holds the default value on creation for the input_cost field. + usagelog.DefaultInputCost = usagelogDescInputCost.Default.(float64) + // usagelogDescOutputCost is the schema descriptor for output_cost field. + usagelogDescOutputCost := usagelogFields[14].Descriptor() + // usagelog.DefaultOutputCost holds the default value on creation for the output_cost field. + usagelog.DefaultOutputCost = usagelogDescOutputCost.Default.(float64) + // usagelogDescCacheCreationCost is the schema descriptor for cache_creation_cost field. + usagelogDescCacheCreationCost := usagelogFields[15].Descriptor() + // usagelog.DefaultCacheCreationCost holds the default value on creation for the cache_creation_cost field. + usagelog.DefaultCacheCreationCost = usagelogDescCacheCreationCost.Default.(float64) + // usagelogDescCacheReadCost is the schema descriptor for cache_read_cost field. + usagelogDescCacheReadCost := usagelogFields[16].Descriptor() + // usagelog.DefaultCacheReadCost holds the default value on creation for the cache_read_cost field. + usagelog.DefaultCacheReadCost = usagelogDescCacheReadCost.Default.(float64) + // usagelogDescTotalCost is the schema descriptor for total_cost field. + usagelogDescTotalCost := usagelogFields[17].Descriptor() + // usagelog.DefaultTotalCost holds the default value on creation for the total_cost field. + usagelog.DefaultTotalCost = usagelogDescTotalCost.Default.(float64) + // usagelogDescActualCost is the schema descriptor for actual_cost field. + usagelogDescActualCost := usagelogFields[18].Descriptor() + // usagelog.DefaultActualCost holds the default value on creation for the actual_cost field. + usagelog.DefaultActualCost = usagelogDescActualCost.Default.(float64) + // usagelogDescRateMultiplier is the schema descriptor for rate_multiplier field. + usagelogDescRateMultiplier := usagelogFields[19].Descriptor() + // usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. 
+ usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64) + // usagelogDescBillingType is the schema descriptor for billing_type field. + usagelogDescBillingType := usagelogFields[21].Descriptor() + // usagelog.DefaultBillingType holds the default value on creation for the billing_type field. + usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8) + // usagelogDescStream is the schema descriptor for stream field. + usagelogDescStream := usagelogFields[22].Descriptor() + // usagelog.DefaultStream holds the default value on creation for the stream field. + usagelog.DefaultStream = usagelogDescStream.Default.(bool) + // usagelogDescUserAgent is the schema descriptor for user_agent field. + usagelogDescUserAgent := usagelogFields[25].Descriptor() + // usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save. + usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error) + // usagelogDescIPAddress is the schema descriptor for ip_address field. + usagelogDescIPAddress := usagelogFields[26].Descriptor() + // usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save. + usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error) + // usagelogDescImageCount is the schema descriptor for image_count field. + usagelogDescImageCount := usagelogFields[27].Descriptor() + // usagelog.DefaultImageCount holds the default value on creation for the image_count field. + usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int) + // usagelogDescImageSize is the schema descriptor for image_size field. + usagelogDescImageSize := usagelogFields[28].Descriptor() + // usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save. + usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error) + // usagelogDescCreatedAt is the schema descriptor for created_at field. + usagelogDescCreatedAt := usagelogFields[29].Descriptor() + // usagelog.DefaultCreatedAt holds the default value on creation for the created_at field. + usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time) + userMixin := schema.User{}.Mixin() + userMixinHooks1 := userMixin[1].Hooks() + user.Hooks[0] = userMixinHooks1[0] + userMixinInters1 := userMixin[1].Interceptors() + user.Interceptors[0] = userMixinInters1[0] + userMixinFields0 := userMixin[0].Fields() + _ = userMixinFields0 + userFields := schema.User{}.Fields() + _ = userFields + // userDescCreatedAt is the schema descriptor for created_at field. + userDescCreatedAt := userMixinFields0[0].Descriptor() + // user.DefaultCreatedAt holds the default value on creation for the created_at field. + user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time) + // userDescUpdatedAt is the schema descriptor for updated_at field. + userDescUpdatedAt := userMixinFields0[1].Descriptor() + // user.DefaultUpdatedAt holds the default value on creation for the updated_at field. + user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time) + // user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + user.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time) + // userDescEmail is the schema descriptor for email field. + userDescEmail := userFields[0].Descriptor() + // user.EmailValidator is a validator for the "email" field. 
It is called by the builders before save. + user.EmailValidator = func() func(string) error { + validators := userDescEmail.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(email string) error { + for _, fn := range fns { + if err := fn(email); err != nil { + return err + } + } + return nil + } + }() + // userDescPasswordHash is the schema descriptor for password_hash field. + userDescPasswordHash := userFields[1].Descriptor() + // user.PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + user.PasswordHashValidator = func() func(string) error { + validators := userDescPasswordHash.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(password_hash string) error { + for _, fn := range fns { + if err := fn(password_hash); err != nil { + return err + } + } + return nil + } + }() + // userDescRole is the schema descriptor for role field. + userDescRole := userFields[2].Descriptor() + // user.DefaultRole holds the default value on creation for the role field. + user.DefaultRole = userDescRole.Default.(string) + // user.RoleValidator is a validator for the "role" field. It is called by the builders before save. + user.RoleValidator = userDescRole.Validators[0].(func(string) error) + // userDescBalance is the schema descriptor for balance field. + userDescBalance := userFields[3].Descriptor() + // user.DefaultBalance holds the default value on creation for the balance field. + user.DefaultBalance = userDescBalance.Default.(float64) + // userDescConcurrency is the schema descriptor for concurrency field. + userDescConcurrency := userFields[4].Descriptor() + // user.DefaultConcurrency holds the default value on creation for the concurrency field. + user.DefaultConcurrency = userDescConcurrency.Default.(int) + // userDescStatus is the schema descriptor for status field. + userDescStatus := userFields[5].Descriptor() + // user.DefaultStatus holds the default value on creation for the status field. + user.DefaultStatus = userDescStatus.Default.(string) + // user.StatusValidator is a validator for the "status" field. It is called by the builders before save. + user.StatusValidator = userDescStatus.Validators[0].(func(string) error) + // userDescUsername is the schema descriptor for username field. + userDescUsername := userFields[6].Descriptor() + // user.DefaultUsername holds the default value on creation for the username field. + user.DefaultUsername = userDescUsername.Default.(string) + // user.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + user.UsernameValidator = userDescUsername.Validators[0].(func(string) error) + // userDescNotes is the schema descriptor for notes field. + userDescNotes := userFields[7].Descriptor() + // user.DefaultNotes holds the default value on creation for the notes field. + user.DefaultNotes = userDescNotes.Default.(string) + userallowedgroupFields := schema.UserAllowedGroup{}.Fields() + _ = userallowedgroupFields + // userallowedgroupDescCreatedAt is the schema descriptor for created_at field. + userallowedgroupDescCreatedAt := userallowedgroupFields[2].Descriptor() + // userallowedgroup.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ userallowedgroup.DefaultCreatedAt = userallowedgroupDescCreatedAt.Default.(func() time.Time) + userattributedefinitionMixin := schema.UserAttributeDefinition{}.Mixin() + userattributedefinitionMixinHooks1 := userattributedefinitionMixin[1].Hooks() + userattributedefinition.Hooks[0] = userattributedefinitionMixinHooks1[0] + userattributedefinitionMixinInters1 := userattributedefinitionMixin[1].Interceptors() + userattributedefinition.Interceptors[0] = userattributedefinitionMixinInters1[0] + userattributedefinitionMixinFields0 := userattributedefinitionMixin[0].Fields() + _ = userattributedefinitionMixinFields0 + userattributedefinitionFields := schema.UserAttributeDefinition{}.Fields() + _ = userattributedefinitionFields + // userattributedefinitionDescCreatedAt is the schema descriptor for created_at field. + userattributedefinitionDescCreatedAt := userattributedefinitionMixinFields0[0].Descriptor() + // userattributedefinition.DefaultCreatedAt holds the default value on creation for the created_at field. + userattributedefinition.DefaultCreatedAt = userattributedefinitionDescCreatedAt.Default.(func() time.Time) + // userattributedefinitionDescUpdatedAt is the schema descriptor for updated_at field. + userattributedefinitionDescUpdatedAt := userattributedefinitionMixinFields0[1].Descriptor() + // userattributedefinition.DefaultUpdatedAt holds the default value on creation for the updated_at field. + userattributedefinition.DefaultUpdatedAt = userattributedefinitionDescUpdatedAt.Default.(func() time.Time) + // userattributedefinition.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + userattributedefinition.UpdateDefaultUpdatedAt = userattributedefinitionDescUpdatedAt.UpdateDefault.(func() time.Time) + // userattributedefinitionDescKey is the schema descriptor for key field. + userattributedefinitionDescKey := userattributedefinitionFields[0].Descriptor() + // userattributedefinition.KeyValidator is a validator for the "key" field. It is called by the builders before save. + userattributedefinition.KeyValidator = func() func(string) error { + validators := userattributedefinitionDescKey.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(key string) error { + for _, fn := range fns { + if err := fn(key); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescName is the schema descriptor for name field. + userattributedefinitionDescName := userattributedefinitionFields[1].Descriptor() + // userattributedefinition.NameValidator is a validator for the "name" field. It is called by the builders before save. + userattributedefinition.NameValidator = func() func(string) error { + validators := userattributedefinitionDescName.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(name string) error { + for _, fn := range fns { + if err := fn(name); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescDescription is the schema descriptor for description field. + userattributedefinitionDescDescription := userattributedefinitionFields[2].Descriptor() + // userattributedefinition.DefaultDescription holds the default value on creation for the description field. 
+ userattributedefinition.DefaultDescription = userattributedefinitionDescDescription.Default.(string) + // userattributedefinitionDescType is the schema descriptor for type field. + userattributedefinitionDescType := userattributedefinitionFields[3].Descriptor() + // userattributedefinition.TypeValidator is a validator for the "type" field. It is called by the builders before save. + userattributedefinition.TypeValidator = func() func(string) error { + validators := userattributedefinitionDescType.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(_type string) error { + for _, fn := range fns { + if err := fn(_type); err != nil { + return err + } + } + return nil + } + }() + // userattributedefinitionDescOptions is the schema descriptor for options field. + userattributedefinitionDescOptions := userattributedefinitionFields[4].Descriptor() + // userattributedefinition.DefaultOptions holds the default value on creation for the options field. + userattributedefinition.DefaultOptions = userattributedefinitionDescOptions.Default.([]map[string]interface{}) + // userattributedefinitionDescRequired is the schema descriptor for required field. + userattributedefinitionDescRequired := userattributedefinitionFields[5].Descriptor() + // userattributedefinition.DefaultRequired holds the default value on creation for the required field. + userattributedefinition.DefaultRequired = userattributedefinitionDescRequired.Default.(bool) + // userattributedefinitionDescValidation is the schema descriptor for validation field. + userattributedefinitionDescValidation := userattributedefinitionFields[6].Descriptor() + // userattributedefinition.DefaultValidation holds the default value on creation for the validation field. + userattributedefinition.DefaultValidation = userattributedefinitionDescValidation.Default.(map[string]interface{}) + // userattributedefinitionDescPlaceholder is the schema descriptor for placeholder field. + userattributedefinitionDescPlaceholder := userattributedefinitionFields[7].Descriptor() + // userattributedefinition.DefaultPlaceholder holds the default value on creation for the placeholder field. + userattributedefinition.DefaultPlaceholder = userattributedefinitionDescPlaceholder.Default.(string) + // userattributedefinition.PlaceholderValidator is a validator for the "placeholder" field. It is called by the builders before save. + userattributedefinition.PlaceholderValidator = userattributedefinitionDescPlaceholder.Validators[0].(func(string) error) + // userattributedefinitionDescDisplayOrder is the schema descriptor for display_order field. + userattributedefinitionDescDisplayOrder := userattributedefinitionFields[8].Descriptor() + // userattributedefinition.DefaultDisplayOrder holds the default value on creation for the display_order field. + userattributedefinition.DefaultDisplayOrder = userattributedefinitionDescDisplayOrder.Default.(int) + // userattributedefinitionDescEnabled is the schema descriptor for enabled field. + userattributedefinitionDescEnabled := userattributedefinitionFields[9].Descriptor() + // userattributedefinition.DefaultEnabled holds the default value on creation for the enabled field. 
+ userattributedefinition.DefaultEnabled = userattributedefinitionDescEnabled.Default.(bool) + userattributevalueMixin := schema.UserAttributeValue{}.Mixin() + userattributevalueMixinFields0 := userattributevalueMixin[0].Fields() + _ = userattributevalueMixinFields0 + userattributevalueFields := schema.UserAttributeValue{}.Fields() + _ = userattributevalueFields + // userattributevalueDescCreatedAt is the schema descriptor for created_at field. + userattributevalueDescCreatedAt := userattributevalueMixinFields0[0].Descriptor() + // userattributevalue.DefaultCreatedAt holds the default value on creation for the created_at field. + userattributevalue.DefaultCreatedAt = userattributevalueDescCreatedAt.Default.(func() time.Time) + // userattributevalueDescUpdatedAt is the schema descriptor for updated_at field. + userattributevalueDescUpdatedAt := userattributevalueMixinFields0[1].Descriptor() + // userattributevalue.DefaultUpdatedAt holds the default value on creation for the updated_at field. + userattributevalue.DefaultUpdatedAt = userattributevalueDescUpdatedAt.Default.(func() time.Time) + // userattributevalue.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + userattributevalue.UpdateDefaultUpdatedAt = userattributevalueDescUpdatedAt.UpdateDefault.(func() time.Time) + // userattributevalueDescValue is the schema descriptor for value field. + userattributevalueDescValue := userattributevalueFields[2].Descriptor() + // userattributevalue.DefaultValue holds the default value on creation for the value field. + userattributevalue.DefaultValue = userattributevalueDescValue.Default.(string) + usersubscriptionMixin := schema.UserSubscription{}.Mixin() + usersubscriptionMixinHooks1 := usersubscriptionMixin[1].Hooks() + usersubscription.Hooks[0] = usersubscriptionMixinHooks1[0] + usersubscriptionMixinInters1 := usersubscriptionMixin[1].Interceptors() + usersubscription.Interceptors[0] = usersubscriptionMixinInters1[0] + usersubscriptionMixinFields0 := usersubscriptionMixin[0].Fields() + _ = usersubscriptionMixinFields0 + usersubscriptionFields := schema.UserSubscription{}.Fields() + _ = usersubscriptionFields + // usersubscriptionDescCreatedAt is the schema descriptor for created_at field. + usersubscriptionDescCreatedAt := usersubscriptionMixinFields0[0].Descriptor() + // usersubscription.DefaultCreatedAt holds the default value on creation for the created_at field. + usersubscription.DefaultCreatedAt = usersubscriptionDescCreatedAt.Default.(func() time.Time) + // usersubscriptionDescUpdatedAt is the schema descriptor for updated_at field. + usersubscriptionDescUpdatedAt := usersubscriptionMixinFields0[1].Descriptor() + // usersubscription.DefaultUpdatedAt holds the default value on creation for the updated_at field. + usersubscription.DefaultUpdatedAt = usersubscriptionDescUpdatedAt.Default.(func() time.Time) + // usersubscription.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + usersubscription.UpdateDefaultUpdatedAt = usersubscriptionDescUpdatedAt.UpdateDefault.(func() time.Time) + // usersubscriptionDescStatus is the schema descriptor for status field. + usersubscriptionDescStatus := usersubscriptionFields[4].Descriptor() + // usersubscription.DefaultStatus holds the default value on creation for the status field. + usersubscription.DefaultStatus = usersubscriptionDescStatus.Default.(string) + // usersubscription.StatusValidator is a validator for the "status" field. It is called by the builders before save. 
+ usersubscription.StatusValidator = usersubscriptionDescStatus.Validators[0].(func(string) error) + // usersubscriptionDescDailyUsageUsd is the schema descriptor for daily_usage_usd field. + usersubscriptionDescDailyUsageUsd := usersubscriptionFields[8].Descriptor() + // usersubscription.DefaultDailyUsageUsd holds the default value on creation for the daily_usage_usd field. + usersubscription.DefaultDailyUsageUsd = usersubscriptionDescDailyUsageUsd.Default.(float64) + // usersubscriptionDescWeeklyUsageUsd is the schema descriptor for weekly_usage_usd field. + usersubscriptionDescWeeklyUsageUsd := usersubscriptionFields[9].Descriptor() + // usersubscription.DefaultWeeklyUsageUsd holds the default value on creation for the weekly_usage_usd field. + usersubscription.DefaultWeeklyUsageUsd = usersubscriptionDescWeeklyUsageUsd.Default.(float64) + // usersubscriptionDescMonthlyUsageUsd is the schema descriptor for monthly_usage_usd field. + usersubscriptionDescMonthlyUsageUsd := usersubscriptionFields[10].Descriptor() + // usersubscription.DefaultMonthlyUsageUsd holds the default value on creation for the monthly_usage_usd field. + usersubscription.DefaultMonthlyUsageUsd = usersubscriptionDescMonthlyUsageUsd.Default.(float64) + // usersubscriptionDescAssignedAt is the schema descriptor for assigned_at field. + usersubscriptionDescAssignedAt := usersubscriptionFields[12].Descriptor() + // usersubscription.DefaultAssignedAt holds the default value on creation for the assigned_at field. + usersubscription.DefaultAssignedAt = usersubscriptionDescAssignedAt.Default.(func() time.Time) +} + +const ( + Version = "v0.14.5" // Version of ent codegen. + Sum = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen. +) diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go new file mode 100644 index 00000000..dd79ba96 --- /dev/null +++ b/backend/ent/schema/account.go @@ -0,0 +1,218 @@ +// Package schema defines the database schemas for the Ent ORM. +// Each file maps to one database entity (table) and declares its fields, edges (relations), and indexes. +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Account defines the schema for the AI API account entity. +// +// Accounts are the core resource of the system: each one represents a credential that can be used to call an AI API, e.g. a Claude API account or a Gemini OAuth account. +// +// Main responsibilities: +// - store API credentials for the different platforms (Claude, Gemini, OpenAI, ...) +// - support multiple auth types (api_key, oauth, cookie, ...) +// - track the account's scheduling state (schedulable, rate-limited, overloaded, ...) +// - allocate accounts flexibly through the group mechanism +type Account struct { + ent.Schema +} + +// Annotations returns the schema's annotation configuration. +// It sets the database table name to "accounts". +func (Account) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "accounts"}, + } +} + +// Mixin returns the mixins used by this schema. +// - TimeMixin: maintains the created_at and updated_at timestamps +// - SoftDeleteMixin: provides soft deletion (deleted_at) +func (Account) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +// Fields defines all fields of the account entity. +func (Account) Fields() []ent.Field { + return []ent.Field{ + // name: display name used to identify the account in the UI + field.String("name"). + MaxLen(100). + NotEmpty(), + // notes: administrator notes (nullable) + field.String("notes"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + + // platform: owning platform, e.g. "claude", "gemini", "openai" + field.String("platform"). + MaxLen(50). + NotEmpty(), + + // type: auth type, e.g. "api_key", "oauth", "cookie" + // The type determines the structure stored in credentials + field.String("type"). + MaxLen(20). + NotEmpty(), + + // credentials: auth credentials, stored as JSONB + // The structure depends on the type field: + // - api_key: {"api_key": "sk-xxx"} + // - oauth: {"access_token": "...", "refresh_token": "...", "expires_at": "..."} + // - cookie: {"session_key": "..."} + field.JSON("credentials", map[string]any{}). + Default(func() map[string]any { return map[string]any{} }). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // extra: extension data holding platform-specific information + // e.g. a CRS account's crs_account_id or organization info + field.JSON("extra", map[string]any{}). + Default(func() map[string]any { return map[string]any{} }). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}),
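+ + // Illustrative sketch (editorial note, not part of this patch): creating an + // OAuth account through the generated builder; the setter names are assumed + // from Ent's usual codegen for the fields above. + // + // acc, err := client.Account.Create(). + // SetName("claude-main"). + // SetPlatform("claude"). + // SetType("oauth"). + // SetCredentials(map[string]any{ + // "access_token": "...", // shape follows the "type" field, see above + // "refresh_token": "...", + // }). + // Save(ctx)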
+ + // proxy_id: ID of the associated proxy configuration (optional) + // Used when the API must be reached through a specific proxy + field.Int64("proxy_id"). + Optional(). + Nillable(), + + // concurrency: maximum number of concurrent requests for the account + // Caps how many requests may hit this account at the same time + field.Int("concurrency"). + Default(3), + + // priority: account priority; lower values mean higher priority + // The scheduler prefers higher-priority accounts + field.Int("priority"). + Default(50), + + // rate_multiplier: account billing multiplier (>=0; 0 means the account bills as zero) + // Only affects account-level cost accounting; user/API key charging uses the group multiplier + field.Float("rate_multiplier"). + SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}). + Default(1.0), + + // status: account status, e.g. "active", "error", "disabled" + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + + // error_message: detailed error information recorded when the account fails + field.String("error_message"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + + // last_used_at: time of last use, for statistics and scheduling + field.Time("last_used_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + // expires_at: account expiration time (nullable) + field.Time("expires_at"). + Optional(). + Nillable(). + Comment("Account expiration time (NULL means no expiration)."). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + // auto_pause_on_expired: automatically pause scheduling after expiration + field.Bool("auto_pause_on_expired"). + Default(true). + Comment("Auto pause scheduling when account expires."), + + // ========== Scheduling and rate-limiting fields ========== + // These fields were added in migrations/005_schema_parity.sql + + // schedulable: whether the scheduler may pick this account + // false takes the account out of rotation temporarily (e.g. while a token refresh is in flight) + field.Bool("schedulable"). + Default(true), + + // rate_limited_at: when the rate limit was triggered + // Recorded when a 429 error is received + field.Time("rate_limited_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // rate_limit_reset_at: when the rate limit is expected to lift + // The scheduler avoids the account until this time + field.Time("rate_limit_reset_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // overload_until: when the overload state ends + // Set when a 529 (API overloaded) error is received + field.Time("overload_until"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // session_window_*: session-window fields + // Used for APIs that operate on session time windows (e.g. Claude Pro) + field.Time("session_window_start"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("session_window_end"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.String("session_window_status"). + Optional(). + Nillable(). + MaxLen(20), + } +}
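+ +// Illustrative sketch (editorial note, not part of this patch): how a scheduler +// could select candidate accounts with the predicates Ent generates for the +// fields above (predicate and constant names assumed from codegen): +// +// now := time.Now() +// accounts, err := client.Account.Query(). +// Where( +// account.Schedulable(true), +// account.StatusEQ("active"), +// account.Or(account.RateLimitResetAtIsNil(), account.RateLimitResetAtLT(now)), +// account.Or(account.OverloadUntilIsNil(), account.OverloadUntilLT(now)), +// ). +// Order(ent.Asc(account.FieldPriority)). +// All(ctx)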
+ Through("account_groups", AccountGroup.Type), + // proxy: 账户使用的代理配置(可选的一对一关系) + // 使用已有的 proxy_id 外键字段 + edge.To("proxy", Proxy.Type). + Field("proxy_id"). + Unique(), + // usage_logs: 该账户的使用日志 + edge.To("usage_logs", UsageLog.Type), + } +} + +// Indexes 定义数据库索引,优化查询性能。 +// 每个索引对应一个常用的查询条件。 +func (Account) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("platform"), // 按平台筛选 + index.Fields("type"), // 按认证类型筛选 + index.Fields("status"), // 按状态筛选 + index.Fields("proxy_id"), // 按代理筛选 + index.Fields("priority"), // 按优先级排序 + index.Fields("last_used_at"), // 按最后使用时间排序 + index.Fields("schedulable"), // 筛选可调度账户 + index.Fields("rate_limited_at"), // 筛选速率限制账户 + index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间 + index.Fields("overload_until"), // 筛选过载账户 + index.Fields("deleted_at"), // 软删除查询优化 + } +} diff --git a/backend/ent/schema/account_group.go b/backend/ent/schema/account_group.go new file mode 100644 index 00000000..aa270f08 --- /dev/null +++ b/backend/ent/schema/account_group.go @@ -0,0 +1,60 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// AccountGroup holds the edge schema definition for the account_groups relationship. +// It stores extra fields (priority, created_at) and uses a composite primary key. +type AccountGroup struct { + ent.Schema +} + +func (AccountGroup) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "account_groups"}, + // Composite primary key: (account_id, group_id). + field.ID("account_id", "group_id"), + } +} + +func (AccountGroup) Fields() []ent.Field { + return []ent.Field{ + field.Int64("account_id"), + field.Int64("group_id"), + field.Int("priority"). + Default(50), + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + } +} + +func (AccountGroup) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("account", Account.Type). + Unique(). + Required(). + Field("account_id"), + edge.To("group", Group.Type). + Unique(). + Required(). + Field("group_id"), + } +} + +func (AccountGroup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("group_id"), + index.Fields("priority"), + } +} diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go new file mode 100644 index 00000000..1b206089 --- /dev/null +++ b/backend/ent/schema/api_key.go @@ -0,0 +1,81 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// APIKey holds the schema definition for the APIKey entity. +type APIKey struct { + ent.Schema +} + +func (APIKey) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "api_keys"}, + } +} + +func (APIKey) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (APIKey) Fields() []ent.Field { + return []ent.Field{ + field.Int64("user_id"), + field.String("key"). + MaxLen(128). + NotEmpty(). + Unique(), + field.String("name"). + MaxLen(100). + NotEmpty(), + field.Int64("group_id"). + Optional(). + Nillable(), + field.String("status"). + MaxLen(20). 
+ +func (AccountGroup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("group_id"), + index.Fields("priority"), + } +} diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go new file mode 100644 index 00000000..1b206089 --- /dev/null +++ b/backend/ent/schema/api_key.go @@ -0,0 +1,81 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// APIKey holds the schema definition for the APIKey entity. +type APIKey struct { + ent.Schema +} + +func (APIKey) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "api_keys"}, + } +} + +func (APIKey) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (APIKey) Fields() []ent.Field { + return []ent.Field{ + field.Int64("user_id"), + field.String("key"). + MaxLen(128). + NotEmpty(). + Unique(), + field.String("name"). + MaxLen(100). + NotEmpty(), + field.Int64("group_id"). + Optional(). + Nillable(), + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + field.JSON("ip_whitelist", []string{}). + Optional(). + Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"), + field.JSON("ip_blacklist", []string{}). + Optional(). + Comment("Blocked IPs/CIDRs"), + } +} + +func (APIKey) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("user", User.Type). + Ref("api_keys"). + Field("user_id"). + Unique(). + Required(), + edge.From("group", Group.Type). + Ref("api_keys"). + Field("group_id"). + Unique(), + edge.To("usage_logs", UsageLog.Type), + } +} + +func (APIKey) Indexes() []ent.Index { + return []ent.Index{ + // key already declares Unique() in Fields(); no duplicate index needed + index.Fields("user_id"), + index.Fields("group_id"), + index.Fields("status"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go new file mode 100644 index 00000000..d38925b1 --- /dev/null +++ b/backend/ent/schema/group.go @@ -0,0 +1,127 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Group holds the schema definition for the Group entity. +type Group struct { + ent.Schema +} + +func (Group) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "groups"}, + } +} + +func (Group) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (Group) Fields() []ent.Field { + return []ent.Field{ + // Uniqueness is enforced via a partial index (WHERE deleted_at IS NULL), so names can be reused after soft deletion + // See migration 016_soft_delete_partial_unique_indexes.sql + field.String("name"). + MaxLen(100). + NotEmpty(), + field.String("description"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + field.Float("rate_multiplier"). + SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}). + Default(1.0), + field.Bool("is_exclusive"). + Default(false), + field.String("status"). + MaxLen(20). + Default(service.StatusActive), + + // Subscription-related fields (added by migration 003) + field.String("platform"). + MaxLen(50). + Default(service.PlatformAnthropic), + field.String("subscription_type"). + MaxLen(20). + Default(service.SubscriptionTypeStandard), + field.Float("daily_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("weekly_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("monthly_limit_usd"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Int("default_validity_days"). + Default(30), + + // Image generation pricing (used by the antigravity and gemini platforms) + field.Float("image_price_1k"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("image_price_2k"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + field.Float("image_price_4k"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}), + + // Claude Code client restriction (added by migration 029) + field.Bool("claude_code_only"). + Default(false). + Comment("Whether only Claude Code clients are allowed"), + field.Int64("fallback_group_id"). + Optional(). + Nillable(). + Comment("ID of the group used as a fallback for non-Claude-Code requests"), + } +}
+ Comment("非 Claude Code 请求降级使用的分组 ID"), + } +} + +func (Group) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("api_keys", APIKey.Type), + edge.To("redeem_codes", RedeemCode.Type), + edge.To("subscriptions", UserSubscription.Type), + edge.To("usage_logs", UsageLog.Type), + edge.From("accounts", Account.Type). + Ref("groups"). + Through("account_groups", AccountGroup.Type), + edge.From("allowed_users", User.Type). + Ref("allowed_groups"). + Through("user_allowed_groups", UserAllowedGroup.Type), + // 注意:fallback_group_id 直接作为字段使用,不定义 edge + // 这样允许多个分组指向同一个降级分组(M2O 关系) + } +} + +func (Group) Indexes() []ent.Index { + return []ent.Index{ + // name 字段已在 Fields() 中声明 Unique(),无需重复索引 + index.Fields("status"), + index.Fields("platform"), + index.Fields("subscription_type"), + index.Fields("is_exclusive"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/mixins/soft_delete.go b/backend/ent/schema/mixins/soft_delete.go new file mode 100644 index 00000000..9571bc9c --- /dev/null +++ b/backend/ent/schema/mixins/soft_delete.go @@ -0,0 +1,139 @@ +// Package mixins 提供 Ent schema 的可复用混入组件。 +// 包括时间戳混入、软删除混入等通用功能。 +package mixins + +import ( + "context" + "fmt" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/intercept" +) + +// SoftDeleteMixin 实现基于 deleted_at 时间戳的软删除功能。 +// +// 软删除特性: +// - 删除操作不会真正删除数据库记录,而是设置 deleted_at 时间戳 +// - 所有查询默认自动过滤 deleted_at IS NULL,只返回"未删除"的记录 +// - 通过 SkipSoftDelete(ctx) 可以绕过软删除过滤器,查询或真正删除记录 +// +// 实现原理: +// - 使用 Ent 的 Interceptor 拦截所有查询,自动添加 deleted_at IS NULL 条件 +// - 使用 Ent 的 Hook 拦截删除操作,将 DELETE 转换为 UPDATE SET deleted_at = NOW() +// +// 使用示例: +// +// func (User) Mixin() []ent.Mixin { +// return []ent.Mixin{ +// mixins.SoftDeleteMixin{}, +// } +// } +type SoftDeleteMixin struct { + mixin.Schema +} + +// Fields 定义软删除所需的字段。 +// deleted_at 字段: +// - 类型为 TIMESTAMPTZ,精确记录删除时间 +// - Optional 和 Nillable 确保新记录时该字段为 NULL +// - NULL 表示记录未被删除,非 NULL 表示已软删除 +func (SoftDeleteMixin) Fields() []ent.Field { + return []ent.Field{ + field.Time("deleted_at"). + Optional(). + Nillable(). 
+ +// softDeleteKey is the context key type used to mark that soft deletion should be skipped. +// Using an empty struct as the key avoids collisions with keys from other packages. +type softDeleteKey struct{} + +// SkipSoftDelete returns a new context that skips the soft-delete interceptor and mutator. +// +// Use cases: +// - querying soft-deleted rows (e.g. an admin browsing a recycle bin) +// - performing a real, physical delete (e.g. permanently purging data) +// - restoring a soft-deleted row +// +// Examples: +// +// // Query all users, including soft-deleted ones +// users, err := client.User.Query().All(mixins.SkipSoftDelete(ctx)) +// +// // Physically delete a row +// client.User.DeleteOneID(id).Exec(mixins.SkipSoftDelete(ctx)) +func SkipSoftDelete(parent context.Context) context.Context { + return context.WithValue(parent, softDeleteKey{}, true) +} + +// Interceptors returns the query interceptors. +// The interceptor adds a deleted_at IS NULL condition to every query so that +// soft-deleted rows never appear in ordinary query results. +func (d SoftDeleteMixin) Interceptors() []ent.Interceptor { + return []ent.Interceptor{ + intercept.TraverseFunc(func(ctx context.Context, q intercept.Query) error { + // Skip the soft-delete filter when the context asks for it + if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip { + return nil + } + // Add the deleted_at IS NULL condition to the query + d.applyPredicate(q) + return nil + }), + } +} + +// Hooks returns the mutation hooks. +// The hook intercepts DELETE operations and rewrites them as UPDATE SET deleted_at = NOW(), +// so a delete merely marks the row as deleted instead of removing it. +func (d SoftDeleteMixin) Hooks() []ent.Hook { + return []ent.Hook{ + func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + // Only handle delete operations + if m.Op() != ent.OpDelete && m.Op() != ent.OpDeleteOne { + return next.Mutate(ctx, m) + } + // Perform a real delete when the context asks for it + if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip { + return next.Mutate(ctx, m) + } + // Type-assert to reach the mutation's extended interface + mx, ok := m.(interface { + SetOp(ent.Op) + SetDeletedAt(time.Time) + WhereP(...func(*sql.Selector)) + Client() *dbent.Client + }) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Apply the soft-delete predicate so already-deleted rows are untouched + d.applyPredicate(mx) + // Rewrite the DELETE into an UPDATE + mx.SetOp(ent.OpUpdate) + // Stamp the deletion time with the current time + mx.SetDeletedAt(time.Now()) + return mx.Client().Mutate(ctx, m) + }) + }, + } +}
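+ +// Illustrative sketch (editorial note, not part of this patch): restoring a +// soft-deleted row by clearing deleted_at while bypassing the filters; +// ClearDeletedAt is the mutator Ent is expected to generate for the optional +// field above: +// +// err := client.User.UpdateOneID(id). +// ClearDeletedAt(). +// Exec(mixins.SkipSoftDelete(ctx))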
+ +// applyPredicate adds the deleted_at IS NULL condition to a query. +// This is the core of the soft-delete filtering. +func (d SoftDeleteMixin) applyPredicate(w interface{ WhereP(...func(*sql.Selector)) }) { + w.WhereP( + sql.FieldIsNull(d.Fields()[0].Descriptor().Name), + ) +} diff --git a/backend/ent/schema/mixins/time.go b/backend/ent/schema/mixins/time.go new file mode 100644 index 00000000..30ecf273 --- /dev/null +++ b/backend/ent/schema/mixins/time.go @@ -0,0 +1,32 @@ +package mixins + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" +) + +// TimeMixin provides created_at and updated_at fields compatible with the existing schema. +type TimeMixin struct { + mixin.Schema +} + +func (TimeMixin) Fields() []ent.Field { + return []ent.Field{ + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). + SchemaType(map[string]string{ + dialect.Postgres: "timestamptz", + }), + } +} diff --git a/backend/ent/schema/promo_code.go b/backend/ent/schema/promo_code.go new file mode 100644 index 00000000..c3bb824b --- /dev/null +++ b/backend/ent/schema/promo_code.go @@ -0,0 +1,87 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// PromoCode holds the schema definition for the PromoCode entity. +// +// Registration promo code: applied when a user signs up, granting bonus balance. +// Unlike RedeemCode, a PromoCode may be used multiple times (up to a usage cap). +// +// Deletion policy: hard delete +type PromoCode struct { + ent.Schema +} + +func (PromoCode) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "promo_codes"}, + } +} + +func (PromoCode) Fields() []ent.Field { + return []ent.Field{ + field.String("code"). + MaxLen(32). + NotEmpty(). + Unique(). + Comment("Promo code"), + field.Float("bonus_amount"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Default(0). + Comment("Bonus balance amount"), + field.Int("max_uses"). + Default(0). + Comment("Maximum number of uses; 0 means unlimited"), + field.Int("used_count"). + Default(0). + Comment("Number of times used"), + field.String("status"). + MaxLen(20). + Default(service.PromoCodeStatusActive). + Comment("Status: active, disabled"), + field.Time("expires_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("Expiration time; null means it never expires"), + field.String("notes"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}). + Comment("Notes"), + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + } +}
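+ +// Illustrative sketch (editorial note, not part of this patch): consuming one +// use atomically with a guarded update so used_count cannot pass max_uses; +// predicate and mutator names are assumed from Ent's codegen, and the +// max_uses == 0 (unlimited) case would need a separate branch: +// +// n, err := client.PromoCode.Update(). +// Where( +// promocode.ID(id), +// promocode.StatusEQ("active"), +// promocode.UsedCountLT(maxUses), +// ). +// AddUsedCount(1). +// Save(ctx) // n == 0 means exhausted, disabled, or missing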
+ Comment("使用时间"), + } +} + +func (PromoCodeUsage) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("promo_code", PromoCode.Type). + Ref("usage_records"). + Field("promo_code_id"). + Required(). + Unique(), + edge.From("user", User.Type). + Ref("promo_code_usages"). + Field("user_id"). + Required(). + Unique(), + } +} + +func (PromoCodeUsage) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("promo_code_id"), + index.Fields("user_id"), + // 每个用户每个优惠码只能使用一次 + index.Fields("promo_code_id", "user_id").Unique(), + } +} diff --git a/backend/ent/schema/proxy.go b/backend/ent/schema/proxy.go new file mode 100644 index 00000000..46d657d3 --- /dev/null +++ b/backend/ent/schema/proxy.go @@ -0,0 +1,72 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// Proxy holds the schema definition for the Proxy entity. +type Proxy struct { + ent.Schema +} + +func (Proxy) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "proxies"}, + } +} + +func (Proxy) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (Proxy) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + MaxLen(100). + NotEmpty(), + field.String("protocol"). + MaxLen(20). + NotEmpty(), + field.String("host"). + MaxLen(255). + NotEmpty(), + field.Int("port"), + field.String("username"). + MaxLen(100). + Optional(). + Nillable(), + field.String("password"). + MaxLen(100). + Optional(). + Nillable(), + field.String("status"). + MaxLen(20). + Default("active"), + } +} + +// Edges 定义代理实体的关联关系。 +func (Proxy) Edges() []ent.Edge { + return []ent.Edge{ + // accounts: 使用此代理的账户(反向边) + edge.From("accounts", Account.Type). + Ref("proxy"), + } +} + +func (Proxy) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("status"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/redeem_code.go b/backend/ent/schema/redeem_code.go new file mode 100644 index 00000000..b4664e06 --- /dev/null +++ b/backend/ent/schema/redeem_code.go @@ -0,0 +1,94 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// RedeemCode holds the schema definition for the RedeemCode entity. +// +// 删除策略:硬删除 +// RedeemCode 使用硬删除而非软删除,原因如下: +// - 兑换码具有一次性使用特性,删除后无需保留历史记录 +// - 已使用的兑换码通过 status 和 used_at 字段追踪,无需依赖软删除 +// - 减少数据库存储压力和查询复杂度 +// +// 如需审计已删除的兑换码,建议在删除前将关键信息写入审计日志表。 +type RedeemCode struct { + ent.Schema +} + +func (RedeemCode) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "redeem_codes"}, + } +} + +func (RedeemCode) Fields() []ent.Field { + return []ent.Field{ + field.String("code"). + MaxLen(32). + NotEmpty(). + Unique(), + field.String("type"). + MaxLen(20). + Default(service.RedeemTypeBalance), + field.Float("value"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Default(0), + field.String("status"). + MaxLen(20). + Default(service.StatusUnused), + field.Int64("used_by"). + Optional(). + Nillable(), + field.Time("used_at"). + Optional(). + Nillable(). 
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.String("notes").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "text"}),
+		field.Time("created_at").
+			Immutable().
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.Int64("group_id").
+			Optional().
+			Nillable(),
+		field.Int("validity_days").
+			Default(30),
+	}
+}
+
+func (RedeemCode) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("user", User.Type).
+			Ref("redeem_codes").
+			Field("used_by").
+			Unique(),
+		edge.From("group", Group.Type).
+			Ref("redeem_codes").
+			Field("group_id").
+			Unique(),
+	}
+}
+
+func (RedeemCode) Indexes() []ent.Index {
+	return []ent.Index{
+		// The code field is already declared Unique() in Fields(); no duplicate index needed.
+		index.Fields("status"),
+		index.Fields("used_by"),
+		index.Fields("group_id"),
+	}
+}
diff --git a/backend/ent/schema/setting.go b/backend/ent/schema/setting.go
new file mode 100644
index 00000000..0acfde59
--- /dev/null
+++ b/backend/ent/schema/setting.go
@@ -0,0 +1,54 @@
+package schema
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/field"
+)
+
+// Setting holds the schema definition for the Setting entity.
+//
+// Deletion policy: hard delete.
+// Setting uses hard delete instead of soft delete because:
+//   - Settings are simple key-value pairs; deleting one simply restores the default.
+//   - Setting changes are normally tracked via application logs, so no database-level history is needed.
+//   - Hard delete keeps the table lean and avoids accumulating stale rows.
+//
+// If setting changes must be auditable, write the change to an audit log
+// table before updating or deleting.
+type Setting struct {
+	ent.Schema
+}
+
+func (Setting) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "settings"},
+	}
+}
+
+func (Setting) Fields() []ent.Field {
+	return []ent.Field{
+		field.String("key").
+			MaxLen(100).
+			NotEmpty().
+			Unique(),
+		field.String("value").
+			SchemaType(map[string]string{
+				dialect.Postgres: "text",
+			}),
+		field.Time("updated_at").
+			Default(time.Now).
+			UpdateDefault(time.Now).
+			SchemaType(map[string]string{
+				dialect.Postgres: "timestamptz",
+			}),
+	}
+}
+
+func (Setting) Indexes() []ent.Index {
+	// The key field is already declared Unique() in Fields(); no extra index needed.
+	return nil
+}
diff --git a/backend/ent/schema/usage_log.go b/backend/ent/schema/usage_log.go
new file mode 100644
index 00000000..fc7c7165
--- /dev/null
+++ b/backend/ent/schema/usage_log.go
@@ -0,0 +1,174 @@
+// Package schema defines the database schema for the Ent ORM.
+package schema
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// UsageLog defines the schema for the usage-log entity.
+//
+// A usage log records the details of every API call, including token usage
+// and cost calculation. The table is append-only: updates and deletes are
+// not supported.
+type UsageLog struct {
+	ent.Schema
+}
+
+// Annotations returns the schema's annotation configuration.
+func (UsageLog) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "usage_logs"},
+	}
+}
+
+// Fields defines all fields of the usage-log entity.
+func (UsageLog) Fields() []ent.Field {
+	return []ent.Field{
+		// Relationship fields
+		field.Int64("user_id"),
+		field.Int64("api_key_id"),
+		field.Int64("account_id"),
+		field.String("request_id").
+			MaxLen(64).
+			NotEmpty(),
+		field.String("model").
+			MaxLen(100).
+			NotEmpty(),
+		field.Int64("group_id").
+			Optional().
+			Nillable(),
+		field.Int64("subscription_id").
+			Optional().
+			Nillable(),
+
+		// Token count fields
+		field.Int("input_tokens").
+			Default(0),
+		field.Int("output_tokens").
+			Default(0),
+		field.Int("cache_creation_tokens").
+			Default(0),
+		field.Int("cache_read_tokens").
+			Default(0),
+		field.Int("cache_creation_5m_tokens").
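+			// The cost columns below presumably relate as follows (an assumption
+			// inferred from the field names; not stated elsewhere in this file):
+			//
+			//	total_cost  = input_cost + output_cost
+			//	              + cache_creation_cost + cache_read_cost
+			//	actual_cost = total_cost * rate_multiplier
+			//	              (* account_rate_multiplier, NULL treated as 1.0)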
+			Default(0),
+		field.Int("cache_creation_1h_tokens").
+			Default(0),
+
+		// Cost fields
+		field.Float("input_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("output_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("cache_creation_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("cache_read_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("total_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("actual_cost").
+			Default(0).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}),
+		field.Float("rate_multiplier").
+			Default(1).
+			SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}),
+
+		// account_rate_multiplier: snapshot of the account billing rate multiplier (NULL is treated as 1.0)
+		field.Float("account_rate_multiplier").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}),
+
+		// Miscellaneous fields
+		field.Int8("billing_type").
+			Default(0),
+		field.Bool("stream").
+			Default(false),
+		field.Int("duration_ms").
+			Optional().
+			Nillable(),
+		field.Int("first_token_ms").
+			Optional().
+			Nillable(),
+		field.String("user_agent").
+			MaxLen(512).
+			Optional().
+			Nillable(),
+		field.String("ip_address").
+			MaxLen(45). // long enough for IPv6
+			Optional().
+			Nillable(),
+
+		// Image-generation fields (used only by image models such as gemini-3-pro-image)
+		field.Int("image_count").
+			Default(0),
+		field.String("image_size").
+			MaxLen(10).
+			Optional().
+			Nillable(),
+
+		// Timestamp (created_at only; log rows are immutable)
+		field.Time("created_at").
+			Default(time.Now).
+			Immutable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+	}
+}
+
+// Edges defines the usage-log entity's relationships.
+func (UsageLog) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("user", User.Type).
+			Ref("usage_logs").
+			Field("user_id").
+			Required().
+			Unique(),
+		edge.From("api_key", APIKey.Type).
+			Ref("usage_logs").
+			Field("api_key_id").
+			Required().
+			Unique(),
+		edge.From("account", Account.Type).
+			Ref("usage_logs").
+			Field("account_id").
+			Required().
+			Unique(),
+		edge.From("group", Group.Type).
+			Ref("usage_logs").
+			Field("group_id").
+			Unique(),
+		edge.From("subscription", UserSubscription.Type).
+			Ref("usage_logs").
+			Field("subscription_id").
+			Unique(),
+	}
+}
+
+// Indexes defines the database indexes that optimize query performance.
+func (UsageLog) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("user_id"),
+		index.Fields("api_key_id"),
+		index.Fields("account_id"),
+		index.Fields("group_id"),
+		index.Fields("subscription_id"),
+		index.Fields("created_at"),
+		index.Fields("model"),
+		index.Fields("request_id"),
+		// Composite indexes for time-range queries
+		index.Fields("user_id", "created_at"),
+		index.Fields("api_key_id", "created_at"),
+	}
+}
diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go
new file mode 100644
index 00000000..79dc2286
--- /dev/null
+++ b/backend/ent/schema/user.go
@@ -0,0 +1,87 @@
+package schema
+
+import (
+	"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// User holds the schema definition for the User entity.
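+//
+// Email uniqueness is enforced by a partial unique index created in migration
+// 016_soft_delete_partial_unique_indexes.sql rather than by field.Unique(),
+// so a soft-deleted user's email can be reused. The index presumably has the
+// following shape (index name illustrative):
+//
+//	CREATE UNIQUE INDEX users_email_key
+//	    ON users (email) WHERE deleted_at IS NULL;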
+type User struct {
+	ent.Schema
+}
+
+func (User) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "users"},
+	}
+}
+
+func (User) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		mixins.TimeMixin{},
+		mixins.SoftDeleteMixin{},
+	}
+}
+
+func (User) Fields() []ent.Field {
+	return []ent.Field{
+		// Uniqueness is enforced via a partial index (WHERE deleted_at IS NULL),
+		// so an email can be reused after a soft delete.
+		// See migration 016_soft_delete_partial_unique_indexes.sql.
+		field.String("email").
+			MaxLen(255).
+			NotEmpty(),
+		field.String("password_hash").
+			MaxLen(255).
+			NotEmpty(),
+		field.String("role").
+			MaxLen(20).
+			Default(service.RoleUser),
+		field.Float("balance").
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}).
+			Default(0),
+		field.Int("concurrency").
+			Default(5),
+		field.String("status").
+			MaxLen(20).
+			Default(service.StatusActive),
+
+		// Optional profile fields (added later; default '' in DB migration)
+		field.String("username").
+			MaxLen(100).
+			Default(""),
+		// wechat field migrated to user_attribute_values (see migration 019)
+		field.String("notes").
+			SchemaType(map[string]string{dialect.Postgres: "text"}).
+			Default(""),
+	}
+}
+
+func (User) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("api_keys", APIKey.Type),
+		edge.To("redeem_codes", RedeemCode.Type),
+		edge.To("subscriptions", UserSubscription.Type),
+		edge.To("assigned_subscriptions", UserSubscription.Type),
+		edge.To("allowed_groups", Group.Type).
+			Through("user_allowed_groups", UserAllowedGroup.Type),
+		edge.To("usage_logs", UsageLog.Type),
+		edge.To("attribute_values", UserAttributeValue.Type),
+		edge.To("promo_code_usages", PromoCodeUsage.Type),
+	}
+}
+
+func (User) Indexes() []ent.Index {
+	return []ent.Index{
+		// Email uniqueness is handled by the partial unique index from migration
+		// 016 (see Fields); it is not declared here.
+		index.Fields("status"),
+		index.Fields("deleted_at"),
+	}
+}
diff --git a/backend/ent/schema/user_allowed_group.go b/backend/ent/schema/user_allowed_group.go
new file mode 100644
index 00000000..94156219
--- /dev/null
+++ b/backend/ent/schema/user_allowed_group.go
@@ -0,0 +1,57 @@
+package schema
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/entsql"
+	"entgo.io/ent/schema"
+	"entgo.io/ent/schema/edge"
+	"entgo.io/ent/schema/field"
+	"entgo.io/ent/schema/index"
+)
+
+// UserAllowedGroup holds the edge schema definition for the user_allowed_groups relationship.
+// It replaces the legacy users.allowed_groups BIGINT[] column.
+type UserAllowedGroup struct {
+	ent.Schema
+}
+
+func (UserAllowedGroup) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "user_allowed_groups"},
+		// Composite primary key: (user_id, group_id).
+		field.ID("user_id", "group_id"),
+	}
+}
+
+func (UserAllowedGroup) Fields() []ent.Field {
+	return []ent.Field{
+		field.Int64("user_id"),
+		field.Int64("group_id"),
+		field.Time("created_at").
+			Immutable().
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+	}
+}
+
+func (UserAllowedGroup) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.To("user", User.Type).
+			Unique().
+			Required().
+			Field("user_id"),
+		edge.To("group", Group.Type).
+			Unique().
+			Required().
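+			// Because this is an edge schema with a composite (user_id, group_id)
+			// ID, rows are created through the join entity itself; a minimal
+			// sketch (builder methods assumed from the generated API):
+			//
+			//	client.UserAllowedGroup.Create().
+			//		SetUserID(uid).
+			//		SetGroupID(gid).
+			//		Exec(ctx)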
+ Field("group_id"), + } +} + +func (UserAllowedGroup) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("group_id"), + } +} diff --git a/backend/ent/schema/user_attribute_definition.go b/backend/ent/schema/user_attribute_definition.go new file mode 100644 index 00000000..eb54171a --- /dev/null +++ b/backend/ent/schema/user_attribute_definition.go @@ -0,0 +1,109 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserAttributeDefinition holds the schema definition for custom user attributes. +// +// This entity defines the metadata for user attributes, such as: +// - Attribute key (unique identifier like "company_name") +// - Display name shown in forms +// - Field type (text, number, select, etc.) +// - Validation rules +// - Whether the field is required or enabled +type UserAttributeDefinition struct { + ent.Schema +} + +func (UserAttributeDefinition) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_attribute_definitions"}, + } +} + +func (UserAttributeDefinition) Mixin() []ent.Mixin { + return []ent.Mixin{ + mixins.TimeMixin{}, + mixins.SoftDeleteMixin{}, + } +} + +func (UserAttributeDefinition) Fields() []ent.Field { + return []ent.Field{ + // key: Unique identifier for the attribute (e.g., "company_name") + // Used for programmatic reference + field.String("key"). + MaxLen(100). + NotEmpty(), + + // name: Display name shown in forms (e.g., "Company Name") + field.String("name"). + MaxLen(255). + NotEmpty(), + + // description: Optional description/help text for the attribute + field.String("description"). + SchemaType(map[string]string{dialect.Postgres: "text"}). + Default(""), + + // type: Attribute type - text, textarea, number, email, url, date, select, multi_select + field.String("type"). + MaxLen(20). + NotEmpty(), + + // options: Select options for select/multi_select types (stored as JSONB) + // Format: [{"value": "xxx", "label": "XXX"}, ...] + field.JSON("options", []map[string]any{}). + Default([]map[string]any{}). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // required: Whether this attribute is required when editing a user + field.Bool("required"). + Default(false), + + // validation: Validation rules for the attribute value (stored as JSONB) + // Format: {"min_length": 1, "max_length": 100, "min": 0, "max": 100, "pattern": "^[a-z]+$", "message": "..."} + field.JSON("validation", map[string]any{}). + Default(map[string]any{}). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}), + + // placeholder: Placeholder text shown in input fields + field.String("placeholder"). + MaxLen(255). + Default(""), + + // display_order: Order in which attributes are displayed (lower = first) + field.Int("display_order"). + Default(0), + + // enabled: Whether this attribute is active and shown in forms + field.Bool("enabled"). 
+ Default(true), + } +} + +func (UserAttributeDefinition) Edges() []ent.Edge { + return []ent.Edge{ + // values: All user values for this attribute definition + edge.To("values", UserAttributeValue.Type), + } +} + +func (UserAttributeDefinition) Indexes() []ent.Index { + return []ent.Index{ + // Partial unique index on key (WHERE deleted_at IS NULL) via migration + index.Fields("key"), + index.Fields("enabled"), + index.Fields("display_order"), + index.Fields("deleted_at"), + } +} diff --git a/backend/ent/schema/user_attribute_value.go b/backend/ent/schema/user_attribute_value.go new file mode 100644 index 00000000..fb9a9727 --- /dev/null +++ b/backend/ent/schema/user_attribute_value.go @@ -0,0 +1,74 @@ +package schema + +import ( + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + + "entgo.io/ent" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserAttributeValue holds a user's value for a specific attribute. +// +// This entity stores the actual values that users have for each attribute definition. +// Values are stored as strings and converted to the appropriate type by the application. +type UserAttributeValue struct { + ent.Schema +} + +func (UserAttributeValue) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "user_attribute_values"}, + } +} + +func (UserAttributeValue) Mixin() []ent.Mixin { + return []ent.Mixin{ + // Only use TimeMixin, no soft delete - values are hard deleted + mixins.TimeMixin{}, + } +} + +func (UserAttributeValue) Fields() []ent.Field { + return []ent.Field{ + // user_id: References the user this value belongs to + field.Int64("user_id"), + + // attribute_id: References the attribute definition + field.Int64("attribute_id"), + + // value: The actual value stored as a string + // For multi_select, this is a JSON array string + field.Text("value"). + Default(""), + } +} + +func (UserAttributeValue) Edges() []ent.Edge { + return []ent.Edge{ + // user: The user who owns this attribute value + edge.From("user", User.Type). + Ref("attribute_values"). + Field("user_id"). + Required(). + Unique(), + + // definition: The attribute definition this value is for + edge.From("definition", UserAttributeDefinition.Type). + Ref("values"). + Field("attribute_id"). + Required(). + Unique(), + } +} + +func (UserAttributeValue) Indexes() []ent.Index { + return []ent.Index{ + // Unique index on (user_id, attribute_id) + index.Fields("user_id", "attribute_id").Unique(), + index.Fields("attribute_id"), + } +} diff --git a/backend/ent/schema/user_subscription.go b/backend/ent/schema/user_subscription.go new file mode 100644 index 00000000..b21f4083 --- /dev/null +++ b/backend/ent/schema/user_subscription.go @@ -0,0 +1,117 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// UserSubscription holds the schema definition for the UserSubscription entity. 
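+//
+// The *_window_start / *_usage_usd pairs suggest rolling quota windows; a
+// sketch of the assumed bookkeeping for the daily window (not confirmed by
+// this file):
+//
+//	if sub.DailyWindowStart == nil || now.Sub(*sub.DailyWindowStart) >= 24*time.Hour {
+//		sub.DailyWindowStart, sub.DailyUsageUsd = &now, 0 // re-anchor and reset
+//	}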
+type UserSubscription struct {
+	ent.Schema
+}
+
+func (UserSubscription) Annotations() []schema.Annotation {
+	return []schema.Annotation{
+		entsql.Annotation{Table: "user_subscriptions"},
+	}
+}
+
+func (UserSubscription) Mixin() []ent.Mixin {
+	return []ent.Mixin{
+		mixins.TimeMixin{},
+		mixins.SoftDeleteMixin{},
+	}
+}
+
+func (UserSubscription) Fields() []ent.Field {
+	return []ent.Field{
+		field.Int64("user_id"),
+		field.Int64("group_id"),
+
+		field.Time("starts_at").
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.Time("expires_at").
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.String("status").
+			MaxLen(20).
+			Default(service.SubscriptionStatusActive),
+
+		field.Time("daily_window_start").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.Time("weekly_window_start").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.Time("monthly_window_start").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+
+		field.Float("daily_usage_usd").
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}).
+			Default(0),
+		field.Float("weekly_usage_usd").
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}).
+			Default(0),
+		field.Float("monthly_usage_usd").
+			SchemaType(map[string]string{dialect.Postgres: "decimal(20,10)"}).
+			Default(0),
+
+		field.Int64("assigned_by").
+			Optional().
+			Nillable(),
+		field.Time("assigned_at").
+			Default(time.Now).
+			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
+		field.String("notes").
+			Optional().
+			Nillable().
+			SchemaType(map[string]string{dialect.Postgres: "text"}),
+	}
+}
+
+func (UserSubscription) Edges() []ent.Edge {
+	return []ent.Edge{
+		edge.From("user", User.Type).
+			Ref("subscriptions").
+			Field("user_id").
+			Unique().
+			Required(),
+		edge.From("group", Group.Type).
+			Ref("subscriptions").
+			Field("group_id").
+			Unique().
+			Required(),
+		edge.From("assigned_by_user", User.Type).
+			Ref("assigned_subscriptions").
+			Field("assigned_by").
+			Unique(),
+		edge.To("usage_logs", UsageLog.Type),
+	}
+}
+
+func (UserSubscription) Indexes() []ent.Index {
+	return []ent.Index{
+		index.Fields("user_id"),
+		index.Fields("group_id"),
+		index.Fields("status"),
+		index.Fields("expires_at"),
+		index.Fields("assigned_by"),
+		// Uniqueness is enforced via a partial index (WHERE deleted_at IS NULL),
+		// allowing re-subscription after a soft delete.
+		// See migration 016_soft_delete_partial_unique_indexes.sql.
+		index.Fields("user_id", "group_id"),
+		index.Fields("deleted_at"),
+	}
+}
diff --git a/backend/ent/setting.go b/backend/ent/setting.go
new file mode 100644
index 00000000..08ce81e4
--- /dev/null
+++ b/backend/ent/setting.go
@@ -0,0 +1,128 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"github.com/Wei-Shaw/sub2api/ent/setting"
+)
+
+// Setting is the model entity for the Setting schema.
+type Setting struct {
+	config `json:"-"`
+	// ID of the ent.
+	ID int64 `json:"id,omitempty"`
+	// Key holds the value of the "key" field.
+	Key string `json:"key,omitempty"`
+	// Value holds the value of the "value" field.
+	Value string `json:"value,omitempty"`
+	// UpdatedAt holds the value of the "updated_at" field.
+	UpdatedAt time.Time `json:"updated_at,omitempty"`
+	selectValues sql.SelectValues
+}
+
+// scanValues returns the types for scanning values from sql.Rows.
+func (*Setting) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case setting.FieldID: + values[i] = new(sql.NullInt64) + case setting.FieldKey, setting.FieldValue: + values[i] = new(sql.NullString) + case setting.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Setting fields. +func (_m *Setting) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case setting.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case setting.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case setting.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.String + } + case setting.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the Setting. +// This includes values selected through modifiers, order, etc. +func (_m *Setting) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this Setting. +// Note that you need to call Setting.Unwrap() before calling this method if this Setting +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *Setting) Update() *SettingUpdateOne { + return NewSettingClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the Setting entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *Setting) Unwrap() *Setting { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: Setting is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *Setting) String() string { + var builder strings.Builder + builder.WriteString("Setting(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(_m.Value) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Settings is a parsable slice of Setting. +type Settings []*Setting diff --git a/backend/ent/setting/setting.go b/backend/ent/setting/setting.go new file mode 100644 index 00000000..79abe970 --- /dev/null +++ b/backend/ent/setting/setting.go @@ -0,0 +1,74 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package setting + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the setting type in the database. + Label = "setting" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the setting in the database. + Table = "settings" +) + +// Columns holds all SQL columns for setting fields. +var Columns = []string{ + FieldID, + FieldKey, + FieldValue, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the Setting queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/backend/ent/setting/where.go b/backend/ent/setting/where.go new file mode 100644 index 00000000..23343e9e --- /dev/null +++ b/backend/ent/setting/where.go @@ -0,0 +1,255 @@ +// Code generated by ent, DO NOT EDIT. + +package setting + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int64) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldID, id)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldKey, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldValue, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// KeyEQ applies the EQ predicate on the "key" field. +func KeyEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.Setting { + return predicate.Setting(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldContainsFold(FieldKey, v)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. 
+func ValueNEQ(v string) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.Setting { + return predicate.Setting(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.Setting { + return predicate.Setting(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.Setting { + return predicate.Setting(sql.FieldContainsFold(FieldValue, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Setting { + return predicate.Setting(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Setting { + return predicate.Setting(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. 
+func UpdatedAtLTE(v time.Time) predicate.Setting { + return predicate.Setting(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Setting) predicate.Setting { + return predicate.Setting(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Setting) predicate.Setting { + return predicate.Setting(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Setting) predicate.Setting { + return predicate.Setting(sql.NotPredicates(p)) +} diff --git a/backend/ent/setting_create.go b/backend/ent/setting_create.go new file mode 100644 index 00000000..553261e7 --- /dev/null +++ b/backend/ent/setting_create.go @@ -0,0 +1,584 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingCreate is the builder for creating a Setting entity. +type SettingCreate struct { + config + mutation *SettingMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetKey sets the "key" field. +func (_c *SettingCreate) SetKey(v string) *SettingCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetValue sets the "value" field. +func (_c *SettingCreate) SetValue(v string) *SettingCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *SettingCreate) SetUpdatedAt(v time.Time) *SettingCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *SettingCreate) SetNillableUpdatedAt(v *time.Time) *SettingCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// Mutation returns the SettingMutation object of the builder. +func (_c *SettingCreate) Mutation() *SettingMutation { + return _c.mutation +} + +// Save creates the Setting in the database. +func (_c *SettingCreate) Save(ctx context.Context) (*Setting, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *SettingCreate) SaveX(ctx context.Context) *Setting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *SettingCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *SettingCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *SettingCreate) defaults() { + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := setting.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *SettingCreate) check() error { + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "Setting.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "Setting.value"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Setting.updated_at"`)} + } + return nil +} + +func (_c *SettingCreate) sqlSave(ctx context.Context) (*Setting, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *SettingCreate) createSpec() (*Setting, *sqlgraph.CreateSpec) { + var ( + _node = &Setting{config: _c.config} + _spec = sqlgraph.NewCreateSpec(setting.Table, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, field.TypeString, value) + _node.Key = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + _node.Value = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Setting.Create(). +// SetKey(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.SettingUpsert) { +// SetKey(v+v). +// }). +// Exec(ctx) +func (_c *SettingCreate) OnConflict(opts ...sql.ConflictOption) *SettingUpsertOne { + _c.conflict = opts + return &SettingUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *SettingCreate) OnConflictColumns(columns ...string) *SettingUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &SettingUpsertOne{ + create: _c, + } +} + +type ( + // SettingUpsertOne is the builder for "upsert"-ing + // one Setting node. + SettingUpsertOne struct { + create *SettingCreate + } + + // SettingUpsert is the "OnConflict" setter. + SettingUpsert struct { + *sql.UpdateSet + } +) + +// SetKey sets the "key" field. +func (u *SettingUpsert) SetKey(v string) *SettingUpsert { + u.Set(setting.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsert) UpdateKey() *SettingUpsert { + u.SetExcluded(setting.FieldKey) + return u +} + +// SetValue sets the "value" field. 
+func (u *SettingUpsert) SetValue(v string) *SettingUpsert { + u.Set(setting.FieldValue, v) + return u +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *SettingUpsert) UpdateValue() *SettingUpsert { + u.SetExcluded(setting.FieldValue) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsert) SetUpdatedAt(v time.Time) *SettingUpsert { + u.Set(setting.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsert) UpdateUpdatedAt() *SettingUpsert { + u.SetExcluded(setting.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *SettingUpsertOne) UpdateNewValues() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *SettingUpsertOne) Ignore() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *SettingUpsertOne) DoNothing() *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the SettingCreate.OnConflict +// documentation for more info. +func (u *SettingUpsertOne) Update(set func(*SettingUpsert)) *SettingUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&SettingUpsert{UpdateSet: update}) + })) + return u +} + +// SetKey sets the "key" field. +func (u *SettingUpsertOne) SetKey(v string) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateKey() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateKey() + }) +} + +// SetValue sets the "value" field. +func (u *SettingUpsertOne) SetValue(v string) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateValue() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateValue() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsertOne) SetUpdatedAt(v time.Time) *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsertOne) UpdateUpdatedAt() *SettingUpsertOne { + return u.Update(func(s *SettingUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *SettingUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for SettingCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *SettingUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *SettingUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *SettingUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// SettingCreateBulk is the builder for creating many Setting entities in bulk. +type SettingCreateBulk struct { + config + err error + builders []*SettingCreate + conflict []sql.ConflictOption +} + +// Save creates the Setting entities in the database. +func (_c *SettingCreateBulk) Save(ctx context.Context) ([]*Setting, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*Setting, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*SettingMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *SettingCreateBulk) SaveX(ctx context.Context) []*Setting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *SettingCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *SettingCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Setting.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.SettingUpsert) { +// SetKey(v+v). +// }). 
+// Exec(ctx) +func (_c *SettingCreateBulk) OnConflict(opts ...sql.ConflictOption) *SettingUpsertBulk { + _c.conflict = opts + return &SettingUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *SettingCreateBulk) OnConflictColumns(columns ...string) *SettingUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &SettingUpsertBulk{ + create: _c, + } +} + +// SettingUpsertBulk is the builder for "upsert"-ing +// a bulk of Setting nodes. +type SettingUpsertBulk struct { + create *SettingCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *SettingUpsertBulk) UpdateNewValues() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Setting.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *SettingUpsertBulk) Ignore() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *SettingUpsertBulk) DoNothing() *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the SettingCreateBulk.OnConflict +// documentation for more info. +func (u *SettingUpsertBulk) Update(set func(*SettingUpsert)) *SettingUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&SettingUpsert{UpdateSet: update}) + })) + return u +} + +// SetKey sets the "key" field. +func (u *SettingUpsertBulk) SetKey(v string) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateKey() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateKey() + }) +} + +// SetValue sets the "value" field. +func (u *SettingUpsertBulk) SetValue(v string) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateValue() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateValue() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *SettingUpsertBulk) SetUpdatedAt(v time.Time) *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *SettingUpsertBulk) UpdateUpdatedAt() *SettingUpsertBulk { + return u.Update(func(s *SettingUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. 
+func (u *SettingUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the SettingCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for SettingCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *SettingUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/setting_delete.go b/backend/ent/setting_delete.go new file mode 100644 index 00000000..64919673 --- /dev/null +++ b/backend/ent/setting_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingDelete is the builder for deleting a Setting entity. +type SettingDelete struct { + config + hooks []Hook + mutation *SettingMutation +} + +// Where appends a list predicates to the SettingDelete builder. +func (_d *SettingDelete) Where(ps ...predicate.Setting) *SettingDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *SettingDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *SettingDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *SettingDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(setting.Table, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// SettingDeleteOne is the builder for deleting a single Setting entity. +type SettingDeleteOne struct { + _d *SettingDelete +} + +// Where appends a list predicates to the SettingDelete builder. +func (_d *SettingDeleteOne) Where(ps ...predicate.Setting) *SettingDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *SettingDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{setting.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *SettingDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/setting_query.go b/backend/ent/setting_query.go new file mode 100644 index 00000000..38eb9462 --- /dev/null +++ b/backend/ent/setting_query.go @@ -0,0 +1,564 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingQuery is the builder for querying Setting entities. +type SettingQuery struct { + config + ctx *QueryContext + order []setting.OrderOption + inters []Interceptor + predicates []predicate.Setting + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the SettingQuery builder. +func (_q *SettingQuery) Where(ps ...predicate.Setting) *SettingQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *SettingQuery) Limit(limit int) *SettingQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *SettingQuery) Offset(offset int) *SettingQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *SettingQuery) Unique(unique bool) *SettingQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *SettingQuery) Order(o ...setting.OrderOption) *SettingQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first Setting entity from the query. +// Returns a *NotFoundError when no Setting was found. +func (_q *SettingQuery) First(ctx context.Context) (*Setting, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{setting.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *SettingQuery) FirstX(ctx context.Context) *Setting { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Setting ID from the query. +// Returns a *NotFoundError when no Setting ID was found. +func (_q *SettingQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{setting.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *SettingQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Setting entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Setting entity is found. +// Returns a *NotFoundError when no Setting entities are found. +func (_q *SettingQuery) Only(ctx context.Context) (*Setting, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{setting.Label} + default: + return nil, &NotSingularError{setting.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
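+// A sketch (the KeyEQ predicate is assumed from the generated
+// setting/where.go; the key value is hypothetical):
+//
+//	s := client.Setting.Query().
+//		Where(setting.KeyEQ("site_name")).
+//		OnlyX(ctx)
+//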
+func (_q *SettingQuery) OnlyX(ctx context.Context) *Setting { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Setting ID in the query. +// Returns a *NotSingularError when more than one Setting ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *SettingQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{setting.Label} + default: + err = &NotSingularError{setting.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *SettingQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Settings. +func (_q *SettingQuery) All(ctx context.Context) ([]*Setting, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Setting, *SettingQuery]() + return withInterceptors[[]*Setting](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *SettingQuery) AllX(ctx context.Context) []*Setting { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Setting IDs. +func (_q *SettingQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(setting.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *SettingQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *SettingQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*SettingQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *SettingQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *SettingQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *SettingQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the SettingQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
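+// A sketch (the KeyHasPrefix predicate is assumed from the generated
+// setting/where.go):
+//
+//	base := client.Setting.Query().Where(setting.KeyHasPrefix("mail."))
+//	total := base.Clone().CountX(ctx)
+//	first := base.Clone().FirstX(ctx)
+//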
+func (_q *SettingQuery) Clone() *SettingQuery { + if _q == nil { + return nil + } + return &SettingQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]setting.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.Setting{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Key string `json:"key,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Setting.Query(). +// GroupBy(setting.FieldKey). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *SettingQuery) GroupBy(field string, fields ...string) *SettingGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &SettingGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = setting.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Key string `json:"key,omitempty"` +// } +// +// client.Setting.Query(). +// Select(setting.FieldKey). +// Scan(ctx, &v) +func (_q *SettingQuery) Select(fields ...string) *SettingSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &SettingSelect{SettingQuery: _q} + sbuild.label = setting.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a SettingSelect configured with the given aggregations. +func (_q *SettingQuery) Aggregate(fns ...AggregateFunc) *SettingSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *SettingQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !setting.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Setting, error) { + var ( + nodes = []*Setting{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Setting).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Setting{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *SettingQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *SettingQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, setting.FieldID) + for i := range fields { + if fields[i] != setting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(setting.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = setting.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
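+		// MySQL, for example, rejects OFFSET without LIMIT, hence the
+		// math.MaxInt32 placeholder set here and overridden just below.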
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *SettingQuery) ForUpdate(opts ...sql.LockOption) *SettingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *SettingQuery) ForShare(opts ...sql.LockOption) *SettingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// SettingGroupBy is the group-by builder for Setting entities. +type SettingGroupBy struct { + selector + build *SettingQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *SettingGroupBy) Aggregate(fns ...AggregateFunc) *SettingGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *SettingGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SettingQuery, *SettingGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *SettingGroupBy) sqlScan(ctx context.Context, root *SettingQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// SettingSelect is the builder for selecting fields of Setting entities. +type SettingSelect struct { + *SettingQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *SettingSelect) Aggregate(fns ...AggregateFunc) *SettingSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
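+// A sketch, scanning one selected column into a plain slice:
+//
+//	var values []string
+//	err := client.Setting.Query().
+//		Select(setting.FieldValue).
+//		Scan(ctx, &values)
+//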
+func (_s *SettingSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*SettingQuery, *SettingSelect](ctx, _s.SettingQuery, _s, _s.inters, v) +} + +func (_s *SettingSelect) sqlScan(ctx context.Context, root *SettingQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/setting_update.go b/backend/ent/setting_update.go new file mode 100644 index 00000000..42d016d6 --- /dev/null +++ b/backend/ent/setting_update.go @@ -0,0 +1,306 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/setting" +) + +// SettingUpdate is the builder for updating Setting entities. +type SettingUpdate struct { + config + hooks []Hook + mutation *SettingMutation +} + +// Where appends a list predicates to the SettingUpdate builder. +func (_u *SettingUpdate) Where(ps ...predicate.Setting) *SettingUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetKey sets the "key" field. +func (_u *SettingUpdate) SetKey(v string) *SettingUpdate { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *SettingUpdate) SetNillableKey(v *string) *SettingUpdate { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *SettingUpdate) SetValue(v string) *SettingUpdate { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *SettingUpdate) SetNillableValue(v *string) *SettingUpdate { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *SettingUpdate) SetUpdatedAt(v time.Time) *SettingUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the SettingMutation object of the builder. +func (_u *SettingUpdate) Mutation() *SettingMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *SettingUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *SettingUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *SettingUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *SettingUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
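+// For Setting the only such value is "updated_at", populated from
+// setting.UpdateDefaultUpdatedAt when not set explicitly.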
+func (_u *SettingUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := setting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *SettingUpdate) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + return nil +} + +func (_u *SettingUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{setting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// SettingUpdateOne is the builder for updating a single Setting entity. +type SettingUpdateOne struct { + config + fields []string + hooks []Hook + mutation *SettingMutation +} + +// SetKey sets the "key" field. +func (_u *SettingUpdateOne) SetKey(v string) *SettingUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *SettingUpdateOne) SetNillableKey(v *string) *SettingUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetValue sets the "value" field. +func (_u *SettingUpdateOne) SetValue(v string) *SettingUpdateOne { + _u.mutation.SetValue(v) + return _u +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_u *SettingUpdateOne) SetNillableValue(v *string) *SettingUpdateOne { + if v != nil { + _u.SetValue(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *SettingUpdateOne) SetUpdatedAt(v time.Time) *SettingUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the SettingMutation object of the builder. +func (_u *SettingUpdateOne) Mutation() *SettingMutation { + return _u.mutation +} + +// Where appends a list predicates to the SettingUpdate builder. +func (_u *SettingUpdateOne) Where(ps ...predicate.Setting) *SettingUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *SettingUpdateOne) Select(field string, fields ...string) *SettingUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated Setting entity. +func (_u *SettingUpdateOne) Save(ctx context.Context) (*Setting, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
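+// A sketch (id hypothetical; UpdateOneID is the standard generated
+// client entry point):
+//
+//	s := client.Setting.UpdateOneID(id).
+//		SetValue("enabled").
+//		SaveX(ctx)
+//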
+func (_u *SettingUpdateOne) SaveX(ctx context.Context) *Setting { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *SettingUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *SettingUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *SettingUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := setting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *SettingUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := setting.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "Setting.key": %w`, err)} + } + } + return nil +} + +func (_u *SettingUpdateOne) sqlSave(ctx context.Context) (_node *Setting, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(setting.Table, setting.Columns, sqlgraph.NewFieldSpec(setting.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Setting.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, setting.FieldID) + for _, f := range fields { + if !setting.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != setting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(setting.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(setting.FieldValue, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(setting.FieldUpdatedAt, field.TypeTime, value) + } + _node = &Setting{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{setting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/tx.go b/backend/ent/tx.go new file mode 100644 index 00000000..56df121a --- /dev/null +++ b/backend/ent/tx.go @@ -0,0 +1,278 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + stdsql "database/sql" + "fmt" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // APIKey is the client for interacting with the APIKey builders. + APIKey *APIKeyClient + // Account is the client for interacting with the Account builders. + Account *AccountClient + // AccountGroup is the client for interacting with the AccountGroup builders. 
+ AccountGroup *AccountGroupClient + // Group is the client for interacting with the Group builders. + Group *GroupClient + // PromoCode is the client for interacting with the PromoCode builders. + PromoCode *PromoCodeClient + // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. + PromoCodeUsage *PromoCodeUsageClient + // Proxy is the client for interacting with the Proxy builders. + Proxy *ProxyClient + // RedeemCode is the client for interacting with the RedeemCode builders. + RedeemCode *RedeemCodeClient + // Setting is the client for interacting with the Setting builders. + Setting *SettingClient + // UsageLog is the client for interacting with the UsageLog builders. + UsageLog *UsageLogClient + // User is the client for interacting with the User builders. + User *UserClient + // UserAllowedGroup is the client for interacting with the UserAllowedGroup builders. + UserAllowedGroup *UserAllowedGroupClient + // UserAttributeDefinition is the client for interacting with the UserAttributeDefinition builders. + UserAttributeDefinition *UserAttributeDefinitionClient + // UserAttributeValue is the client for interacting with the UserAttributeValue builders. + UserAttributeValue *UserAttributeValueClient + // UserSubscription is the client for interacting with the UserSubscription builders. + UserSubscription *UserSubscriptionClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. 
+ RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.APIKey = NewAPIKeyClient(tx.config) + tx.Account = NewAccountClient(tx.config) + tx.AccountGroup = NewAccountGroupClient(tx.config) + tx.Group = NewGroupClient(tx.config) + tx.PromoCode = NewPromoCodeClient(tx.config) + tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) + tx.Proxy = NewProxyClient(tx.config) + tx.RedeemCode = NewRedeemCodeClient(tx.config) + tx.Setting = NewSettingClient(tx.config) + tx.UsageLog = NewUsageLogClient(tx.config) + tx.User = NewUserClient(tx.config) + tx.UserAllowedGroup = NewUserAllowedGroupClient(tx.config) + tx.UserAttributeDefinition = NewUserAttributeDefinitionClient(tx.config) + tx.UserAttributeValue = NewUserAttributeValueClient(tx.config) + tx.UserSubscription = NewUserSubscriptionClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: APIKey.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. 
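+// User code is expected to reach it through Client.Tx (or Client.BeginTx,
+// where the driver supports it) rather than calling it directly.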
+func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) + +// ExecContext allows calling the underlying ExecContext method of the transaction if it is supported by it. +// See, database/sql#Tx.ExecContext for more information. +func (tx *txDriver) ExecContext(ctx context.Context, query string, args ...any) (stdsql.Result, error) { + ex, ok := tx.tx.(interface { + ExecContext(context.Context, string, ...any) (stdsql.Result, error) + }) + if !ok { + return nil, fmt.Errorf("Tx.ExecContext is not supported") + } + return ex.ExecContext(ctx, query, args...) +} + +// QueryContext allows calling the underlying QueryContext method of the transaction if it is supported by it. +// See, database/sql#Tx.QueryContext for more information. +func (tx *txDriver) QueryContext(ctx context.Context, query string, args ...any) (*stdsql.Rows, error) { + q, ok := tx.tx.(interface { + QueryContext(context.Context, string, ...any) (*stdsql.Rows, error) + }) + if !ok { + return nil, fmt.Errorf("Tx.QueryContext is not supported") + } + return q.QueryContext(ctx, query, args...) +} diff --git a/backend/ent/usagelog.go b/backend/ent/usagelog.go new file mode 100644 index 00000000..81c466b4 --- /dev/null +++ b/backend/ent/usagelog.go @@ -0,0 +1,558 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UsageLog is the model entity for the UsageLog schema. +type UsageLog struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // APIKeyID holds the value of the "api_key_id" field. + APIKeyID int64 `json:"api_key_id,omitempty"` + // AccountID holds the value of the "account_id" field. + AccountID int64 `json:"account_id,omitempty"` + // RequestID holds the value of the "request_id" field. 
+ RequestID string `json:"request_id,omitempty"` + // Model holds the value of the "model" field. + Model string `json:"model,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID *int64 `json:"group_id,omitempty"` + // SubscriptionID holds the value of the "subscription_id" field. + SubscriptionID *int64 `json:"subscription_id,omitempty"` + // InputTokens holds the value of the "input_tokens" field. + InputTokens int `json:"input_tokens,omitempty"` + // OutputTokens holds the value of the "output_tokens" field. + OutputTokens int `json:"output_tokens,omitempty"` + // CacheCreationTokens holds the value of the "cache_creation_tokens" field. + CacheCreationTokens int `json:"cache_creation_tokens,omitempty"` + // CacheReadTokens holds the value of the "cache_read_tokens" field. + CacheReadTokens int `json:"cache_read_tokens,omitempty"` + // CacheCreation5mTokens holds the value of the "cache_creation_5m_tokens" field. + CacheCreation5mTokens int `json:"cache_creation_5m_tokens,omitempty"` + // CacheCreation1hTokens holds the value of the "cache_creation_1h_tokens" field. + CacheCreation1hTokens int `json:"cache_creation_1h_tokens,omitempty"` + // InputCost holds the value of the "input_cost" field. + InputCost float64 `json:"input_cost,omitempty"` + // OutputCost holds the value of the "output_cost" field. + OutputCost float64 `json:"output_cost,omitempty"` + // CacheCreationCost holds the value of the "cache_creation_cost" field. + CacheCreationCost float64 `json:"cache_creation_cost,omitempty"` + // CacheReadCost holds the value of the "cache_read_cost" field. + CacheReadCost float64 `json:"cache_read_cost,omitempty"` + // TotalCost holds the value of the "total_cost" field. + TotalCost float64 `json:"total_cost,omitempty"` + // ActualCost holds the value of the "actual_cost" field. + ActualCost float64 `json:"actual_cost,omitempty"` + // RateMultiplier holds the value of the "rate_multiplier" field. + RateMultiplier float64 `json:"rate_multiplier,omitempty"` + // AccountRateMultiplier holds the value of the "account_rate_multiplier" field. + AccountRateMultiplier *float64 `json:"account_rate_multiplier,omitempty"` + // BillingType holds the value of the "billing_type" field. + BillingType int8 `json:"billing_type,omitempty"` + // Stream holds the value of the "stream" field. + Stream bool `json:"stream,omitempty"` + // DurationMs holds the value of the "duration_ms" field. + DurationMs *int `json:"duration_ms,omitempty"` + // FirstTokenMs holds the value of the "first_token_ms" field. + FirstTokenMs *int `json:"first_token_ms,omitempty"` + // UserAgent holds the value of the "user_agent" field. + UserAgent *string `json:"user_agent,omitempty"` + // IPAddress holds the value of the "ip_address" field. + IPAddress *string `json:"ip_address,omitempty"` + // ImageCount holds the value of the "image_count" field. + ImageCount int `json:"image_count,omitempty"` + // ImageSize holds the value of the "image_size" field. + ImageSize *string `json:"image_size,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UsageLogQuery when eager-loading is set. + Edges UsageLogEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UsageLogEdges holds the relations/edges for other nodes in the graph. +type UsageLogEdges struct { + // User holds the value of the user edge. 
+ User *User `json:"user,omitempty"` + // APIKey holds the value of the api_key edge. + APIKey *APIKey `json:"api_key,omitempty"` + // Account holds the value of the account edge. + Account *Account `json:"account,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // Subscription holds the value of the subscription edge. + Subscription *UserSubscription `json:"subscription,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [5]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UsageLogEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// APIKeyOrErr returns the APIKey value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UsageLogEdges) APIKeyOrErr() (*APIKey, error) { + if e.APIKey != nil { + return e.APIKey, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: apikey.Label} + } + return nil, &NotLoadedError{edge: "api_key"} +} + +// AccountOrErr returns the Account value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UsageLogEdges) AccountOrErr() (*Account, error) { + if e.Account != nil { + return e.Account, nil + } else if e.loadedTypes[2] { + return nil, &NotFoundError{label: account.Label} + } + return nil, &NotLoadedError{edge: "account"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UsageLogEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[3] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// SubscriptionOrErr returns the Subscription value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UsageLogEdges) SubscriptionOrErr() (*UserSubscription, error) { + if e.Subscription != nil { + return e.Subscription, nil + } else if e.loadedTypes[4] { + return nil, &NotFoundError{label: usersubscription.Label} + } + return nil, &NotLoadedError{edge: "subscription"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
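+// Each known column maps to a database/sql null-wrapper so NULLs scan
+// cleanly; unknown columns fall through to sql.UnknownType and remain
+// readable later via Value(name).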
+func (*UsageLog) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case usagelog.FieldStream: + values[i] = new(sql.NullBool) + case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier, usagelog.FieldAccountRateMultiplier: + values[i] = new(sql.NullFloat64) + case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount: + values[i] = new(sql.NullInt64) + case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, usagelog.FieldIPAddress, usagelog.FieldImageSize: + values[i] = new(sql.NullString) + case usagelog.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UsageLog fields. +func (_m *UsageLog) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case usagelog.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case usagelog.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case usagelog.FieldAPIKeyID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field api_key_id", values[i]) + } else if value.Valid { + _m.APIKeyID = value.Int64 + } + case usagelog.FieldAccountID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field account_id", values[i]) + } else if value.Valid { + _m.AccountID = value.Int64 + } + case usagelog.FieldRequestID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field request_id", values[i]) + } else if value.Valid { + _m.RequestID = value.String + } + case usagelog.FieldModel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field model", values[i]) + } else if value.Valid { + _m.Model = value.String + } + case usagelog.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = new(int64) + *_m.GroupID = value.Int64 + } + case usagelog.FieldSubscriptionID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field subscription_id", values[i]) + } else if value.Valid { + _m.SubscriptionID = new(int64) + *_m.SubscriptionID = value.Int64 + } + case usagelog.FieldInputTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field input_tokens", values[i]) + } else if 
value.Valid { + _m.InputTokens = int(value.Int64) + } + case usagelog.FieldOutputTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field output_tokens", values[i]) + } else if value.Valid { + _m.OutputTokens = int(value.Int64) + } + case usagelog.FieldCacheCreationTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field cache_creation_tokens", values[i]) + } else if value.Valid { + _m.CacheCreationTokens = int(value.Int64) + } + case usagelog.FieldCacheReadTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field cache_read_tokens", values[i]) + } else if value.Valid { + _m.CacheReadTokens = int(value.Int64) + } + case usagelog.FieldCacheCreation5mTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field cache_creation_5m_tokens", values[i]) + } else if value.Valid { + _m.CacheCreation5mTokens = int(value.Int64) + } + case usagelog.FieldCacheCreation1hTokens: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field cache_creation_1h_tokens", values[i]) + } else if value.Valid { + _m.CacheCreation1hTokens = int(value.Int64) + } + case usagelog.FieldInputCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field input_cost", values[i]) + } else if value.Valid { + _m.InputCost = value.Float64 + } + case usagelog.FieldOutputCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field output_cost", values[i]) + } else if value.Valid { + _m.OutputCost = value.Float64 + } + case usagelog.FieldCacheCreationCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field cache_creation_cost", values[i]) + } else if value.Valid { + _m.CacheCreationCost = value.Float64 + } + case usagelog.FieldCacheReadCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field cache_read_cost", values[i]) + } else if value.Valid { + _m.CacheReadCost = value.Float64 + } + case usagelog.FieldTotalCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field total_cost", values[i]) + } else if value.Valid { + _m.TotalCost = value.Float64 + } + case usagelog.FieldActualCost: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field actual_cost", values[i]) + } else if value.Valid { + _m.ActualCost = value.Float64 + } + case usagelog.FieldRateMultiplier: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i]) + } else if value.Valid { + _m.RateMultiplier = value.Float64 + } + case usagelog.FieldAccountRateMultiplier: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field account_rate_multiplier", values[i]) + } else if value.Valid { + _m.AccountRateMultiplier = new(float64) + *_m.AccountRateMultiplier = value.Float64 + } + case usagelog.FieldBillingType: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field billing_type", values[i]) + } else if value.Valid { + _m.BillingType = int8(value.Int64) + } + case usagelog.FieldStream: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T 
for field stream", values[i]) + } else if value.Valid { + _m.Stream = value.Bool + } + case usagelog.FieldDurationMs: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field duration_ms", values[i]) + } else if value.Valid { + _m.DurationMs = new(int) + *_m.DurationMs = int(value.Int64) + } + case usagelog.FieldFirstTokenMs: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field first_token_ms", values[i]) + } else if value.Valid { + _m.FirstTokenMs = new(int) + *_m.FirstTokenMs = int(value.Int64) + } + case usagelog.FieldUserAgent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field user_agent", values[i]) + } else if value.Valid { + _m.UserAgent = new(string) + *_m.UserAgent = value.String + } + case usagelog.FieldIPAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ip_address", values[i]) + } else if value.Valid { + _m.IPAddress = new(string) + *_m.IPAddress = value.String + } + case usagelog.FieldImageCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field image_count", values[i]) + } else if value.Valid { + _m.ImageCount = int(value.Int64) + } + case usagelog.FieldImageSize: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field image_size", values[i]) + } else if value.Valid { + _m.ImageSize = new(string) + *_m.ImageSize = value.String + } + case usagelog.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UsageLog. +// This includes values selected through modifiers, order, etc. +func (_m *UsageLog) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UsageLog entity. +func (_m *UsageLog) QueryUser() *UserQuery { + return NewUsageLogClient(_m.config).QueryUser(_m) +} + +// QueryAPIKey queries the "api_key" edge of the UsageLog entity. +func (_m *UsageLog) QueryAPIKey() *APIKeyQuery { + return NewUsageLogClient(_m.config).QueryAPIKey(_m) +} + +// QueryAccount queries the "account" edge of the UsageLog entity. +func (_m *UsageLog) QueryAccount() *AccountQuery { + return NewUsageLogClient(_m.config).QueryAccount(_m) +} + +// QueryGroup queries the "group" edge of the UsageLog entity. +func (_m *UsageLog) QueryGroup() *GroupQuery { + return NewUsageLogClient(_m.config).QueryGroup(_m) +} + +// QuerySubscription queries the "subscription" edge of the UsageLog entity. +func (_m *UsageLog) QuerySubscription() *UserSubscriptionQuery { + return NewUsageLogClient(_m.config).QuerySubscription(_m) +} + +// Update returns a builder for updating this UsageLog. +// Note that you need to call UsageLog.Unwrap() before calling this method if this UsageLog +// was returned from a transaction, and the transaction was committed or rolled back. 
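+// A sketch, for a UsageLog fetched inside a transaction that has since
+// been committed:
+//
+//	updated, err := ul.Unwrap().Update().Save(ctx)
+//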
+func (_m *UsageLog) Update() *UsageLogUpdateOne { + return NewUsageLogClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UsageLog entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UsageLog) Unwrap() *UsageLog { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UsageLog is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UsageLog) String() string { + var builder strings.Builder + builder.WriteString("UsageLog(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("api_key_id=") + builder.WriteString(fmt.Sprintf("%v", _m.APIKeyID)) + builder.WriteString(", ") + builder.WriteString("account_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AccountID)) + builder.WriteString(", ") + builder.WriteString("request_id=") + builder.WriteString(_m.RequestID) + builder.WriteString(", ") + builder.WriteString("model=") + builder.WriteString(_m.Model) + builder.WriteString(", ") + if v := _m.GroupID; v != nil { + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.SubscriptionID; v != nil { + builder.WriteString("subscription_id=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("input_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.InputTokens)) + builder.WriteString(", ") + builder.WriteString("output_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.OutputTokens)) + builder.WriteString(", ") + builder.WriteString("cache_creation_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheCreationTokens)) + builder.WriteString(", ") + builder.WriteString("cache_read_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheReadTokens)) + builder.WriteString(", ") + builder.WriteString("cache_creation_5m_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheCreation5mTokens)) + builder.WriteString(", ") + builder.WriteString("cache_creation_1h_tokens=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheCreation1hTokens)) + builder.WriteString(", ") + builder.WriteString("input_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.InputCost)) + builder.WriteString(", ") + builder.WriteString("output_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.OutputCost)) + builder.WriteString(", ") + builder.WriteString("cache_creation_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheCreationCost)) + builder.WriteString(", ") + builder.WriteString("cache_read_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.CacheReadCost)) + builder.WriteString(", ") + builder.WriteString("total_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.TotalCost)) + builder.WriteString(", ") + builder.WriteString("actual_cost=") + builder.WriteString(fmt.Sprintf("%v", _m.ActualCost)) + builder.WriteString(", ") + builder.WriteString("rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier)) + builder.WriteString(", ") + if v := _m.AccountRateMultiplier; v != nil { + builder.WriteString("account_rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("billing_type=") + builder.WriteString(fmt.Sprintf("%v", _m.BillingType)) + builder.WriteString(", ") + 
builder.WriteString("stream=") + builder.WriteString(fmt.Sprintf("%v", _m.Stream)) + builder.WriteString(", ") + if v := _m.DurationMs; v != nil { + builder.WriteString("duration_ms=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.FirstTokenMs; v != nil { + builder.WriteString("first_token_ms=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.UserAgent; v != nil { + builder.WriteString("user_agent=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.IPAddress; v != nil { + builder.WriteString("ip_address=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("image_count=") + builder.WriteString(fmt.Sprintf("%v", _m.ImageCount)) + builder.WriteString(", ") + if v := _m.ImageSize; v != nil { + builder.WriteString("image_size=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// UsageLogs is a parsable slice of UsageLog. +type UsageLogs []*UsageLog diff --git a/backend/ent/usagelog/usagelog.go b/backend/ent/usagelog/usagelog.go new file mode 100644 index 00000000..980f1e58 --- /dev/null +++ b/backend/ent/usagelog/usagelog.go @@ -0,0 +1,444 @@ +// Code generated by ent, DO NOT EDIT. + +package usagelog + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the usagelog type in the database. + Label = "usage_log" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldAPIKeyID holds the string denoting the api_key_id field in the database. + FieldAPIKeyID = "api_key_id" + // FieldAccountID holds the string denoting the account_id field in the database. + FieldAccountID = "account_id" + // FieldRequestID holds the string denoting the request_id field in the database. + FieldRequestID = "request_id" + // FieldModel holds the string denoting the model field in the database. + FieldModel = "model" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldSubscriptionID holds the string denoting the subscription_id field in the database. + FieldSubscriptionID = "subscription_id" + // FieldInputTokens holds the string denoting the input_tokens field in the database. + FieldInputTokens = "input_tokens" + // FieldOutputTokens holds the string denoting the output_tokens field in the database. + FieldOutputTokens = "output_tokens" + // FieldCacheCreationTokens holds the string denoting the cache_creation_tokens field in the database. + FieldCacheCreationTokens = "cache_creation_tokens" + // FieldCacheReadTokens holds the string denoting the cache_read_tokens field in the database. + FieldCacheReadTokens = "cache_read_tokens" + // FieldCacheCreation5mTokens holds the string denoting the cache_creation_5m_tokens field in the database. + FieldCacheCreation5mTokens = "cache_creation_5m_tokens" + // FieldCacheCreation1hTokens holds the string denoting the cache_creation_1h_tokens field in the database. + FieldCacheCreation1hTokens = "cache_creation_1h_tokens" + // FieldInputCost holds the string denoting the input_cost field in the database. 
+ FieldInputCost = "input_cost" + // FieldOutputCost holds the string denoting the output_cost field in the database. + FieldOutputCost = "output_cost" + // FieldCacheCreationCost holds the string denoting the cache_creation_cost field in the database. + FieldCacheCreationCost = "cache_creation_cost" + // FieldCacheReadCost holds the string denoting the cache_read_cost field in the database. + FieldCacheReadCost = "cache_read_cost" + // FieldTotalCost holds the string denoting the total_cost field in the database. + FieldTotalCost = "total_cost" + // FieldActualCost holds the string denoting the actual_cost field in the database. + FieldActualCost = "actual_cost" + // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database. + FieldRateMultiplier = "rate_multiplier" + // FieldAccountRateMultiplier holds the string denoting the account_rate_multiplier field in the database. + FieldAccountRateMultiplier = "account_rate_multiplier" + // FieldBillingType holds the string denoting the billing_type field in the database. + FieldBillingType = "billing_type" + // FieldStream holds the string denoting the stream field in the database. + FieldStream = "stream" + // FieldDurationMs holds the string denoting the duration_ms field in the database. + FieldDurationMs = "duration_ms" + // FieldFirstTokenMs holds the string denoting the first_token_ms field in the database. + FieldFirstTokenMs = "first_token_ms" + // FieldUserAgent holds the string denoting the user_agent field in the database. + FieldUserAgent = "user_agent" + // FieldIPAddress holds the string denoting the ip_address field in the database. + FieldIPAddress = "ip_address" + // FieldImageCount holds the string denoting the image_count field in the database. + FieldImageCount = "image_count" + // FieldImageSize holds the string denoting the image_size field in the database. + FieldImageSize = "image_size" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeAPIKey holds the string denoting the api_key edge name in mutations. + EdgeAPIKey = "api_key" + // EdgeAccount holds the string denoting the account edge name in mutations. + EdgeAccount = "account" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeSubscription holds the string denoting the subscription edge name in mutations. + EdgeSubscription = "subscription" + // Table holds the table name of the usagelog in the database. + Table = "usage_logs" + // UserTable is the table that holds the user relation/edge. + UserTable = "usage_logs" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // APIKeyTable is the table that holds the api_key relation/edge. + APIKeyTable = "usage_logs" + // APIKeyInverseTable is the table name for the APIKey entity. + // It exists in this package in order to avoid circular dependency with the "apikey" package. + APIKeyInverseTable = "api_keys" + // APIKeyColumn is the table column denoting the api_key relation/edge. + APIKeyColumn = "api_key_id" + // AccountTable is the table that holds the account relation/edge. 
+ AccountTable = "usage_logs" + // AccountInverseTable is the table name for the Account entity. + // It exists in this package in order to avoid circular dependency with the "account" package. + AccountInverseTable = "accounts" + // AccountColumn is the table column denoting the account relation/edge. + AccountColumn = "account_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "usage_logs" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" + // SubscriptionTable is the table that holds the subscription relation/edge. + SubscriptionTable = "usage_logs" + // SubscriptionInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + SubscriptionInverseTable = "user_subscriptions" + // SubscriptionColumn is the table column denoting the subscription relation/edge. + SubscriptionColumn = "subscription_id" +) + +// Columns holds all SQL columns for usagelog fields. +var Columns = []string{ + FieldID, + FieldUserID, + FieldAPIKeyID, + FieldAccountID, + FieldRequestID, + FieldModel, + FieldGroupID, + FieldSubscriptionID, + FieldInputTokens, + FieldOutputTokens, + FieldCacheCreationTokens, + FieldCacheReadTokens, + FieldCacheCreation5mTokens, + FieldCacheCreation1hTokens, + FieldInputCost, + FieldOutputCost, + FieldCacheCreationCost, + FieldCacheReadCost, + FieldTotalCost, + FieldActualCost, + FieldRateMultiplier, + FieldAccountRateMultiplier, + FieldBillingType, + FieldStream, + FieldDurationMs, + FieldFirstTokenMs, + FieldUserAgent, + FieldIPAddress, + FieldImageCount, + FieldImageSize, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // RequestIDValidator is a validator for the "request_id" field. It is called by the builders before save. + RequestIDValidator func(string) error + // ModelValidator is a validator for the "model" field. It is called by the builders before save. + ModelValidator func(string) error + // DefaultInputTokens holds the default value on creation for the "input_tokens" field. + DefaultInputTokens int + // DefaultOutputTokens holds the default value on creation for the "output_tokens" field. + DefaultOutputTokens int + // DefaultCacheCreationTokens holds the default value on creation for the "cache_creation_tokens" field. + DefaultCacheCreationTokens int + // DefaultCacheReadTokens holds the default value on creation for the "cache_read_tokens" field. + DefaultCacheReadTokens int + // DefaultCacheCreation5mTokens holds the default value on creation for the "cache_creation_5m_tokens" field. + DefaultCacheCreation5mTokens int + // DefaultCacheCreation1hTokens holds the default value on creation for the "cache_creation_1h_tokens" field. + DefaultCacheCreation1hTokens int + // DefaultInputCost holds the default value on creation for the "input_cost" field. + DefaultInputCost float64 + // DefaultOutputCost holds the default value on creation for the "output_cost" field. + DefaultOutputCost float64 + // DefaultCacheCreationCost holds the default value on creation for the "cache_creation_cost" field. 
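+
+	// Usage sketch (editorial, not generated): ValidColumn above is handy
+	// for guarding user-supplied sort or filter columns before they reach a
+	// query; "col" here is an assumed piece of untrusted input:
+	//
+	//	if !usagelog.ValidColumn(col) {
+	//		col = usagelog.FieldCreatedAt // fall back to a known column
+	//	}
+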
+ DefaultCacheCreationCost float64 + // DefaultCacheReadCost holds the default value on creation for the "cache_read_cost" field. + DefaultCacheReadCost float64 + // DefaultTotalCost holds the default value on creation for the "total_cost" field. + DefaultTotalCost float64 + // DefaultActualCost holds the default value on creation for the "actual_cost" field. + DefaultActualCost float64 + // DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field. + DefaultRateMultiplier float64 + // DefaultBillingType holds the default value on creation for the "billing_type" field. + DefaultBillingType int8 + // DefaultStream holds the default value on creation for the "stream" field. + DefaultStream bool + // UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save. + UserAgentValidator func(string) error + // IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save. + IPAddressValidator func(string) error + // DefaultImageCount holds the default value on creation for the "image_count" field. + DefaultImageCount int + // ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save. + ImageSizeValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the UsageLog queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByAPIKeyID orders the results by the api_key_id field. +func ByAPIKeyID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAPIKeyID, opts...).ToFunc() +} + +// ByAccountID orders the results by the account_id field. +func ByAccountID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccountID, opts...).ToFunc() +} + +// ByRequestID orders the results by the request_id field. +func ByRequestID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRequestID, opts...).ToFunc() +} + +// ByModel orders the results by the model field. +func ByModel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldModel, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// BySubscriptionID orders the results by the subscription_id field. +func BySubscriptionID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSubscriptionID, opts...).ToFunc() +} + +// ByInputTokens orders the results by the input_tokens field. +func ByInputTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInputTokens, opts...).ToFunc() +} + +// ByOutputTokens orders the results by the output_tokens field. +func ByOutputTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOutputTokens, opts...).ToFunc() +} + +// ByCacheCreationTokens orders the results by the cache_creation_tokens field. 
+func ByCacheCreationTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheCreationTokens, opts...).ToFunc() +} + +// ByCacheReadTokens orders the results by the cache_read_tokens field. +func ByCacheReadTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheReadTokens, opts...).ToFunc() +} + +// ByCacheCreation5mTokens orders the results by the cache_creation_5m_tokens field. +func ByCacheCreation5mTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheCreation5mTokens, opts...).ToFunc() +} + +// ByCacheCreation1hTokens orders the results by the cache_creation_1h_tokens field. +func ByCacheCreation1hTokens(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheCreation1hTokens, opts...).ToFunc() +} + +// ByInputCost orders the results by the input_cost field. +func ByInputCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInputCost, opts...).ToFunc() +} + +// ByOutputCost orders the results by the output_cost field. +func ByOutputCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOutputCost, opts...).ToFunc() +} + +// ByCacheCreationCost orders the results by the cache_creation_cost field. +func ByCacheCreationCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheCreationCost, opts...).ToFunc() +} + +// ByCacheReadCost orders the results by the cache_read_cost field. +func ByCacheReadCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCacheReadCost, opts...).ToFunc() +} + +// ByTotalCost orders the results by the total_cost field. +func ByTotalCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTotalCost, opts...).ToFunc() +} + +// ByActualCost orders the results by the actual_cost field. +func ByActualCost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldActualCost, opts...).ToFunc() +} + +// ByRateMultiplier orders the results by the rate_multiplier field. +func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() +} + +// ByAccountRateMultiplier orders the results by the account_rate_multiplier field. +func ByAccountRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccountRateMultiplier, opts...).ToFunc() +} + +// ByBillingType orders the results by the billing_type field. +func ByBillingType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBillingType, opts...).ToFunc() +} + +// ByStream orders the results by the stream field. +func ByStream(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStream, opts...).ToFunc() +} + +// ByDurationMs orders the results by the duration_ms field. +func ByDurationMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDurationMs, opts...).ToFunc() +} + +// ByFirstTokenMs orders the results by the first_token_ms field. +func ByFirstTokenMs(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFirstTokenMs, opts...).ToFunc() +} + +// ByUserAgent orders the results by the user_agent field. +func ByUserAgent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserAgent, opts...).ToFunc() +} + +// ByIPAddress orders the results by the ip_address field. 
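+
+// Usage sketch (editorial, not generated): each OrderOption plugs straight
+// into the query builder, with sql.OrderAsc/sql.OrderDesc from
+// entgo.io/ent/dialect/sql selecting the direction ("client" and "ctx"
+// assumed as elsewhere):
+//
+//	costly, err := client.UsageLog.Query().
+//		Order(usagelog.ByTotalCost(sql.OrderDesc())).
+//		Limit(50).
+//		All(ctx)
+//
+// The ByXField variants defined just below order by a column on a related
+// entity instead (the "created_at" field name on users is assumed):
+//
+//	byUser, err := client.UsageLog.Query().
+//		Order(usagelog.ByUserField("created_at", sql.OrderDesc())).
+//		All(ctx)
+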
+func ByIPAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPAddress, opts...).ToFunc() +} + +// ByImageCount orders the results by the image_count field. +func ByImageCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImageCount, opts...).ToFunc() +} + +// ByImageSize orders the results by the image_size field. +func ByImageSize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldImageSize, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAPIKeyField orders the results by api_key field. +func ByAPIKeyField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAPIKeyStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAccountField orders the results by account field. +func ByAccountField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAccountStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// BySubscriptionField orders the results by subscription field. +func BySubscriptionField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSubscriptionStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newAPIKeyStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(APIKeyInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, APIKeyTable, APIKeyColumn), + ) +} +func newAccountStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AccountInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AccountTable, AccountColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newSubscriptionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SubscriptionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, SubscriptionTable, SubscriptionColumn), + ) +} diff --git a/backend/ent/usagelog/where.go b/backend/ent/usagelog/where.go new file mode 100644 index 00000000..28e2ab4c --- /dev/null +++ b/backend/ent/usagelog/where.go @@ -0,0 +1,1611 @@ +// Code generated by ent, DO NOT EDIT. + +package usagelog + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. 
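+
+// Usage sketch (editorial, not generated): the predicates defined below all
+// return predicate.UsageLog and compose with AND semantics inside Where; the
+// user ID and model string here are assumed example values:
+//
+//	logs, err := client.UsageLog.Query().
+//		Where(usagelog.UserID(42), usagelog.Model("claude-sonnet-4")).
+//		All(ctx)
+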
+func ID(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldID, id)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldUserID, v)) +} + +// APIKeyID applies equality check predicate on the "api_key_id" field. It's identical to APIKeyIDEQ. +func APIKeyID(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAPIKeyID, v)) +} + +// AccountID applies equality check predicate on the "account_id" field. It's identical to AccountIDEQ. +func AccountID(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountID, v)) +} + +// RequestID applies equality check predicate on the "request_id" field. It's identical to RequestIDEQ. +func RequestID(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldRequestID, v)) +} + +// Model applies equality check predicate on the "model" field. It's identical to ModelEQ. +func Model(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldModel, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v)) +} + +// SubscriptionID applies equality check predicate on the "subscription_id" field. It's identical to SubscriptionIDEQ. +func SubscriptionID(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldSubscriptionID, v)) +} + +// InputTokens applies equality check predicate on the "input_tokens" field. It's identical to InputTokensEQ. +func InputTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldInputTokens, v)) +} + +// OutputTokens applies equality check predicate on the "output_tokens" field. It's identical to OutputTokensEQ. +func OutputTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldOutputTokens, v)) +} + +// CacheCreationTokens applies equality check predicate on the "cache_creation_tokens" field. It's identical to CacheCreationTokensEQ. 
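+
+// Usage sketch (editorial, not generated): the ID range predicates above
+// support simple keyset pagination; "lastSeenID" is an assumed cursor value
+// carried over from the previous page:
+//
+//	page, err := client.UsageLog.Query().
+//		Where(usagelog.IDGT(lastSeenID)).
+//		Order(usagelog.ByID()).
+//		Limit(100).
+//		All(ctx)
+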
+func CacheCreationTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreationTokens, v)) +} + +// CacheReadTokens applies equality check predicate on the "cache_read_tokens" field. It's identical to CacheReadTokensEQ. +func CacheReadTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheReadTokens, v)) +} + +// CacheCreation5mTokens applies equality check predicate on the "cache_creation_5m_tokens" field. It's identical to CacheCreation5mTokensEQ. +func CacheCreation5mTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation1hTokens applies equality check predicate on the "cache_creation_1h_tokens" field. It's identical to CacheCreation1hTokensEQ. +func CacheCreation1hTokens(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreation1hTokens, v)) +} + +// InputCost applies equality check predicate on the "input_cost" field. It's identical to InputCostEQ. +func InputCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldInputCost, v)) +} + +// OutputCost applies equality check predicate on the "output_cost" field. It's identical to OutputCostEQ. +func OutputCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldOutputCost, v)) +} + +// CacheCreationCost applies equality check predicate on the "cache_creation_cost" field. It's identical to CacheCreationCostEQ. +func CacheCreationCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreationCost, v)) +} + +// CacheReadCost applies equality check predicate on the "cache_read_cost" field. It's identical to CacheReadCostEQ. +func CacheReadCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheReadCost, v)) +} + +// TotalCost applies equality check predicate on the "total_cost" field. It's identical to TotalCostEQ. +func TotalCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldTotalCost, v)) +} + +// ActualCost applies equality check predicate on the "actual_cost" field. It's identical to ActualCostEQ. +func ActualCost(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldActualCost, v)) +} + +// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ. +func RateMultiplier(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// AccountRateMultiplier applies equality check predicate on the "account_rate_multiplier" field. It's identical to AccountRateMultiplierEQ. +func AccountRateMultiplier(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v)) +} + +// BillingType applies equality check predicate on the "billing_type" field. It's identical to BillingTypeEQ. +func BillingType(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v)) +} + +// Stream applies equality check predicate on the "stream" field. It's identical to StreamEQ. +func Stream(v bool) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldStream, v)) +} + +// DurationMs applies equality check predicate on the "duration_ms" field. It's identical to DurationMsEQ. +func DurationMs(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldDurationMs, v)) +} + +// FirstTokenMs applies equality check predicate on the "first_token_ms" field. 
It's identical to FirstTokenMsEQ. +func FirstTokenMs(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldFirstTokenMs, v)) +} + +// UserAgent applies equality check predicate on the "user_agent" field. It's identical to UserAgentEQ. +func UserAgent(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldUserAgent, v)) +} + +// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. +func IPAddress(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v)) +} + +// ImageCount applies equality check predicate on the "image_count" field. It's identical to ImageCountEQ. +func ImageCount(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v)) +} + +// ImageSize applies equality check predicate on the "image_size" field. It's identical to ImageSizeEQ. +func ImageSize(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldImageSize, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldUserID, vs...)) +} + +// APIKeyIDEQ applies the EQ predicate on the "api_key_id" field. +func APIKeyIDEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAPIKeyID, v)) +} + +// APIKeyIDNEQ applies the NEQ predicate on the "api_key_id" field. +func APIKeyIDNEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldAPIKeyID, v)) +} + +// APIKeyIDIn applies the In predicate on the "api_key_id" field. +func APIKeyIDIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldAPIKeyID, vs...)) +} + +// APIKeyIDNotIn applies the NotIn predicate on the "api_key_id" field. +func APIKeyIDNotIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldAPIKeyID, vs...)) +} + +// AccountIDEQ applies the EQ predicate on the "account_id" field. +func AccountIDEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountID, v)) +} + +// AccountIDNEQ applies the NEQ predicate on the "account_id" field. +func AccountIDNEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldAccountID, v)) +} + +// AccountIDIn applies the In predicate on the "account_id" field. +func AccountIDIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldAccountID, vs...)) +} + +// AccountIDNotIn applies the NotIn predicate on the "account_id" field. +func AccountIDNotIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldAccountID, vs...)) +} + +// RequestIDEQ applies the EQ predicate on the "request_id" field. 
+func RequestIDEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldRequestID, v)) +} + +// RequestIDNEQ applies the NEQ predicate on the "request_id" field. +func RequestIDNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldRequestID, v)) +} + +// RequestIDIn applies the In predicate on the "request_id" field. +func RequestIDIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldRequestID, vs...)) +} + +// RequestIDNotIn applies the NotIn predicate on the "request_id" field. +func RequestIDNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldRequestID, vs...)) +} + +// RequestIDGT applies the GT predicate on the "request_id" field. +func RequestIDGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldRequestID, v)) +} + +// RequestIDGTE applies the GTE predicate on the "request_id" field. +func RequestIDGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldRequestID, v)) +} + +// RequestIDLT applies the LT predicate on the "request_id" field. +func RequestIDLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldRequestID, v)) +} + +// RequestIDLTE applies the LTE predicate on the "request_id" field. +func RequestIDLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldRequestID, v)) +} + +// RequestIDContains applies the Contains predicate on the "request_id" field. +func RequestIDContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldRequestID, v)) +} + +// RequestIDHasPrefix applies the HasPrefix predicate on the "request_id" field. +func RequestIDHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldRequestID, v)) +} + +// RequestIDHasSuffix applies the HasSuffix predicate on the "request_id" field. +func RequestIDHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldRequestID, v)) +} + +// RequestIDEqualFold applies the EqualFold predicate on the "request_id" field. +func RequestIDEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldRequestID, v)) +} + +// RequestIDContainsFold applies the ContainsFold predicate on the "request_id" field. +func RequestIDContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldRequestID, v)) +} + +// ModelEQ applies the EQ predicate on the "model" field. +func ModelEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldModel, v)) +} + +// ModelNEQ applies the NEQ predicate on the "model" field. +func ModelNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldModel, v)) +} + +// ModelIn applies the In predicate on the "model" field. +func ModelIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldModel, vs...)) +} + +// ModelNotIn applies the NotIn predicate on the "model" field. +func ModelNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldModel, vs...)) +} + +// ModelGT applies the GT predicate on the "model" field. +func ModelGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldModel, v)) +} + +// ModelGTE applies the GTE predicate on the "model" field. 
+func ModelGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldModel, v)) +} + +// ModelLT applies the LT predicate on the "model" field. +func ModelLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldModel, v)) +} + +// ModelLTE applies the LTE predicate on the "model" field. +func ModelLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldModel, v)) +} + +// ModelContains applies the Contains predicate on the "model" field. +func ModelContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldModel, v)) +} + +// ModelHasPrefix applies the HasPrefix predicate on the "model" field. +func ModelHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldModel, v)) +} + +// ModelHasSuffix applies the HasSuffix predicate on the "model" field. +func ModelHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldModel, v)) +} + +// ModelEqualFold applies the EqualFold predicate on the "model" field. +func ModelEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldModel, v)) +} + +// ModelContainsFold applies the ContainsFold predicate on the "model" field. +func ModelContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldModel, v)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// GroupIDIsNil applies the IsNil predicate on the "group_id" field. +func GroupIDIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldGroupID)) +} + +// GroupIDNotNil applies the NotNil predicate on the "group_id" field. +func GroupIDNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldGroupID)) +} + +// SubscriptionIDEQ applies the EQ predicate on the "subscription_id" field. +func SubscriptionIDEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldSubscriptionID, v)) +} + +// SubscriptionIDNEQ applies the NEQ predicate on the "subscription_id" field. +func SubscriptionIDNEQ(v int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldSubscriptionID, v)) +} + +// SubscriptionIDIn applies the In predicate on the "subscription_id" field. +func SubscriptionIDIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldSubscriptionID, vs...)) +} + +// SubscriptionIDNotIn applies the NotIn predicate on the "subscription_id" field. +func SubscriptionIDNotIn(vs ...int64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldSubscriptionID, vs...)) +} + +// SubscriptionIDIsNil applies the IsNil predicate on the "subscription_id" field. 
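+
+// Usage sketch (editorial, not generated): the string predicates above allow
+// prefix and case-insensitive matching on the model name; the "claude-" and
+// "OPUS" values are assumed examples:
+//
+//	claude, err := client.UsageLog.Query().
+//		Where(usagelog.ModelHasPrefix("claude-")).
+//		All(ctx)
+//
+//	opus, err := client.UsageLog.Query().
+//		Where(usagelog.ModelContainsFold("OPUS")). // also matches "opus"
+//		All(ctx)
+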
+func SubscriptionIDIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldSubscriptionID)) +} + +// SubscriptionIDNotNil applies the NotNil predicate on the "subscription_id" field. +func SubscriptionIDNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldSubscriptionID)) +} + +// InputTokensEQ applies the EQ predicate on the "input_tokens" field. +func InputTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldInputTokens, v)) +} + +// InputTokensNEQ applies the NEQ predicate on the "input_tokens" field. +func InputTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldInputTokens, v)) +} + +// InputTokensIn applies the In predicate on the "input_tokens" field. +func InputTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldInputTokens, vs...)) +} + +// InputTokensNotIn applies the NotIn predicate on the "input_tokens" field. +func InputTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldInputTokens, vs...)) +} + +// InputTokensGT applies the GT predicate on the "input_tokens" field. +func InputTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldInputTokens, v)) +} + +// InputTokensGTE applies the GTE predicate on the "input_tokens" field. +func InputTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldInputTokens, v)) +} + +// InputTokensLT applies the LT predicate on the "input_tokens" field. +func InputTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldInputTokens, v)) +} + +// InputTokensLTE applies the LTE predicate on the "input_tokens" field. +func InputTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldInputTokens, v)) +} + +// OutputTokensEQ applies the EQ predicate on the "output_tokens" field. +func OutputTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldOutputTokens, v)) +} + +// OutputTokensNEQ applies the NEQ predicate on the "output_tokens" field. +func OutputTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldOutputTokens, v)) +} + +// OutputTokensIn applies the In predicate on the "output_tokens" field. +func OutputTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldOutputTokens, vs...)) +} + +// OutputTokensNotIn applies the NotIn predicate on the "output_tokens" field. +func OutputTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldOutputTokens, vs...)) +} + +// OutputTokensGT applies the GT predicate on the "output_tokens" field. +func OutputTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldOutputTokens, v)) +} + +// OutputTokensGTE applies the GTE predicate on the "output_tokens" field. +func OutputTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldOutputTokens, v)) +} + +// OutputTokensLT applies the LT predicate on the "output_tokens" field. +func OutputTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldOutputTokens, v)) +} + +// OutputTokensLTE applies the LTE predicate on the "output_tokens" field. +func OutputTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldOutputTokens, v)) +} + +// CacheCreationTokensEQ applies the EQ predicate on the "cache_creation_tokens" field. 
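+
+// Usage sketch (editorial, not generated): group_id and subscription_id are
+// nullable, so the IsNil/NotNil predicates above map to SQL IS NULL checks,
+// e.g. counting subscription-billed logs that have no group attached:
+//
+//	n, err := client.UsageLog.Query().
+//		Where(usagelog.GroupIDIsNil(), usagelog.SubscriptionIDNotNil()).
+//		Count(ctx)
+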
+func CacheCreationTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreationTokens, v)) +} + +// CacheCreationTokensNEQ applies the NEQ predicate on the "cache_creation_tokens" field. +func CacheCreationTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheCreationTokens, v)) +} + +// CacheCreationTokensIn applies the In predicate on the "cache_creation_tokens" field. +func CacheCreationTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheCreationTokens, vs...)) +} + +// CacheCreationTokensNotIn applies the NotIn predicate on the "cache_creation_tokens" field. +func CacheCreationTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheCreationTokens, vs...)) +} + +// CacheCreationTokensGT applies the GT predicate on the "cache_creation_tokens" field. +func CacheCreationTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheCreationTokens, v)) +} + +// CacheCreationTokensGTE applies the GTE predicate on the "cache_creation_tokens" field. +func CacheCreationTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheCreationTokens, v)) +} + +// CacheCreationTokensLT applies the LT predicate on the "cache_creation_tokens" field. +func CacheCreationTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheCreationTokens, v)) +} + +// CacheCreationTokensLTE applies the LTE predicate on the "cache_creation_tokens" field. +func CacheCreationTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheCreationTokens, v)) +} + +// CacheReadTokensEQ applies the EQ predicate on the "cache_read_tokens" field. +func CacheReadTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheReadTokens, v)) +} + +// CacheReadTokensNEQ applies the NEQ predicate on the "cache_read_tokens" field. +func CacheReadTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheReadTokens, v)) +} + +// CacheReadTokensIn applies the In predicate on the "cache_read_tokens" field. +func CacheReadTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheReadTokens, vs...)) +} + +// CacheReadTokensNotIn applies the NotIn predicate on the "cache_read_tokens" field. +func CacheReadTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheReadTokens, vs...)) +} + +// CacheReadTokensGT applies the GT predicate on the "cache_read_tokens" field. +func CacheReadTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheReadTokens, v)) +} + +// CacheReadTokensGTE applies the GTE predicate on the "cache_read_tokens" field. +func CacheReadTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheReadTokens, v)) +} + +// CacheReadTokensLT applies the LT predicate on the "cache_read_tokens" field. +func CacheReadTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheReadTokens, v)) +} + +// CacheReadTokensLTE applies the LTE predicate on the "cache_read_tokens" field. +func CacheReadTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheReadTokens, v)) +} + +// CacheCreation5mTokensEQ applies the EQ predicate on the "cache_creation_5m_tokens" field. 
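+
+// Usage sketch (editorial, not generated): the token-count ranges above make
+// cache-effectiveness checks straightforward, e.g. testing whether any
+// request for a user ever hit the prompt cache ("uid" assumed):
+//
+//	hit, err := client.UsageLog.Query().
+//		Where(usagelog.UserID(uid), usagelog.CacheReadTokensGT(0)).
+//		Exist(ctx)
+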
+func CacheCreation5mTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation5mTokensNEQ applies the NEQ predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation5mTokensIn applies the In predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheCreation5mTokens, vs...)) +} + +// CacheCreation5mTokensNotIn applies the NotIn predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheCreation5mTokens, vs...)) +} + +// CacheCreation5mTokensGT applies the GT predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation5mTokensGTE applies the GTE predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation5mTokensLT applies the LT predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation5mTokensLTE applies the LTE predicate on the "cache_creation_5m_tokens" field. +func CacheCreation5mTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheCreation5mTokens, v)) +} + +// CacheCreation1hTokensEQ applies the EQ predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreation1hTokens, v)) +} + +// CacheCreation1hTokensNEQ applies the NEQ predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheCreation1hTokens, v)) +} + +// CacheCreation1hTokensIn applies the In predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheCreation1hTokens, vs...)) +} + +// CacheCreation1hTokensNotIn applies the NotIn predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheCreation1hTokens, vs...)) +} + +// CacheCreation1hTokensGT applies the GT predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheCreation1hTokens, v)) +} + +// CacheCreation1hTokensGTE applies the GTE predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheCreation1hTokens, v)) +} + +// CacheCreation1hTokensLT applies the LT predicate on the "cache_creation_1h_tokens" field. +func CacheCreation1hTokensLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheCreation1hTokens, v)) +} + +// CacheCreation1hTokensLTE applies the LTE predicate on the "cache_creation_1h_tokens" field. 
+func CacheCreation1hTokensLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheCreation1hTokens, v)) +} + +// InputCostEQ applies the EQ predicate on the "input_cost" field. +func InputCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldInputCost, v)) +} + +// InputCostNEQ applies the NEQ predicate on the "input_cost" field. +func InputCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldInputCost, v)) +} + +// InputCostIn applies the In predicate on the "input_cost" field. +func InputCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldInputCost, vs...)) +} + +// InputCostNotIn applies the NotIn predicate on the "input_cost" field. +func InputCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldInputCost, vs...)) +} + +// InputCostGT applies the GT predicate on the "input_cost" field. +func InputCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldInputCost, v)) +} + +// InputCostGTE applies the GTE predicate on the "input_cost" field. +func InputCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldInputCost, v)) +} + +// InputCostLT applies the LT predicate on the "input_cost" field. +func InputCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldInputCost, v)) +} + +// InputCostLTE applies the LTE predicate on the "input_cost" field. +func InputCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldInputCost, v)) +} + +// OutputCostEQ applies the EQ predicate on the "output_cost" field. +func OutputCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldOutputCost, v)) +} + +// OutputCostNEQ applies the NEQ predicate on the "output_cost" field. +func OutputCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldOutputCost, v)) +} + +// OutputCostIn applies the In predicate on the "output_cost" field. +func OutputCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldOutputCost, vs...)) +} + +// OutputCostNotIn applies the NotIn predicate on the "output_cost" field. +func OutputCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldOutputCost, vs...)) +} + +// OutputCostGT applies the GT predicate on the "output_cost" field. +func OutputCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldOutputCost, v)) +} + +// OutputCostGTE applies the GTE predicate on the "output_cost" field. +func OutputCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldOutputCost, v)) +} + +// OutputCostLT applies the LT predicate on the "output_cost" field. +func OutputCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldOutputCost, v)) +} + +// OutputCostLTE applies the LTE predicate on the "output_cost" field. +func OutputCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldOutputCost, v)) +} + +// CacheCreationCostEQ applies the EQ predicate on the "cache_creation_cost" field. +func CacheCreationCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheCreationCost, v)) +} + +// CacheCreationCostNEQ applies the NEQ predicate on the "cache_creation_cost" field. 
+func CacheCreationCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheCreationCost, v)) +} + +// CacheCreationCostIn applies the In predicate on the "cache_creation_cost" field. +func CacheCreationCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheCreationCost, vs...)) +} + +// CacheCreationCostNotIn applies the NotIn predicate on the "cache_creation_cost" field. +func CacheCreationCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheCreationCost, vs...)) +} + +// CacheCreationCostGT applies the GT predicate on the "cache_creation_cost" field. +func CacheCreationCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheCreationCost, v)) +} + +// CacheCreationCostGTE applies the GTE predicate on the "cache_creation_cost" field. +func CacheCreationCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheCreationCost, v)) +} + +// CacheCreationCostLT applies the LT predicate on the "cache_creation_cost" field. +func CacheCreationCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheCreationCost, v)) +} + +// CacheCreationCostLTE applies the LTE predicate on the "cache_creation_cost" field. +func CacheCreationCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheCreationCost, v)) +} + +// CacheReadCostEQ applies the EQ predicate on the "cache_read_cost" field. +func CacheReadCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCacheReadCost, v)) +} + +// CacheReadCostNEQ applies the NEQ predicate on the "cache_read_cost" field. +func CacheReadCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCacheReadCost, v)) +} + +// CacheReadCostIn applies the In predicate on the "cache_read_cost" field. +func CacheReadCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCacheReadCost, vs...)) +} + +// CacheReadCostNotIn applies the NotIn predicate on the "cache_read_cost" field. +func CacheReadCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCacheReadCost, vs...)) +} + +// CacheReadCostGT applies the GT predicate on the "cache_read_cost" field. +func CacheReadCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCacheReadCost, v)) +} + +// CacheReadCostGTE applies the GTE predicate on the "cache_read_cost" field. +func CacheReadCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCacheReadCost, v)) +} + +// CacheReadCostLT applies the LT predicate on the "cache_read_cost" field. +func CacheReadCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCacheReadCost, v)) +} + +// CacheReadCostLTE applies the LTE predicate on the "cache_read_cost" field. +func CacheReadCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCacheReadCost, v)) +} + +// TotalCostEQ applies the EQ predicate on the "total_cost" field. +func TotalCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldTotalCost, v)) +} + +// TotalCostNEQ applies the NEQ predicate on the "total_cost" field. +func TotalCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldTotalCost, v)) +} + +// TotalCostIn applies the In predicate on the "total_cost" field. 
+func TotalCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldTotalCost, vs...)) +} + +// TotalCostNotIn applies the NotIn predicate on the "total_cost" field. +func TotalCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldTotalCost, vs...)) +} + +// TotalCostGT applies the GT predicate on the "total_cost" field. +func TotalCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldTotalCost, v)) +} + +// TotalCostGTE applies the GTE predicate on the "total_cost" field. +func TotalCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldTotalCost, v)) +} + +// TotalCostLT applies the LT predicate on the "total_cost" field. +func TotalCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldTotalCost, v)) +} + +// TotalCostLTE applies the LTE predicate on the "total_cost" field. +func TotalCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldTotalCost, v)) +} + +// ActualCostEQ applies the EQ predicate on the "actual_cost" field. +func ActualCostEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldActualCost, v)) +} + +// ActualCostNEQ applies the NEQ predicate on the "actual_cost" field. +func ActualCostNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldActualCost, v)) +} + +// ActualCostIn applies the In predicate on the "actual_cost" field. +func ActualCostIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldActualCost, vs...)) +} + +// ActualCostNotIn applies the NotIn predicate on the "actual_cost" field. +func ActualCostNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldActualCost, vs...)) +} + +// ActualCostGT applies the GT predicate on the "actual_cost" field. +func ActualCostGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldActualCost, v)) +} + +// ActualCostGTE applies the GTE predicate on the "actual_cost" field. +func ActualCostGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldActualCost, v)) +} + +// ActualCostLT applies the LT predicate on the "actual_cost" field. +func ActualCostLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldActualCost, v)) +} + +// ActualCostLTE applies the LTE predicate on the "actual_cost" field. +func ActualCostLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldActualCost, v)) +} + +// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field. +func RateMultiplierEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field. +func RateMultiplierNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierIn applies the In predicate on the "rate_multiplier" field. +func RateMultiplierIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field. +func RateMultiplierNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field. 
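+
+// Usage sketch (editorial, not generated): exact equality on float costs is
+// rarely useful, so the GTE/LT predicates above are typically combined into
+// a half-open band; the [1.0, 10.0) bounds here are assumed:
+//
+//	mid, err := client.UsageLog.Query().
+//		Where(usagelog.TotalCostGTE(1.0), usagelog.TotalCostLT(10.0)).
+//		All(ctx)
+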
+func RateMultiplierGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldRateMultiplier, v)) +} + +// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field. +func RateMultiplierGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldRateMultiplier, v)) +} + +// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field. +func RateMultiplierLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldRateMultiplier, v)) +} + +// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field. +func RateMultiplierLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldRateMultiplier, v)) +} + +// AccountRateMultiplierEQ applies the EQ predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierNEQ applies the NEQ predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierIn applies the In predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldAccountRateMultiplier, vs...)) +} + +// AccountRateMultiplierNotIn applies the NotIn predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldAccountRateMultiplier, vs...)) +} + +// AccountRateMultiplierGT applies the GT predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierGTE applies the GTE predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierLT applies the LT predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierLTE applies the LTE predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierIsNil applies the IsNil predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldAccountRateMultiplier)) +} + +// AccountRateMultiplierNotNil applies the NotNil predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldAccountRateMultiplier)) +} + +// BillingTypeEQ applies the EQ predicate on the "billing_type" field. +func BillingTypeEQ(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v)) +} + +// BillingTypeNEQ applies the NEQ predicate on the "billing_type" field. 
+func BillingTypeNEQ(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldBillingType, v)) +} + +// BillingTypeIn applies the In predicate on the "billing_type" field. +func BillingTypeIn(vs ...int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldBillingType, vs...)) +} + +// BillingTypeNotIn applies the NotIn predicate on the "billing_type" field. +func BillingTypeNotIn(vs ...int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldBillingType, vs...)) +} + +// BillingTypeGT applies the GT predicate on the "billing_type" field. +func BillingTypeGT(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldBillingType, v)) +} + +// BillingTypeGTE applies the GTE predicate on the "billing_type" field. +func BillingTypeGTE(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldBillingType, v)) +} + +// BillingTypeLT applies the LT predicate on the "billing_type" field. +func BillingTypeLT(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldBillingType, v)) +} + +// BillingTypeLTE applies the LTE predicate on the "billing_type" field. +func BillingTypeLTE(v int8) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldBillingType, v)) +} + +// StreamEQ applies the EQ predicate on the "stream" field. +func StreamEQ(v bool) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldStream, v)) +} + +// StreamNEQ applies the NEQ predicate on the "stream" field. +func StreamNEQ(v bool) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldStream, v)) +} + +// DurationMsEQ applies the EQ predicate on the "duration_ms" field. +func DurationMsEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldDurationMs, v)) +} + +// DurationMsNEQ applies the NEQ predicate on the "duration_ms" field. +func DurationMsNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldDurationMs, v)) +} + +// DurationMsIn applies the In predicate on the "duration_ms" field. +func DurationMsIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldDurationMs, vs...)) +} + +// DurationMsNotIn applies the NotIn predicate on the "duration_ms" field. +func DurationMsNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldDurationMs, vs...)) +} + +// DurationMsGT applies the GT predicate on the "duration_ms" field. +func DurationMsGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldDurationMs, v)) +} + +// DurationMsGTE applies the GTE predicate on the "duration_ms" field. +func DurationMsGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldDurationMs, v)) +} + +// DurationMsLT applies the LT predicate on the "duration_ms" field. +func DurationMsLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldDurationMs, v)) +} + +// DurationMsLTE applies the LTE predicate on the "duration_ms" field. +func DurationMsLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldDurationMs, v)) +} + +// DurationMsIsNil applies the IsNil predicate on the "duration_ms" field. +func DurationMsIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldDurationMs)) +} + +// DurationMsNotNil applies the NotNil predicate on the "duration_ms" field. 
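As a quick illustration of how these generated predicates compose, here is a minimal sketch of a query for slow streaming requests. It assumes an initialized *ent.Client named client, a context.Context named ctx, and imports of the ent and usagelog packages; the 2000 ms threshold is arbitrary.

	// Sketch, not generated code: find streamed requests slower than 2s.
	func slowStreamingLogs(ctx context.Context, client *ent.Client) ([]*ent.UsageLog, error) {
		return client.UsageLog.Query().
			Where(
				usagelog.StreamEQ(true),     // streamed responses only
				usagelog.DurationMsGT(2000), // arbitrary latency threshold in ms
			).
			All(ctx)
	}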
+func DurationMsNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldDurationMs)) +} + +// FirstTokenMsEQ applies the EQ predicate on the "first_token_ms" field. +func FirstTokenMsEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldFirstTokenMs, v)) +} + +// FirstTokenMsNEQ applies the NEQ predicate on the "first_token_ms" field. +func FirstTokenMsNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldFirstTokenMs, v)) +} + +// FirstTokenMsIn applies the In predicate on the "first_token_ms" field. +func FirstTokenMsIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldFirstTokenMs, vs...)) +} + +// FirstTokenMsNotIn applies the NotIn predicate on the "first_token_ms" field. +func FirstTokenMsNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldFirstTokenMs, vs...)) +} + +// FirstTokenMsGT applies the GT predicate on the "first_token_ms" field. +func FirstTokenMsGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldFirstTokenMs, v)) +} + +// FirstTokenMsGTE applies the GTE predicate on the "first_token_ms" field. +func FirstTokenMsGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldFirstTokenMs, v)) +} + +// FirstTokenMsLT applies the LT predicate on the "first_token_ms" field. +func FirstTokenMsLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldFirstTokenMs, v)) +} + +// FirstTokenMsLTE applies the LTE predicate on the "first_token_ms" field. +func FirstTokenMsLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldFirstTokenMs, v)) +} + +// FirstTokenMsIsNil applies the IsNil predicate on the "first_token_ms" field. +func FirstTokenMsIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldFirstTokenMs)) +} + +// FirstTokenMsNotNil applies the NotNil predicate on the "first_token_ms" field. +func FirstTokenMsNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldFirstTokenMs)) +} + +// UserAgentEQ applies the EQ predicate on the "user_agent" field. +func UserAgentEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldUserAgent, v)) +} + +// UserAgentNEQ applies the NEQ predicate on the "user_agent" field. +func UserAgentNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldUserAgent, v)) +} + +// UserAgentIn applies the In predicate on the "user_agent" field. +func UserAgentIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldUserAgent, vs...)) +} + +// UserAgentNotIn applies the NotIn predicate on the "user_agent" field. +func UserAgentNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldUserAgent, vs...)) +} + +// UserAgentGT applies the GT predicate on the "user_agent" field. +func UserAgentGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldUserAgent, v)) +} + +// UserAgentGTE applies the GTE predicate on the "user_agent" field. +func UserAgentGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldUserAgent, v)) +} + +// UserAgentLT applies the LT predicate on the "user_agent" field. +func UserAgentLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldUserAgent, v)) +} + +// UserAgentLTE applies the LTE predicate on the "user_agent" field. 
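Because duration_ms and first_token_ms are nullable, the IsNil/NotNil predicates are the only way to select rows where the value is absent: comparison predicates such as FirstTokenMsLTE compile to SQL comparisons, which never match NULL. A hedged sketch under the same client/ctx assumptions as above:

	// Count requests that never recorded a time-to-first-token.
	func missingTTFT(ctx context.Context, client *ent.Client) (int, error) {
		return client.UsageLog.Query().
			Where(usagelog.FirstTokenMsIsNil()).
			Count(ctx)
	}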
+func UserAgentLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldUserAgent, v)) +} + +// UserAgentContains applies the Contains predicate on the "user_agent" field. +func UserAgentContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldUserAgent, v)) +} + +// UserAgentHasPrefix applies the HasPrefix predicate on the "user_agent" field. +func UserAgentHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldUserAgent, v)) +} + +// UserAgentHasSuffix applies the HasSuffix predicate on the "user_agent" field. +func UserAgentHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldUserAgent, v)) +} + +// UserAgentIsNil applies the IsNil predicate on the "user_agent" field. +func UserAgentIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldUserAgent)) +} + +// UserAgentNotNil applies the NotNil predicate on the "user_agent" field. +func UserAgentNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldUserAgent)) +} + +// UserAgentEqualFold applies the EqualFold predicate on the "user_agent" field. +func UserAgentEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldUserAgent, v)) +} + +// UserAgentContainsFold applies the ContainsFold predicate on the "user_agent" field. +func UserAgentContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldUserAgent, v)) +} + +// IPAddressEQ applies the EQ predicate on the "ip_address" field. +func IPAddressEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v)) +} + +// IPAddressNEQ applies the NEQ predicate on the "ip_address" field. +func IPAddressNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldIPAddress, v)) +} + +// IPAddressIn applies the In predicate on the "ip_address" field. +func IPAddressIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldIPAddress, vs...)) +} + +// IPAddressNotIn applies the NotIn predicate on the "ip_address" field. +func IPAddressNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldIPAddress, vs...)) +} + +// IPAddressGT applies the GT predicate on the "ip_address" field. +func IPAddressGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldIPAddress, v)) +} + +// IPAddressGTE applies the GTE predicate on the "ip_address" field. +func IPAddressGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldIPAddress, v)) +} + +// IPAddressLT applies the LT predicate on the "ip_address" field. +func IPAddressLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldIPAddress, v)) +} + +// IPAddressLTE applies the LTE predicate on the "ip_address" field. +func IPAddressLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldIPAddress, v)) +} + +// IPAddressContains applies the Contains predicate on the "ip_address" field. +func IPAddressContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldIPAddress, v)) +} + +// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. +func IPAddressHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldIPAddress, v)) +} + +// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. 
+func IPAddressHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldIPAddress, v)) +} + +// IPAddressIsNil applies the IsNil predicate on the "ip_address" field. +func IPAddressIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldIPAddress)) +} + +// IPAddressNotNil applies the NotNil predicate on the "ip_address" field. +func IPAddressNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldIPAddress)) +} + +// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. +func IPAddressEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldIPAddress, v)) +} + +// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. +func IPAddressContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldIPAddress, v)) +} + +// ImageCountEQ applies the EQ predicate on the "image_count" field. +func ImageCountEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v)) +} + +// ImageCountNEQ applies the NEQ predicate on the "image_count" field. +func ImageCountNEQ(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldImageCount, v)) +} + +// ImageCountIn applies the In predicate on the "image_count" field. +func ImageCountIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldImageCount, vs...)) +} + +// ImageCountNotIn applies the NotIn predicate on the "image_count" field. +func ImageCountNotIn(vs ...int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldImageCount, vs...)) +} + +// ImageCountGT applies the GT predicate on the "image_count" field. +func ImageCountGT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldImageCount, v)) +} + +// ImageCountGTE applies the GTE predicate on the "image_count" field. +func ImageCountGTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldImageCount, v)) +} + +// ImageCountLT applies the LT predicate on the "image_count" field. +func ImageCountLT(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldImageCount, v)) +} + +// ImageCountLTE applies the LTE predicate on the "image_count" field. +func ImageCountLTE(v int) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldImageCount, v)) +} + +// ImageSizeEQ applies the EQ predicate on the "image_size" field. +func ImageSizeEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldImageSize, v)) +} + +// ImageSizeNEQ applies the NEQ predicate on the "image_size" field. +func ImageSizeNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldImageSize, v)) +} + +// ImageSizeIn applies the In predicate on the "image_size" field. +func ImageSizeIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldImageSize, vs...)) +} + +// ImageSizeNotIn applies the NotIn predicate on the "image_size" field. +func ImageSizeNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldImageSize, vs...)) +} + +// ImageSizeGT applies the GT predicate on the "image_size" field. +func ImageSizeGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldImageSize, v)) +} + +// ImageSizeGTE applies the GTE predicate on the "image_size" field. 
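The string predicates include case-folded variants; ContainsFold is handy for user-agent matching where casing varies. Note that IPAddressHasPrefix is plain lexical prefix matching on the stored string, not CIDR-aware subnet logic. A sketch (the "claude-cli" substring is illustrative, not taken from this codebase):

	func cliTraffic(ctx context.Context, client *ent.Client) (int, error) {
		return client.UsageLog.Query().
			Where(usagelog.UserAgentContainsFold("claude-cli")). // case-insensitive substring match
			Count(ctx)
	}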
+func ImageSizeGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldImageSize, v)) +} + +// ImageSizeLT applies the LT predicate on the "image_size" field. +func ImageSizeLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldImageSize, v)) +} + +// ImageSizeLTE applies the LTE predicate on the "image_size" field. +func ImageSizeLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldImageSize, v)) +} + +// ImageSizeContains applies the Contains predicate on the "image_size" field. +func ImageSizeContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldImageSize, v)) +} + +// ImageSizeHasPrefix applies the HasPrefix predicate on the "image_size" field. +func ImageSizeHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldImageSize, v)) +} + +// ImageSizeHasSuffix applies the HasSuffix predicate on the "image_size" field. +func ImageSizeHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldImageSize, v)) +} + +// ImageSizeIsNil applies the IsNil predicate on the "image_size" field. +func ImageSizeIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldImageSize)) +} + +// ImageSizeNotNil applies the NotNil predicate on the "image_size" field. +func ImageSizeNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldImageSize)) +} + +// ImageSizeEqualFold applies the EqualFold predicate on the "image_size" field. +func ImageSizeEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldImageSize, v)) +} + +// ImageSizeContainsFold applies the ContainsFold predicate on the "image_size" field. +func ImageSizeContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldImageSize, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. 
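A common pattern with the created_at predicates is a half-open time window, which avoids double-counting rows that fall exactly on a boundary when windows are chained. Sketch, same assumptions as above (time import assumed):

	func logsBetween(ctx context.Context, client *ent.Client, start, end time.Time) ([]*ent.UsageLog, error) {
		return client.UsageLog.Query().
			Where(
				usagelog.CreatedAtGTE(start), // inclusive lower bound
				usagelog.CreatedAtLT(end),    // exclusive upper bound: [start, end)
			).
			All(ctx)
	}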
+func HasUser() predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with the given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAPIKey applies the HasEdge predicate on the "api_key" edge. +func HasAPIKey() predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, APIKeyTable, APIKeyColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAPIKeyWith applies the HasEdge predicate on the "api_key" edge with the given conditions (other predicates). +func HasAPIKeyWith(preds ...predicate.APIKey) predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := newAPIKeyStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAccount applies the HasEdge predicate on the "account" edge. +func HasAccount() predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AccountTable, AccountColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAccountWith applies the HasEdge predicate on the "account" edge with the given conditions (other predicates). +func HasAccountWith(preds ...predicate.Account) predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := newAccountStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with the given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasSubscription applies the HasEdge predicate on the "subscription" edge. +func HasSubscription() predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, SubscriptionTable, SubscriptionColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSubscriptionWith applies the HasEdge predicate on the "subscription" edge with the given conditions (other predicates). 
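The Has*/Has*With predicates filter through an edge via a subquery on the joined table, and multiple predicates passed to Where are ANDed implicitly (the And/Or/Not combinators defined just below make grouping explicit). A sketch using the always-generated IDEQ predicate from the apikey package, same client/ctx assumptions as above:

	func logsForAPIKey(ctx context.Context, client *ent.Client, keyID int64) ([]*ent.UsageLog, error) {
		return client.UsageLog.Query().
			Where(usagelog.HasAPIKeyWith(apikey.IDEQ(keyID))). // join through the api_key edge
			All(ctx)
	}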
+func HasSubscriptionWith(preds ...predicate.UserSubscription) predicate.UsageLog { + return predicate.UsageLog(func(s *sql.Selector) { + step := newSubscriptionStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UsageLog) predicate.UsageLog { + return predicate.UsageLog(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UsageLog) predicate.UsageLog { + return predicate.UsageLog(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UsageLog) predicate.UsageLog { + return predicate.UsageLog(sql.NotPredicates(p)) +} diff --git a/backend/ent/usagelog_create.go b/backend/ent/usagelog_create.go new file mode 100644 index 00000000..a17d6507 --- /dev/null +++ b/backend/ent/usagelog_create.go @@ -0,0 +1,2863 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/account" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UsageLogCreate is the builder for creating a UsageLog entity. +type UsageLogCreate struct { + config + mutation *UsageLogMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetUserID sets the "user_id" field. +func (_c *UsageLogCreate) SetUserID(v int64) *UsageLogCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetAPIKeyID sets the "api_key_id" field. +func (_c *UsageLogCreate) SetAPIKeyID(v int64) *UsageLogCreate { + _c.mutation.SetAPIKeyID(v) + return _c +} + +// SetAccountID sets the "account_id" field. +func (_c *UsageLogCreate) SetAccountID(v int64) *UsageLogCreate { + _c.mutation.SetAccountID(v) + return _c +} + +// SetRequestID sets the "request_id" field. +func (_c *UsageLogCreate) SetRequestID(v string) *UsageLogCreate { + _c.mutation.SetRequestID(v) + return _c +} + +// SetModel sets the "model" field. +func (_c *UsageLogCreate) SetModel(v string) *UsageLogCreate { + _c.mutation.SetModel(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *UsageLogCreate) SetGroupID(v int64) *UsageLogCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableGroupID(v *int64) *UsageLogCreate { + if v != nil { + _c.SetGroupID(*v) + } + return _c +} + +// SetSubscriptionID sets the "subscription_id" field. +func (_c *UsageLogCreate) SetSubscriptionID(v int64) *UsageLogCreate { + _c.mutation.SetSubscriptionID(v) + return _c +} + +// SetNillableSubscriptionID sets the "subscription_id" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableSubscriptionID(v *int64) *UsageLogCreate { + if v != nil { + _c.SetSubscriptionID(*v) + } + return _c +} + +// SetInputTokens sets the "input_tokens" field. +func (_c *UsageLogCreate) SetInputTokens(v int) *UsageLogCreate { + _c.mutation.SetInputTokens(v) + return _c +} + +// SetNillableInputTokens sets the "input_tokens" field if the given value is not nil. 
+func (_c *UsageLogCreate) SetNillableInputTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetInputTokens(*v) + } + return _c +} + +// SetOutputTokens sets the "output_tokens" field. +func (_c *UsageLogCreate) SetOutputTokens(v int) *UsageLogCreate { + _c.mutation.SetOutputTokens(v) + return _c +} + +// SetNillableOutputTokens sets the "output_tokens" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableOutputTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetOutputTokens(*v) + } + return _c +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (_c *UsageLogCreate) SetCacheCreationTokens(v int) *UsageLogCreate { + _c.mutation.SetCacheCreationTokens(v) + return _c +} + +// SetNillableCacheCreationTokens sets the "cache_creation_tokens" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCacheCreationTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetCacheCreationTokens(*v) + } + return _c +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (_c *UsageLogCreate) SetCacheReadTokens(v int) *UsageLogCreate { + _c.mutation.SetCacheReadTokens(v) + return _c +} + +// SetNillableCacheReadTokens sets the "cache_read_tokens" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCacheReadTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetCacheReadTokens(*v) + } + return _c +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (_c *UsageLogCreate) SetCacheCreation5mTokens(v int) *UsageLogCreate { + _c.mutation.SetCacheCreation5mTokens(v) + return _c +} + +// SetNillableCacheCreation5mTokens sets the "cache_creation_5m_tokens" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCacheCreation5mTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetCacheCreation5mTokens(*v) + } + return _c +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (_c *UsageLogCreate) SetCacheCreation1hTokens(v int) *UsageLogCreate { + _c.mutation.SetCacheCreation1hTokens(v) + return _c +} + +// SetNillableCacheCreation1hTokens sets the "cache_creation_1h_tokens" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCacheCreation1hTokens(v *int) *UsageLogCreate { + if v != nil { + _c.SetCacheCreation1hTokens(*v) + } + return _c +} + +// SetInputCost sets the "input_cost" field. +func (_c *UsageLogCreate) SetInputCost(v float64) *UsageLogCreate { + _c.mutation.SetInputCost(v) + return _c +} + +// SetNillableInputCost sets the "input_cost" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableInputCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetInputCost(*v) + } + return _c +} + +// SetOutputCost sets the "output_cost" field. +func (_c *UsageLogCreate) SetOutputCost(v float64) *UsageLogCreate { + _c.mutation.SetOutputCost(v) + return _c +} + +// SetNillableOutputCost sets the "output_cost" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableOutputCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetOutputCost(*v) + } + return _c +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (_c *UsageLogCreate) SetCacheCreationCost(v float64) *UsageLogCreate { + _c.mutation.SetCacheCreationCost(v) + return _c +} + +// SetNillableCacheCreationCost sets the "cache_creation_cost" field if the given value is not nil. 
+func (_c *UsageLogCreate) SetNillableCacheCreationCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetCacheCreationCost(*v) + } + return _c +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (_c *UsageLogCreate) SetCacheReadCost(v float64) *UsageLogCreate { + _c.mutation.SetCacheReadCost(v) + return _c +} + +// SetNillableCacheReadCost sets the "cache_read_cost" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCacheReadCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetCacheReadCost(*v) + } + return _c +} + +// SetTotalCost sets the "total_cost" field. +func (_c *UsageLogCreate) SetTotalCost(v float64) *UsageLogCreate { + _c.mutation.SetTotalCost(v) + return _c +} + +// SetNillableTotalCost sets the "total_cost" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableTotalCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetTotalCost(*v) + } + return _c +} + +// SetActualCost sets the "actual_cost" field. +func (_c *UsageLogCreate) SetActualCost(v float64) *UsageLogCreate { + _c.mutation.SetActualCost(v) + return _c +} + +// SetNillableActualCost sets the "actual_cost" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableActualCost(v *float64) *UsageLogCreate { + if v != nil { + _c.SetActualCost(*v) + } + return _c +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_c *UsageLogCreate) SetRateMultiplier(v float64) *UsageLogCreate { + _c.mutation.SetRateMultiplier(v) + return _c +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableRateMultiplier(v *float64) *UsageLogCreate { + if v != nil { + _c.SetRateMultiplier(*v) + } + return _c +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (_c *UsageLogCreate) SetAccountRateMultiplier(v float64) *UsageLogCreate { + _c.mutation.SetAccountRateMultiplier(v) + return _c +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableAccountRateMultiplier(v *float64) *UsageLogCreate { + if v != nil { + _c.SetAccountRateMultiplier(*v) + } + return _c +} + +// SetBillingType sets the "billing_type" field. +func (_c *UsageLogCreate) SetBillingType(v int8) *UsageLogCreate { + _c.mutation.SetBillingType(v) + return _c +} + +// SetNillableBillingType sets the "billing_type" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableBillingType(v *int8) *UsageLogCreate { + if v != nil { + _c.SetBillingType(*v) + } + return _c +} + +// SetStream sets the "stream" field. +func (_c *UsageLogCreate) SetStream(v bool) *UsageLogCreate { + _c.mutation.SetStream(v) + return _c +} + +// SetNillableStream sets the "stream" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableStream(v *bool) *UsageLogCreate { + if v != nil { + _c.SetStream(*v) + } + return _c +} + +// SetDurationMs sets the "duration_ms" field. +func (_c *UsageLogCreate) SetDurationMs(v int) *UsageLogCreate { + _c.mutation.SetDurationMs(v) + return _c +} + +// SetNillableDurationMs sets the "duration_ms" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableDurationMs(v *int) *UsageLogCreate { + if v != nil { + _c.SetDurationMs(*v) + } + return _c +} + +// SetFirstTokenMs sets the "first_token_ms" field. 
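The SetNillable* variants take pointers and silently skip nil, which makes forwarding optional fields straightforward. A sketch with a hypothetical carrier struct (usageReport is illustrative, not part of this codebase):

	// usageReport is a hypothetical carrier for optional timing metrics.
	type usageReport struct {
		DurationMs   *int
		FirstTokenMs *int
	}

	func applyTimings(c *ent.UsageLogCreate, r usageReport) *ent.UsageLogCreate {
		return c.
			SetNillableDurationMs(r.DurationMs).    // no-op when nil
			SetNillableFirstTokenMs(r.FirstTokenMs) // setter defined just below
	}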
+func (_c *UsageLogCreate) SetFirstTokenMs(v int) *UsageLogCreate { + _c.mutation.SetFirstTokenMs(v) + return _c +} + +// SetNillableFirstTokenMs sets the "first_token_ms" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableFirstTokenMs(v *int) *UsageLogCreate { + if v != nil { + _c.SetFirstTokenMs(*v) + } + return _c +} + +// SetUserAgent sets the "user_agent" field. +func (_c *UsageLogCreate) SetUserAgent(v string) *UsageLogCreate { + _c.mutation.SetUserAgent(v) + return _c +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableUserAgent(v *string) *UsageLogCreate { + if v != nil { + _c.SetUserAgent(*v) + } + return _c +} + +// SetIPAddress sets the "ip_address" field. +func (_c *UsageLogCreate) SetIPAddress(v string) *UsageLogCreate { + _c.mutation.SetIPAddress(v) + return _c +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableIPAddress(v *string) *UsageLogCreate { + if v != nil { + _c.SetIPAddress(*v) + } + return _c +} + +// SetImageCount sets the "image_count" field. +func (_c *UsageLogCreate) SetImageCount(v int) *UsageLogCreate { + _c.mutation.SetImageCount(v) + return _c +} + +// SetNillableImageCount sets the "image_count" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableImageCount(v *int) *UsageLogCreate { + if v != nil { + _c.SetImageCount(*v) + } + return _c +} + +// SetImageSize sets the "image_size" field. +func (_c *UsageLogCreate) SetImageSize(v string) *UsageLogCreate { + _c.mutation.SetImageSize(v) + return _c +} + +// SetNillableImageSize sets the "image_size" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableImageSize(v *string) *UsageLogCreate { + if v != nil { + _c.SetImageSize(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UsageLogCreate) SetCreatedAt(v time.Time) *UsageLogCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableCreatedAt(v *time.Time) *UsageLogCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UsageLogCreate) SetUser(v *User) *UsageLogCreate { + return _c.SetUserID(v.ID) +} + +// SetAPIKey sets the "api_key" edge to the APIKey entity. +func (_c *UsageLogCreate) SetAPIKey(v *APIKey) *UsageLogCreate { + return _c.SetAPIKeyID(v.ID) +} + +// SetAccount sets the "account" edge to the Account entity. +func (_c *UsageLogCreate) SetAccount(v *Account) *UsageLogCreate { + return _c.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *UsageLogCreate) SetGroup(v *Group) *UsageLogCreate { + return _c.SetGroupID(v.ID) +} + +// SetSubscription sets the "subscription" edge to the UserSubscription entity. +func (_c *UsageLogCreate) SetSubscription(v *UserSubscription) *UsageLogCreate { + return _c.SetSubscriptionID(v.ID) +} + +// Mutation returns the UsageLogMutation object of the builder. +func (_c *UsageLogCreate) Mutation() *UsageLogMutation { + return _c.mutation +} + +// Save creates the UsageLog in the database. +func (_c *UsageLogCreate) Save(ctx context.Context) (*UsageLog, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
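Putting the builder together: per the generated validators, only user_id, api_key_id, account_id, request_id and model must be set explicitly; the remaining token, cost and timestamp fields fall back to the schema defaults applied by defaults() before save. A sketch with illustrative values throughout:

	func recordUsage(ctx context.Context, client *ent.Client) (*ent.UsageLog, error) {
		return client.UsageLog.Create().
			SetUserID(1). // illustrative IDs
			SetAPIKeyID(2).
			SetAccountID(3).
			SetRequestID("req-example-1").
			SetModel("claude-example-model"). // placeholder model name
			SetInputTokens(1200).
			SetOutputTokens(350).
			Save(ctx)
	}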
+func (_c *UsageLogCreate) SaveX(ctx context.Context) *UsageLog { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UsageLogCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UsageLogCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UsageLogCreate) defaults() { + if _, ok := _c.mutation.InputTokens(); !ok { + v := usagelog.DefaultInputTokens + _c.mutation.SetInputTokens(v) + } + if _, ok := _c.mutation.OutputTokens(); !ok { + v := usagelog.DefaultOutputTokens + _c.mutation.SetOutputTokens(v) + } + if _, ok := _c.mutation.CacheCreationTokens(); !ok { + v := usagelog.DefaultCacheCreationTokens + _c.mutation.SetCacheCreationTokens(v) + } + if _, ok := _c.mutation.CacheReadTokens(); !ok { + v := usagelog.DefaultCacheReadTokens + _c.mutation.SetCacheReadTokens(v) + } + if _, ok := _c.mutation.CacheCreation5mTokens(); !ok { + v := usagelog.DefaultCacheCreation5mTokens + _c.mutation.SetCacheCreation5mTokens(v) + } + if _, ok := _c.mutation.CacheCreation1hTokens(); !ok { + v := usagelog.DefaultCacheCreation1hTokens + _c.mutation.SetCacheCreation1hTokens(v) + } + if _, ok := _c.mutation.InputCost(); !ok { + v := usagelog.DefaultInputCost + _c.mutation.SetInputCost(v) + } + if _, ok := _c.mutation.OutputCost(); !ok { + v := usagelog.DefaultOutputCost + _c.mutation.SetOutputCost(v) + } + if _, ok := _c.mutation.CacheCreationCost(); !ok { + v := usagelog.DefaultCacheCreationCost + _c.mutation.SetCacheCreationCost(v) + } + if _, ok := _c.mutation.CacheReadCost(); !ok { + v := usagelog.DefaultCacheReadCost + _c.mutation.SetCacheReadCost(v) + } + if _, ok := _c.mutation.TotalCost(); !ok { + v := usagelog.DefaultTotalCost + _c.mutation.SetTotalCost(v) + } + if _, ok := _c.mutation.ActualCost(); !ok { + v := usagelog.DefaultActualCost + _c.mutation.SetActualCost(v) + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + v := usagelog.DefaultRateMultiplier + _c.mutation.SetRateMultiplier(v) + } + if _, ok := _c.mutation.BillingType(); !ok { + v := usagelog.DefaultBillingType + _c.mutation.SetBillingType(v) + } + if _, ok := _c.mutation.Stream(); !ok { + v := usagelog.DefaultStream + _c.mutation.SetStream(v) + } + if _, ok := _c.mutation.ImageCount(); !ok { + v := usagelog.DefaultImageCount + _c.mutation.SetImageCount(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := usagelog.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UsageLogCreate) check() error { + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UsageLog.user_id"`)} + } + if _, ok := _c.mutation.APIKeyID(); !ok { + return &ValidationError{Name: "api_key_id", err: errors.New(`ent: missing required field "UsageLog.api_key_id"`)} + } + if _, ok := _c.mutation.AccountID(); !ok { + return &ValidationError{Name: "account_id", err: errors.New(`ent: missing required field "UsageLog.account_id"`)} + } + if _, ok := _c.mutation.RequestID(); !ok { + return &ValidationError{Name: "request_id", err: errors.New(`ent: missing required field "UsageLog.request_id"`)} + } + if v, ok := _c.mutation.RequestID(); ok { + if err := usagelog.RequestIDValidator(v); err != nil { + return &ValidationError{Name: "request_id", err: fmt.Errorf(`ent: validator failed for field "UsageLog.request_id": %w`, err)} + } + } + if _, ok := _c.mutation.Model(); !ok { + return &ValidationError{Name: "model", err: errors.New(`ent: missing required field "UsageLog.model"`)} + } + if v, ok := _c.mutation.Model(); ok { + if err := usagelog.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)} + } + } + if _, ok := _c.mutation.InputTokens(); !ok { + return &ValidationError{Name: "input_tokens", err: errors.New(`ent: missing required field "UsageLog.input_tokens"`)} + } + if _, ok := _c.mutation.OutputTokens(); !ok { + return &ValidationError{Name: "output_tokens", err: errors.New(`ent: missing required field "UsageLog.output_tokens"`)} + } + if _, ok := _c.mutation.CacheCreationTokens(); !ok { + return &ValidationError{Name: "cache_creation_tokens", err: errors.New(`ent: missing required field "UsageLog.cache_creation_tokens"`)} + } + if _, ok := _c.mutation.CacheReadTokens(); !ok { + return &ValidationError{Name: "cache_read_tokens", err: errors.New(`ent: missing required field "UsageLog.cache_read_tokens"`)} + } + if _, ok := _c.mutation.CacheCreation5mTokens(); !ok { + return &ValidationError{Name: "cache_creation_5m_tokens", err: errors.New(`ent: missing required field "UsageLog.cache_creation_5m_tokens"`)} + } + if _, ok := _c.mutation.CacheCreation1hTokens(); !ok { + return &ValidationError{Name: "cache_creation_1h_tokens", err: errors.New(`ent: missing required field "UsageLog.cache_creation_1h_tokens"`)} + } + if _, ok := _c.mutation.InputCost(); !ok { + return &ValidationError{Name: "input_cost", err: errors.New(`ent: missing required field "UsageLog.input_cost"`)} + } + if _, ok := _c.mutation.OutputCost(); !ok { + return &ValidationError{Name: "output_cost", err: errors.New(`ent: missing required field "UsageLog.output_cost"`)} + } + if _, ok := _c.mutation.CacheCreationCost(); !ok { + return &ValidationError{Name: "cache_creation_cost", err: errors.New(`ent: missing required field "UsageLog.cache_creation_cost"`)} + } + if _, ok := _c.mutation.CacheReadCost(); !ok { + return &ValidationError{Name: "cache_read_cost", err: errors.New(`ent: missing required field "UsageLog.cache_read_cost"`)} + } + if _, ok := _c.mutation.TotalCost(); !ok { + return &ValidationError{Name: "total_cost", err: errors.New(`ent: missing required field "UsageLog.total_cost"`)} + } + if _, ok := _c.mutation.ActualCost(); !ok { + return &ValidationError{Name: "actual_cost", err: errors.New(`ent: missing required field "UsageLog.actual_cost"`)} + } + if _, ok := _c.mutation.RateMultiplier(); !ok { + return 
&ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "UsageLog.rate_multiplier"`)} + } + if _, ok := _c.mutation.BillingType(); !ok { + return &ValidationError{Name: "billing_type", err: errors.New(`ent: missing required field "UsageLog.billing_type"`)} + } + if _, ok := _c.mutation.Stream(); !ok { + return &ValidationError{Name: "stream", err: errors.New(`ent: missing required field "UsageLog.stream"`)} + } + if v, ok := _c.mutation.UserAgent(); ok { + if err := usagelog.UserAgentValidator(v); err != nil { + return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} + } + } + if v, ok := _c.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } + if _, ok := _c.mutation.ImageCount(); !ok { + return &ValidationError{Name: "image_count", err: errors.New(`ent: missing required field "UsageLog.image_count"`)} + } + if v, ok := _c.mutation.ImageSize(); ok { + if err := usagelog.ImageSizeValidator(v); err != nil { + return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UsageLog.created_at"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UsageLog.user"`)} + } + if len(_c.mutation.APIKeyIDs()) == 0 { + return &ValidationError{Name: "api_key", err: errors.New(`ent: missing required edge "UsageLog.api_key"`)} + } + if len(_c.mutation.AccountIDs()) == 0 { + return &ValidationError{Name: "account", err: errors.New(`ent: missing required edge "UsageLog.account"`)} + } + return nil +} + +func (_c *UsageLogCreate) sqlSave(ctx context.Context) (*UsageLog, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) { + var ( + _node = &UsageLog{config: _c.config} + _spec = sqlgraph.NewCreateSpec(usagelog.Table, sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.RequestID(); ok { + _spec.SetField(usagelog.FieldRequestID, field.TypeString, value) + _node.RequestID = value + } + if value, ok := _c.mutation.Model(); ok { + _spec.SetField(usagelog.FieldModel, field.TypeString, value) + _node.Model = value + } + if value, ok := _c.mutation.InputTokens(); ok { + _spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value) + _node.InputTokens = value + } + if value, ok := _c.mutation.OutputTokens(); ok { + _spec.SetField(usagelog.FieldOutputTokens, field.TypeInt, value) + _node.OutputTokens = value + } + if value, ok := _c.mutation.CacheCreationTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreationTokens, field.TypeInt, value) + _node.CacheCreationTokens = value + } + if value, ok := _c.mutation.CacheReadTokens(); ok { + 
_spec.SetField(usagelog.FieldCacheReadTokens, field.TypeInt, value) + _node.CacheReadTokens = value + } + if value, ok := _c.mutation.CacheCreation5mTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation5mTokens, field.TypeInt, value) + _node.CacheCreation5mTokens = value + } + if value, ok := _c.mutation.CacheCreation1hTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation1hTokens, field.TypeInt, value) + _node.CacheCreation1hTokens = value + } + if value, ok := _c.mutation.InputCost(); ok { + _spec.SetField(usagelog.FieldInputCost, field.TypeFloat64, value) + _node.InputCost = value + } + if value, ok := _c.mutation.OutputCost(); ok { + _spec.SetField(usagelog.FieldOutputCost, field.TypeFloat64, value) + _node.OutputCost = value + } + if value, ok := _c.mutation.CacheCreationCost(); ok { + _spec.SetField(usagelog.FieldCacheCreationCost, field.TypeFloat64, value) + _node.CacheCreationCost = value + } + if value, ok := _c.mutation.CacheReadCost(); ok { + _spec.SetField(usagelog.FieldCacheReadCost, field.TypeFloat64, value) + _node.CacheReadCost = value + } + if value, ok := _c.mutation.TotalCost(); ok { + _spec.SetField(usagelog.FieldTotalCost, field.TypeFloat64, value) + _node.TotalCost = value + } + if value, ok := _c.mutation.ActualCost(); ok { + _spec.SetField(usagelog.FieldActualCost, field.TypeFloat64, value) + _node.ActualCost = value + } + if value, ok := _c.mutation.RateMultiplier(); ok { + _spec.SetField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) + _node.RateMultiplier = value + } + if value, ok := _c.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + _node.AccountRateMultiplier = &value + } + if value, ok := _c.mutation.BillingType(); ok { + _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) + _node.BillingType = value + } + if value, ok := _c.mutation.Stream(); ok { + _spec.SetField(usagelog.FieldStream, field.TypeBool, value) + _node.Stream = value + } + if value, ok := _c.mutation.DurationMs(); ok { + _spec.SetField(usagelog.FieldDurationMs, field.TypeInt, value) + _node.DurationMs = &value + } + if value, ok := _c.mutation.FirstTokenMs(); ok { + _spec.SetField(usagelog.FieldFirstTokenMs, field.TypeInt, value) + _node.FirstTokenMs = &value + } + if value, ok := _c.mutation.UserAgent(); ok { + _spec.SetField(usagelog.FieldUserAgent, field.TypeString, value) + _node.UserAgent = &value + } + if value, ok := _c.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + _node.IPAddress = &value + } + if value, ok := _c.mutation.ImageCount(); ok { + _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) + _node.ImageCount = value + } + if value, ok := _c.mutation.ImageSize(); ok { + _spec.SetField(usagelog.FieldImageSize, field.TypeString, value) + _node.ImageSize = &value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(usagelog.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.UserTable, + Columns: []string{usagelog.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.APIKeyIDs(); len(nodes) > 0 { + 
edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.APIKeyTable, + Columns: []string{usagelog.APIKeyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.APIKeyID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.AccountTable, + Columns: []string{usagelog.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AccountID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.GroupTable, + Columns: []string{usagelog.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.SubscriptionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.SubscriptionTable, + Columns: []string{usagelog.SubscriptionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.SubscriptionID = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UsageLog.Create(). +// SetUserID(v). +// OnConflict( +// // Update the row with the new values +// // that were proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UsageLogUpsert) { +// u.SetUserID(v + v) +// }). +// Exec(ctx) +func (_c *UsageLogCreate) OnConflict(opts ...sql.ConflictOption) *UsageLogUpsertOne { + _c.conflict = opts + return &UsageLogUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UsageLog.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UsageLogCreate) OnConflictColumns(columns ...string) *UsageLogUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UsageLogUpsertOne{ + create: _c, + } +} + +type ( + // UsageLogUpsertOne is the builder for "upsert"-ing + // one UsageLog node. + UsageLogUpsertOne struct { + create *UsageLogCreate + } + + // UsageLogUpsert is the "OnConflict" setter. + UsageLogUpsert struct { + *sql.UpdateSet + } +) + +// SetUserID sets the "user_id" field. +func (u *UsageLogUpsert) SetUserID(v int64) *UsageLogUpsert { + u.Set(usagelog.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. 
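A runnable, hedged version of the upsert flow the OnConflict comment above describes. Whether request_id actually carries a unique index depends on the schema, so the conflict target here is illustrative; UpdateNewValues is the generated helper (defined further down in this file) that resolves the conflict with the proposed values.

	func upsertByRequestID(ctx context.Context, client *ent.Client, requestID string) error {
		return client.UsageLog.Create().
			SetUserID(1). // illustrative values
			SetAPIKeyID(2).
			SetAccountID(3).
			SetRequestID(requestID).
			SetModel("claude-example-model").
			OnConflictColumns(usagelog.FieldRequestID). // assumes a unique index on request_id
			UpdateNewValues().                          // on conflict, take the proposed values
			Exec(ctx)
	}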
+func (u *UsageLogUpsert) UpdateUserID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldUserID) + return u +} + +// SetAPIKeyID sets the "api_key_id" field. +func (u *UsageLogUpsert) SetAPIKeyID(v int64) *UsageLogUpsert { + u.Set(usagelog.FieldAPIKeyID, v) + return u +} + +// UpdateAPIKeyID sets the "api_key_id" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateAPIKeyID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldAPIKeyID) + return u +} + +// SetAccountID sets the "account_id" field. +func (u *UsageLogUpsert) SetAccountID(v int64) *UsageLogUpsert { + u.Set(usagelog.FieldAccountID, v) + return u +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateAccountID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldAccountID) + return u +} + +// SetRequestID sets the "request_id" field. +func (u *UsageLogUpsert) SetRequestID(v string) *UsageLogUpsert { + u.Set(usagelog.FieldRequestID, v) + return u +} + +// UpdateRequestID sets the "request_id" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateRequestID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldRequestID) + return u +} + +// SetModel sets the "model" field. +func (u *UsageLogUpsert) SetModel(v string) *UsageLogUpsert { + u.Set(usagelog.FieldModel, v) + return u +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateModel() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldModel) + return u +} + +// SetGroupID sets the "group_id" field. +func (u *UsageLogUpsert) SetGroupID(v int64) *UsageLogUpsert { + u.Set(usagelog.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateGroupID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldGroupID) + return u +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *UsageLogUpsert) ClearGroupID() *UsageLogUpsert { + u.SetNull(usagelog.FieldGroupID) + return u +} + +// SetSubscriptionID sets the "subscription_id" field. +func (u *UsageLogUpsert) SetSubscriptionID(v int64) *UsageLogUpsert { + u.Set(usagelog.FieldSubscriptionID, v) + return u +} + +// UpdateSubscriptionID sets the "subscription_id" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateSubscriptionID() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldSubscriptionID) + return u +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (u *UsageLogUpsert) ClearSubscriptionID() *UsageLogUpsert { + u.SetNull(usagelog.FieldSubscriptionID) + return u +} + +// SetInputTokens sets the "input_tokens" field. +func (u *UsageLogUpsert) SetInputTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldInputTokens, v) + return u +} + +// UpdateInputTokens sets the "input_tokens" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateInputTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldInputTokens) + return u +} + +// AddInputTokens adds v to the "input_tokens" field. +func (u *UsageLogUpsert) AddInputTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldInputTokens, v) + return u +} + +// SetOutputTokens sets the "output_tokens" field. +func (u *UsageLogUpsert) SetOutputTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldOutputTokens, v) + return u +} + +// UpdateOutputTokens sets the "output_tokens" field to the value that was provided on create. 
+func (u *UsageLogUpsert) UpdateOutputTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldOutputTokens) + return u +} + +// AddOutputTokens adds v to the "output_tokens" field. +func (u *UsageLogUpsert) AddOutputTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldOutputTokens, v) + return u +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (u *UsageLogUpsert) SetCacheCreationTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldCacheCreationTokens, v) + return u +} + +// UpdateCacheCreationTokens sets the "cache_creation_tokens" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheCreationTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheCreationTokens) + return u +} + +// AddCacheCreationTokens adds v to the "cache_creation_tokens" field. +func (u *UsageLogUpsert) AddCacheCreationTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldCacheCreationTokens, v) + return u +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (u *UsageLogUpsert) SetCacheReadTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldCacheReadTokens, v) + return u +} + +// UpdateCacheReadTokens sets the "cache_read_tokens" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheReadTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheReadTokens) + return u +} + +// AddCacheReadTokens adds v to the "cache_read_tokens" field. +func (u *UsageLogUpsert) AddCacheReadTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldCacheReadTokens, v) + return u +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (u *UsageLogUpsert) SetCacheCreation5mTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldCacheCreation5mTokens, v) + return u +} + +// UpdateCacheCreation5mTokens sets the "cache_creation_5m_tokens" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheCreation5mTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheCreation5mTokens) + return u +} + +// AddCacheCreation5mTokens adds v to the "cache_creation_5m_tokens" field. +func (u *UsageLogUpsert) AddCacheCreation5mTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldCacheCreation5mTokens, v) + return u +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (u *UsageLogUpsert) SetCacheCreation1hTokens(v int) *UsageLogUpsert { + u.Set(usagelog.FieldCacheCreation1hTokens, v) + return u +} + +// UpdateCacheCreation1hTokens sets the "cache_creation_1h_tokens" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheCreation1hTokens() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheCreation1hTokens) + return u +} + +// AddCacheCreation1hTokens adds v to the "cache_creation_1h_tokens" field. +func (u *UsageLogUpsert) AddCacheCreation1hTokens(v int) *UsageLogUpsert { + u.Add(usagelog.FieldCacheCreation1hTokens, v) + return u +} + +// SetInputCost sets the "input_cost" field. +func (u *UsageLogUpsert) SetInputCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldInputCost, v) + return u +} + +// UpdateInputCost sets the "input_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateInputCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldInputCost) + return u +} + +// AddInputCost adds v to the "input_cost" field. +func (u *UsageLogUpsert) AddInputCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldInputCost, v) + return u +} + +// SetOutputCost sets the "output_cost" field. 
+func (u *UsageLogUpsert) SetOutputCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldOutputCost, v) + return u +} + +// UpdateOutputCost sets the "output_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateOutputCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldOutputCost) + return u +} + +// AddOutputCost adds v to the "output_cost" field. +func (u *UsageLogUpsert) AddOutputCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldOutputCost, v) + return u +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (u *UsageLogUpsert) SetCacheCreationCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldCacheCreationCost, v) + return u +} + +// UpdateCacheCreationCost sets the "cache_creation_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheCreationCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheCreationCost) + return u +} + +// AddCacheCreationCost adds v to the "cache_creation_cost" field. +func (u *UsageLogUpsert) AddCacheCreationCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldCacheCreationCost, v) + return u +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (u *UsageLogUpsert) SetCacheReadCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldCacheReadCost, v) + return u +} + +// UpdateCacheReadCost sets the "cache_read_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateCacheReadCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldCacheReadCost) + return u +} + +// AddCacheReadCost adds v to the "cache_read_cost" field. +func (u *UsageLogUpsert) AddCacheReadCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldCacheReadCost, v) + return u +} + +// SetTotalCost sets the "total_cost" field. +func (u *UsageLogUpsert) SetTotalCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldTotalCost, v) + return u +} + +// UpdateTotalCost sets the "total_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateTotalCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldTotalCost) + return u +} + +// AddTotalCost adds v to the "total_cost" field. +func (u *UsageLogUpsert) AddTotalCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldTotalCost, v) + return u +} + +// SetActualCost sets the "actual_cost" field. +func (u *UsageLogUpsert) SetActualCost(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldActualCost, v) + return u +} + +// UpdateActualCost sets the "actual_cost" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateActualCost() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldActualCost) + return u +} + +// AddActualCost adds v to the "actual_cost" field. +func (u *UsageLogUpsert) AddActualCost(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldActualCost, v) + return u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *UsageLogUpsert) SetRateMultiplier(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldRateMultiplier, v) + return u +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateRateMultiplier() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldRateMultiplier) + return u +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *UsageLogUpsert) AddRateMultiplier(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldRateMultiplier, v) + return u +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. 
+func (u *UsageLogUpsert) SetAccountRateMultiplier(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldAccountRateMultiplier, v) + return u +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateAccountRateMultiplier() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldAccountRateMultiplier) + return u +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsert) AddAccountRateMultiplier(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldAccountRateMultiplier, v) + return u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsert) ClearAccountRateMultiplier() *UsageLogUpsert { + u.SetNull(usagelog.FieldAccountRateMultiplier) + return u +} + +// SetBillingType sets the "billing_type" field. +func (u *UsageLogUpsert) SetBillingType(v int8) *UsageLogUpsert { + u.Set(usagelog.FieldBillingType, v) + return u +} + +// UpdateBillingType sets the "billing_type" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateBillingType() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldBillingType) + return u +} + +// AddBillingType adds v to the "billing_type" field. +func (u *UsageLogUpsert) AddBillingType(v int8) *UsageLogUpsert { + u.Add(usagelog.FieldBillingType, v) + return u +} + +// SetStream sets the "stream" field. +func (u *UsageLogUpsert) SetStream(v bool) *UsageLogUpsert { + u.Set(usagelog.FieldStream, v) + return u +} + +// UpdateStream sets the "stream" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateStream() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldStream) + return u +} + +// SetDurationMs sets the "duration_ms" field. +func (u *UsageLogUpsert) SetDurationMs(v int) *UsageLogUpsert { + u.Set(usagelog.FieldDurationMs, v) + return u +} + +// UpdateDurationMs sets the "duration_ms" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateDurationMs() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldDurationMs) + return u +} + +// AddDurationMs adds v to the "duration_ms" field. +func (u *UsageLogUpsert) AddDurationMs(v int) *UsageLogUpsert { + u.Add(usagelog.FieldDurationMs, v) + return u +} + +// ClearDurationMs clears the value of the "duration_ms" field. +func (u *UsageLogUpsert) ClearDurationMs() *UsageLogUpsert { + u.SetNull(usagelog.FieldDurationMs) + return u +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (u *UsageLogUpsert) SetFirstTokenMs(v int) *UsageLogUpsert { + u.Set(usagelog.FieldFirstTokenMs, v) + return u +} + +// UpdateFirstTokenMs sets the "first_token_ms" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateFirstTokenMs() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldFirstTokenMs) + return u +} + +// AddFirstTokenMs adds v to the "first_token_ms" field. +func (u *UsageLogUpsert) AddFirstTokenMs(v int) *UsageLogUpsert { + u.Add(usagelog.FieldFirstTokenMs, v) + return u +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. +func (u *UsageLogUpsert) ClearFirstTokenMs() *UsageLogUpsert { + u.SetNull(usagelog.FieldFirstTokenMs) + return u +} + +// SetUserAgent sets the "user_agent" field. +func (u *UsageLogUpsert) SetUserAgent(v string) *UsageLogUpsert { + u.Set(usagelog.FieldUserAgent, v) + return u +} + +// UpdateUserAgent sets the "user_agent" field to the value that was provided on create. 
+func (u *UsageLogUpsert) UpdateUserAgent() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldUserAgent) + return u +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (u *UsageLogUpsert) ClearUserAgent() *UsageLogUpsert { + u.SetNull(usagelog.FieldUserAgent) + return u +} + +// SetIPAddress sets the "ip_address" field. +func (u *UsageLogUpsert) SetIPAddress(v string) *UsageLogUpsert { + u.Set(usagelog.FieldIPAddress, v) + return u +} + +// UpdateIPAddress sets the "ip_address" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateIPAddress() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldIPAddress) + return u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (u *UsageLogUpsert) ClearIPAddress() *UsageLogUpsert { + u.SetNull(usagelog.FieldIPAddress) + return u +} + +// SetImageCount sets the "image_count" field. +func (u *UsageLogUpsert) SetImageCount(v int) *UsageLogUpsert { + u.Set(usagelog.FieldImageCount, v) + return u +} + +// UpdateImageCount sets the "image_count" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateImageCount() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldImageCount) + return u +} + +// AddImageCount adds v to the "image_count" field. +func (u *UsageLogUpsert) AddImageCount(v int) *UsageLogUpsert { + u.Add(usagelog.FieldImageCount, v) + return u +} + +// SetImageSize sets the "image_size" field. +func (u *UsageLogUpsert) SetImageSize(v string) *UsageLogUpsert { + u.Set(usagelog.FieldImageSize, v) + return u +} + +// UpdateImageSize sets the "image_size" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateImageSize() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldImageSize) + return u +} + +// ClearImageSize clears the value of the "image_size" field. +func (u *UsageLogUpsert) ClearImageSize() *UsageLogUpsert { + u.SetNull(usagelog.FieldImageSize) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UsageLog.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UsageLogUpsertOne) UpdateNewValues() *UsageLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(usagelog.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UsageLog.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UsageLogUpsertOne) Ignore() *UsageLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UsageLogUpsertOne) DoNothing() *UsageLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageLogCreate.OnConflict +// documentation for more info. +func (u *UsageLogUpsertOne) Update(set func(*UsageLogUpsert)) *UsageLogUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageLogUpsert{UpdateSet: update}) + })) + return u +} + +// SetUserID sets the "user_id" field. 
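+//
+// A minimal usage sketch (assumes a unique index on "request_id"; variable
+// names are illustrative):
+//
+//	client.UsageLog.Create().
+//		SetRequestID(rid).
+//		SetUserID(uid).
+//		OnConflictColumns(usagelog.FieldRequestID).
+//		SetUserID(uid).
+//		ExecX(ctx)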
+func (u *UsageLogUpsertOne) SetUserID(v int64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateUserID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateUserID() + }) +} + +// SetAPIKeyID sets the "api_key_id" field. +func (u *UsageLogUpsertOne) SetAPIKeyID(v int64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetAPIKeyID(v) + }) +} + +// UpdateAPIKeyID sets the "api_key_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateAPIKeyID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAPIKeyID() + }) +} + +// SetAccountID sets the "account_id" field. +func (u *UsageLogUpsertOne) SetAccountID(v int64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateAccountID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountID() + }) +} + +// SetRequestID sets the "request_id" field. +func (u *UsageLogUpsertOne) SetRequestID(v string) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetRequestID(v) + }) +} + +// UpdateRequestID sets the "request_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateRequestID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateRequestID() + }) +} + +// SetModel sets the "model" field. +func (u *UsageLogUpsertOne) SetModel(v string) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateModel() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateModel() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UsageLogUpsertOne) SetGroupID(v int64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateGroupID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *UsageLogUpsertOne) ClearGroupID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearGroupID() + }) +} + +// SetSubscriptionID sets the "subscription_id" field. +func (u *UsageLogUpsertOne) SetSubscriptionID(v int64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetSubscriptionID(v) + }) +} + +// UpdateSubscriptionID sets the "subscription_id" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateSubscriptionID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateSubscriptionID() + }) +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (u *UsageLogUpsertOne) ClearSubscriptionID() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearSubscriptionID() + }) +} + +// SetInputTokens sets the "input_tokens" field. 
+func (u *UsageLogUpsertOne) SetInputTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetInputTokens(v) + }) +} + +// AddInputTokens adds v to the "input_tokens" field. +func (u *UsageLogUpsertOne) AddInputTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddInputTokens(v) + }) +} + +// UpdateInputTokens sets the "input_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateInputTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateInputTokens() + }) +} + +// SetOutputTokens sets the "output_tokens" field. +func (u *UsageLogUpsertOne) SetOutputTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetOutputTokens(v) + }) +} + +// AddOutputTokens adds v to the "output_tokens" field. +func (u *UsageLogUpsertOne) AddOutputTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddOutputTokens(v) + }) +} + +// UpdateOutputTokens sets the "output_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateOutputTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateOutputTokens() + }) +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (u *UsageLogUpsertOne) SetCacheCreationTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreationTokens(v) + }) +} + +// AddCacheCreationTokens adds v to the "cache_creation_tokens" field. +func (u *UsageLogUpsertOne) AddCacheCreationTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreationTokens(v) + }) +} + +// UpdateCacheCreationTokens sets the "cache_creation_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheCreationTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreationTokens() + }) +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (u *UsageLogUpsertOne) SetCacheReadTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheReadTokens(v) + }) +} + +// AddCacheReadTokens adds v to the "cache_read_tokens" field. +func (u *UsageLogUpsertOne) AddCacheReadTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheReadTokens(v) + }) +} + +// UpdateCacheReadTokens sets the "cache_read_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheReadTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheReadTokens() + }) +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (u *UsageLogUpsertOne) SetCacheCreation5mTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreation5mTokens(v) + }) +} + +// AddCacheCreation5mTokens adds v to the "cache_creation_5m_tokens" field. +func (u *UsageLogUpsertOne) AddCacheCreation5mTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreation5mTokens(v) + }) +} + +// UpdateCacheCreation5mTokens sets the "cache_creation_5m_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheCreation5mTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreation5mTokens() + }) +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. 
+func (u *UsageLogUpsertOne) SetCacheCreation1hTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreation1hTokens(v) + }) +} + +// AddCacheCreation1hTokens adds v to the "cache_creation_1h_tokens" field. +func (u *UsageLogUpsertOne) AddCacheCreation1hTokens(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreation1hTokens(v) + }) +} + +// UpdateCacheCreation1hTokens sets the "cache_creation_1h_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheCreation1hTokens() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreation1hTokens() + }) +} + +// SetInputCost sets the "input_cost" field. +func (u *UsageLogUpsertOne) SetInputCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetInputCost(v) + }) +} + +// AddInputCost adds v to the "input_cost" field. +func (u *UsageLogUpsertOne) AddInputCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddInputCost(v) + }) +} + +// UpdateInputCost sets the "input_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateInputCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateInputCost() + }) +} + +// SetOutputCost sets the "output_cost" field. +func (u *UsageLogUpsertOne) SetOutputCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetOutputCost(v) + }) +} + +// AddOutputCost adds v to the "output_cost" field. +func (u *UsageLogUpsertOne) AddOutputCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddOutputCost(v) + }) +} + +// UpdateOutputCost sets the "output_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateOutputCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateOutputCost() + }) +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (u *UsageLogUpsertOne) SetCacheCreationCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreationCost(v) + }) +} + +// AddCacheCreationCost adds v to the "cache_creation_cost" field. +func (u *UsageLogUpsertOne) AddCacheCreationCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreationCost(v) + }) +} + +// UpdateCacheCreationCost sets the "cache_creation_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheCreationCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreationCost() + }) +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (u *UsageLogUpsertOne) SetCacheReadCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheReadCost(v) + }) +} + +// AddCacheReadCost adds v to the "cache_read_cost" field. +func (u *UsageLogUpsertOne) AddCacheReadCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheReadCost(v) + }) +} + +// UpdateCacheReadCost sets the "cache_read_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateCacheReadCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheReadCost() + }) +} + +// SetTotalCost sets the "total_cost" field. 
+func (u *UsageLogUpsertOne) SetTotalCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetTotalCost(v) + }) +} + +// AddTotalCost adds v to the "total_cost" field. +func (u *UsageLogUpsertOne) AddTotalCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddTotalCost(v) + }) +} + +// UpdateTotalCost sets the "total_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateTotalCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateTotalCost() + }) +} + +// SetActualCost sets the "actual_cost" field. +func (u *UsageLogUpsertOne) SetActualCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetActualCost(v) + }) +} + +// AddActualCost adds v to the "actual_cost" field. +func (u *UsageLogUpsertOne) AddActualCost(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddActualCost(v) + }) +} + +// UpdateActualCost sets the "actual_cost" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateActualCost() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateActualCost() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *UsageLogUpsertOne) SetRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *UsageLogUpsertOne) AddRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateRateMultiplier() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) SetAccountRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountRateMultiplier(v) + }) +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) AddAccountRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddAccountRateMultiplier(v) + }) +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateAccountRateMultiplier() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountRateMultiplier() + }) +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) ClearAccountRateMultiplier() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearAccountRateMultiplier() + }) +} + +// SetBillingType sets the "billing_type" field. +func (u *UsageLogUpsertOne) SetBillingType(v int8) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetBillingType(v) + }) +} + +// AddBillingType adds v to the "billing_type" field. +func (u *UsageLogUpsertOne) AddBillingType(v int8) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddBillingType(v) + }) +} + +// UpdateBillingType sets the "billing_type" field to the value that was provided on create. 
+func (u *UsageLogUpsertOne) UpdateBillingType() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateBillingType() + }) +} + +// SetStream sets the "stream" field. +func (u *UsageLogUpsertOne) SetStream(v bool) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetStream(v) + }) +} + +// UpdateStream sets the "stream" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateStream() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateStream() + }) +} + +// SetDurationMs sets the "duration_ms" field. +func (u *UsageLogUpsertOne) SetDurationMs(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetDurationMs(v) + }) +} + +// AddDurationMs adds v to the "duration_ms" field. +func (u *UsageLogUpsertOne) AddDurationMs(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddDurationMs(v) + }) +} + +// UpdateDurationMs sets the "duration_ms" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateDurationMs() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateDurationMs() + }) +} + +// ClearDurationMs clears the value of the "duration_ms" field. +func (u *UsageLogUpsertOne) ClearDurationMs() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearDurationMs() + }) +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (u *UsageLogUpsertOne) SetFirstTokenMs(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetFirstTokenMs(v) + }) +} + +// AddFirstTokenMs adds v to the "first_token_ms" field. +func (u *UsageLogUpsertOne) AddFirstTokenMs(v int) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddFirstTokenMs(v) + }) +} + +// UpdateFirstTokenMs sets the "first_token_ms" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateFirstTokenMs() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateFirstTokenMs() + }) +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. +func (u *UsageLogUpsertOne) ClearFirstTokenMs() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearFirstTokenMs() + }) +} + +// SetUserAgent sets the "user_agent" field. +func (u *UsageLogUpsertOne) SetUserAgent(v string) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetUserAgent(v) + }) +} + +// UpdateUserAgent sets the "user_agent" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateUserAgent() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateUserAgent() + }) +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (u *UsageLogUpsertOne) ClearUserAgent() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearUserAgent() + }) +} + +// SetIPAddress sets the "ip_address" field. +func (u *UsageLogUpsertOne) SetIPAddress(v string) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetIPAddress(v) + }) +} + +// UpdateIPAddress sets the "ip_address" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateIPAddress() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateIPAddress() + }) +} + +// ClearIPAddress clears the value of the "ip_address" field. 
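+// Clearing resolves to writing SQL NULL to the column in the UPDATE clause.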
+func (u *UsageLogUpsertOne) ClearIPAddress() *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.ClearIPAddress()
+	})
+}
+
+// SetImageCount sets the "image_count" field.
+func (u *UsageLogUpsertOne) SetImageCount(v int) *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.SetImageCount(v)
+	})
+}
+
+// AddImageCount adds v to the "image_count" field.
+func (u *UsageLogUpsertOne) AddImageCount(v int) *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.AddImageCount(v)
+	})
+}
+
+// UpdateImageCount sets the "image_count" field to the value that was provided on create.
+func (u *UsageLogUpsertOne) UpdateImageCount() *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.UpdateImageCount()
+	})
+}
+
+// SetImageSize sets the "image_size" field.
+func (u *UsageLogUpsertOne) SetImageSize(v string) *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.SetImageSize(v)
+	})
+}
+
+// UpdateImageSize sets the "image_size" field to the value that was provided on create.
+func (u *UsageLogUpsertOne) UpdateImageSize() *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.UpdateImageSize()
+	})
+}
+
+// ClearImageSize clears the value of the "image_size" field.
+func (u *UsageLogUpsertOne) ClearImageSize() *UsageLogUpsertOne {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.ClearImageSize()
+	})
+}
+
+// Exec executes the query.
+func (u *UsageLogUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UsageLogCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UsageLogUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *UsageLogUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *UsageLogUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// UsageLogCreateBulk is the builder for creating many UsageLog entities in bulk.
+type UsageLogCreateBulk struct {
+	config
+	err      error
+	builders []*UsageLogCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the UsageLog entities in the database.
+func (_c *UsageLogCreateBulk) Save(ctx context.Context) ([]*UsageLog, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*UsageLog, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*UsageLogMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
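+					// Constraint violations are wrapped in *ConstraintError below,
+					// so callers can detect them with ent.IsConstraintError.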
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UsageLogCreateBulk) SaveX(ctx context.Context) []*UsageLog {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UsageLogCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *UsageLogCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UsageLog.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UsageLogUpsert) {
+//			u.SetUserID(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *UsageLogCreateBulk) OnConflict(opts ...sql.ConflictOption) *UsageLogUpsertBulk {
+	_c.conflict = opts
+	return &UsageLogUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UsageLog.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UsageLogCreateBulk) OnConflictColumns(columns ...string) *UsageLogUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UsageLogUpsertBulk{
+		create: _c,
+	}
+}
+
+// UsageLogUpsertBulk is the builder for "upsert"-ing
+// a bulk of UsageLog nodes.
+type UsageLogUpsertBulk struct {
+	create *UsageLogCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.UsageLog.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+//		Exec(ctx)
+func (u *UsageLogUpsertBulk) UpdateNewValues() *UsageLogUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		for _, b := range u.create.builders {
+			if _, exists := b.mutation.CreatedAt(); exists {
+				s.SetIgnore(usagelog.FieldCreatedAt)
+			}
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.UsageLog.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *UsageLogUpsertBulk) Ignore() *UsageLogUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
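+//
+// For example, to insert new usage logs and silently skip duplicates
+// (a sketch; assumes a unique index on "request_id"):
+//
+//	client.UsageLog.CreateBulk(builders...).
+//		OnConflictColumns(usagelog.FieldRequestID).
+//		DoNothing().
+//		Exec(ctx)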
+func (u *UsageLogUpsertBulk) DoNothing() *UsageLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UsageLogCreateBulk.OnConflict +// documentation for more info. +func (u *UsageLogUpsertBulk) Update(set func(*UsageLogUpsert)) *UsageLogUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UsageLogUpsert{UpdateSet: update}) + })) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UsageLogUpsertBulk) SetUserID(v int64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateUserID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateUserID() + }) +} + +// SetAPIKeyID sets the "api_key_id" field. +func (u *UsageLogUpsertBulk) SetAPIKeyID(v int64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetAPIKeyID(v) + }) +} + +// UpdateAPIKeyID sets the "api_key_id" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateAPIKeyID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAPIKeyID() + }) +} + +// SetAccountID sets the "account_id" field. +func (u *UsageLogUpsertBulk) SetAccountID(v int64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountID(v) + }) +} + +// UpdateAccountID sets the "account_id" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateAccountID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountID() + }) +} + +// SetRequestID sets the "request_id" field. +func (u *UsageLogUpsertBulk) SetRequestID(v string) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetRequestID(v) + }) +} + +// UpdateRequestID sets the "request_id" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateRequestID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateRequestID() + }) +} + +// SetModel sets the "model" field. +func (u *UsageLogUpsertBulk) SetModel(v string) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetModel(v) + }) +} + +// UpdateModel sets the "model" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateModel() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateModel() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UsageLogUpsertBulk) SetGroupID(v int64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateGroupID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateGroupID() + }) +} + +// ClearGroupID clears the value of the "group_id" field. +func (u *UsageLogUpsertBulk) ClearGroupID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearGroupID() + }) +} + +// SetSubscriptionID sets the "subscription_id" field. +func (u *UsageLogUpsertBulk) SetSubscriptionID(v int64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetSubscriptionID(v) + }) +} + +// UpdateSubscriptionID sets the "subscription_id" field to the value that was provided on create. 
+func (u *UsageLogUpsertBulk) UpdateSubscriptionID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateSubscriptionID() + }) +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (u *UsageLogUpsertBulk) ClearSubscriptionID() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearSubscriptionID() + }) +} + +// SetInputTokens sets the "input_tokens" field. +func (u *UsageLogUpsertBulk) SetInputTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetInputTokens(v) + }) +} + +// AddInputTokens adds v to the "input_tokens" field. +func (u *UsageLogUpsertBulk) AddInputTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddInputTokens(v) + }) +} + +// UpdateInputTokens sets the "input_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateInputTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateInputTokens() + }) +} + +// SetOutputTokens sets the "output_tokens" field. +func (u *UsageLogUpsertBulk) SetOutputTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetOutputTokens(v) + }) +} + +// AddOutputTokens adds v to the "output_tokens" field. +func (u *UsageLogUpsertBulk) AddOutputTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddOutputTokens(v) + }) +} + +// UpdateOutputTokens sets the "output_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateOutputTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateOutputTokens() + }) +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (u *UsageLogUpsertBulk) SetCacheCreationTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreationTokens(v) + }) +} + +// AddCacheCreationTokens adds v to the "cache_creation_tokens" field. +func (u *UsageLogUpsertBulk) AddCacheCreationTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreationTokens(v) + }) +} + +// UpdateCacheCreationTokens sets the "cache_creation_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheCreationTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreationTokens() + }) +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (u *UsageLogUpsertBulk) SetCacheReadTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheReadTokens(v) + }) +} + +// AddCacheReadTokens adds v to the "cache_read_tokens" field. +func (u *UsageLogUpsertBulk) AddCacheReadTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheReadTokens(v) + }) +} + +// UpdateCacheReadTokens sets the "cache_read_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheReadTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheReadTokens() + }) +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (u *UsageLogUpsertBulk) SetCacheCreation5mTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreation5mTokens(v) + }) +} + +// AddCacheCreation5mTokens adds v to the "cache_creation_5m_tokens" field. 
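+// On conflict this resolves to "cache_creation_5m_tokens" =
+// "cache_creation_5m_tokens" + v, so concurrent writers accumulate the
+// counter rather than overwrite it.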
+func (u *UsageLogUpsertBulk) AddCacheCreation5mTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreation5mTokens(v) + }) +} + +// UpdateCacheCreation5mTokens sets the "cache_creation_5m_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheCreation5mTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreation5mTokens() + }) +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (u *UsageLogUpsertBulk) SetCacheCreation1hTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreation1hTokens(v) + }) +} + +// AddCacheCreation1hTokens adds v to the "cache_creation_1h_tokens" field. +func (u *UsageLogUpsertBulk) AddCacheCreation1hTokens(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreation1hTokens(v) + }) +} + +// UpdateCacheCreation1hTokens sets the "cache_creation_1h_tokens" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheCreation1hTokens() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreation1hTokens() + }) +} + +// SetInputCost sets the "input_cost" field. +func (u *UsageLogUpsertBulk) SetInputCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetInputCost(v) + }) +} + +// AddInputCost adds v to the "input_cost" field. +func (u *UsageLogUpsertBulk) AddInputCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddInputCost(v) + }) +} + +// UpdateInputCost sets the "input_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateInputCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateInputCost() + }) +} + +// SetOutputCost sets the "output_cost" field. +func (u *UsageLogUpsertBulk) SetOutputCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetOutputCost(v) + }) +} + +// AddOutputCost adds v to the "output_cost" field. +func (u *UsageLogUpsertBulk) AddOutputCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddOutputCost(v) + }) +} + +// UpdateOutputCost sets the "output_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateOutputCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateOutputCost() + }) +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (u *UsageLogUpsertBulk) SetCacheCreationCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheCreationCost(v) + }) +} + +// AddCacheCreationCost adds v to the "cache_creation_cost" field. +func (u *UsageLogUpsertBulk) AddCacheCreationCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheCreationCost(v) + }) +} + +// UpdateCacheCreationCost sets the "cache_creation_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheCreationCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheCreationCost() + }) +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (u *UsageLogUpsertBulk) SetCacheReadCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetCacheReadCost(v) + }) +} + +// AddCacheReadCost adds v to the "cache_read_cost" field. 
+func (u *UsageLogUpsertBulk) AddCacheReadCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddCacheReadCost(v) + }) +} + +// UpdateCacheReadCost sets the "cache_read_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateCacheReadCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateCacheReadCost() + }) +} + +// SetTotalCost sets the "total_cost" field. +func (u *UsageLogUpsertBulk) SetTotalCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetTotalCost(v) + }) +} + +// AddTotalCost adds v to the "total_cost" field. +func (u *UsageLogUpsertBulk) AddTotalCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddTotalCost(v) + }) +} + +// UpdateTotalCost sets the "total_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateTotalCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateTotalCost() + }) +} + +// SetActualCost sets the "actual_cost" field. +func (u *UsageLogUpsertBulk) SetActualCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetActualCost(v) + }) +} + +// AddActualCost adds v to the "actual_cost" field. +func (u *UsageLogUpsertBulk) AddActualCost(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddActualCost(v) + }) +} + +// UpdateActualCost sets the "actual_cost" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateActualCost() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateActualCost() + }) +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *UsageLogUpsertBulk) SetRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *UsageLogUpsertBulk) AddRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateRateMultiplier() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateRateMultiplier() + }) +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) SetAccountRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountRateMultiplier(v) + }) +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) AddAccountRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddAccountRateMultiplier(v) + }) +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateAccountRateMultiplier() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountRateMultiplier() + }) +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) ClearAccountRateMultiplier() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearAccountRateMultiplier() + }) +} + +// SetBillingType sets the "billing_type" field. 
+func (u *UsageLogUpsertBulk) SetBillingType(v int8) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetBillingType(v) + }) +} + +// AddBillingType adds v to the "billing_type" field. +func (u *UsageLogUpsertBulk) AddBillingType(v int8) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddBillingType(v) + }) +} + +// UpdateBillingType sets the "billing_type" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateBillingType() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateBillingType() + }) +} + +// SetStream sets the "stream" field. +func (u *UsageLogUpsertBulk) SetStream(v bool) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetStream(v) + }) +} + +// UpdateStream sets the "stream" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateStream() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateStream() + }) +} + +// SetDurationMs sets the "duration_ms" field. +func (u *UsageLogUpsertBulk) SetDurationMs(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetDurationMs(v) + }) +} + +// AddDurationMs adds v to the "duration_ms" field. +func (u *UsageLogUpsertBulk) AddDurationMs(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddDurationMs(v) + }) +} + +// UpdateDurationMs sets the "duration_ms" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateDurationMs() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateDurationMs() + }) +} + +// ClearDurationMs clears the value of the "duration_ms" field. +func (u *UsageLogUpsertBulk) ClearDurationMs() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearDurationMs() + }) +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (u *UsageLogUpsertBulk) SetFirstTokenMs(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetFirstTokenMs(v) + }) +} + +// AddFirstTokenMs adds v to the "first_token_ms" field. +func (u *UsageLogUpsertBulk) AddFirstTokenMs(v int) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddFirstTokenMs(v) + }) +} + +// UpdateFirstTokenMs sets the "first_token_ms" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateFirstTokenMs() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateFirstTokenMs() + }) +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. +func (u *UsageLogUpsertBulk) ClearFirstTokenMs() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearFirstTokenMs() + }) +} + +// SetUserAgent sets the "user_agent" field. +func (u *UsageLogUpsertBulk) SetUserAgent(v string) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetUserAgent(v) + }) +} + +// UpdateUserAgent sets the "user_agent" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateUserAgent() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateUserAgent() + }) +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (u *UsageLogUpsertBulk) ClearUserAgent() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearUserAgent() + }) +} + +// SetIPAddress sets the "ip_address" field. 
+func (u *UsageLogUpsertBulk) SetIPAddress(v string) *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.SetIPAddress(v)
+	})
+}
+
+// UpdateIPAddress sets the "ip_address" field to the value that was provided on create.
+func (u *UsageLogUpsertBulk) UpdateIPAddress() *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.UpdateIPAddress()
+	})
+}
+
+// ClearIPAddress clears the value of the "ip_address" field.
+func (u *UsageLogUpsertBulk) ClearIPAddress() *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.ClearIPAddress()
+	})
+}
+
+// SetImageCount sets the "image_count" field.
+func (u *UsageLogUpsertBulk) SetImageCount(v int) *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.SetImageCount(v)
+	})
+}
+
+// AddImageCount adds v to the "image_count" field.
+func (u *UsageLogUpsertBulk) AddImageCount(v int) *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.AddImageCount(v)
+	})
+}
+
+// UpdateImageCount sets the "image_count" field to the value that was provided on create.
+func (u *UsageLogUpsertBulk) UpdateImageCount() *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.UpdateImageCount()
+	})
+}
+
+// SetImageSize sets the "image_size" field.
+func (u *UsageLogUpsertBulk) SetImageSize(v string) *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.SetImageSize(v)
+	})
+}
+
+// UpdateImageSize sets the "image_size" field to the value that was provided on create.
+func (u *UsageLogUpsertBulk) UpdateImageSize() *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.UpdateImageSize()
+	})
+}
+
+// ClearImageSize clears the value of the "image_size" field.
+func (u *UsageLogUpsertBulk) ClearImageSize() *UsageLogUpsertBulk {
+	return u.Update(func(s *UsageLogUpsert) {
+		s.ClearImageSize()
+	})
+}
+
+// Exec executes the query.
+func (u *UsageLogUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UsageLogCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UsageLogCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UsageLogUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/usagelog_delete.go b/backend/ent/usagelog_delete.go
new file mode 100644
index 00000000..73450fda
--- /dev/null
+++ b/backend/ent/usagelog_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+)
+
+// UsageLogDelete is the builder for deleting a UsageLog entity.
+type UsageLogDelete struct {
+	config
+	hooks    []Hook
+	mutation *UsageLogMutation
+}
+
+// Where appends a list of predicates to the UsageLogDelete builder.
+func (_d *UsageLogDelete) Where(ps ...predicate.UsageLog) *UsageLogDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
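+//
+// For example, pruning old logs (a sketch; CreatedAtLT is the predicate
+// ent generates for the "created_at" field):
+//
+//	n, err := client.UsageLog.Delete().
+//		Where(usagelog.CreatedAtLT(cutoff)).
+//		Exec(ctx)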
+func (_d *UsageLogDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UsageLogDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *UsageLogDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(usagelog.Table, sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// UsageLogDeleteOne is the builder for deleting a single UsageLog entity.
+type UsageLogDeleteOne struct {
+	_d *UsageLogDelete
+}
+
+// Where appends a list of predicates to the UsageLogDeleteOne builder.
+func (_d *UsageLogDeleteOne) Where(ps ...predicate.UsageLog) *UsageLogDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UsageLogDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{usagelog.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UsageLogDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/usagelog_query.go b/backend/ent/usagelog_query.go
new file mode 100644
index 00000000..c709bde0
--- /dev/null
+++ b/backend/ent/usagelog_query.go
@@ -0,0 +1,949 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/apikey"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// UsageLogQuery is the builder for querying UsageLog entities.
+type UsageLogQuery struct {
+	config
+	ctx              *QueryContext
+	order            []usagelog.OrderOption
+	inters           []Interceptor
+	predicates       []predicate.UsageLog
+	withUser         *UserQuery
+	withAPIKey       *APIKeyQuery
+	withAccount      *AccountQuery
+	withGroup        *GroupQuery
+	withSubscription *UserSubscriptionQuery
+	modifiers        []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the UsageLogQuery builder.
+func (_q *UsageLogQuery) Where(ps ...predicate.UsageLog) *UsageLogQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *UsageLogQuery) Limit(limit int) *UsageLogQuery {
+	_q.ctx.Limit = &limit
+	return _q
+}
+
+// Offset to start from.
+func (_q *UsageLogQuery) Offset(offset int) *UsageLogQuery {
+	_q.ctx.Offset = &offset
+	return _q
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (_q *UsageLogQuery) Unique(unique bool) *UsageLogQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UsageLogQuery) Order(o ...usagelog.OrderOption) *UsageLogQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UsageLogQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.UserTable, usagelog.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAPIKey chains the current query on the "api_key" edge. +func (_q *UsageLogQuery) QueryAPIKey() *APIKeyQuery { + query := (&APIKeyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, selector), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.APIKeyTable, usagelog.APIKeyColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAccount chains the current query on the "account" edge. +func (_q *UsageLogQuery) QueryAccount() *AccountQuery { + query := (&AccountClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, selector), + sqlgraph.To(account.Table, account.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.AccountTable, usagelog.AccountColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *UsageLogQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.GroupTable, usagelog.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySubscription chains the current query on the "subscription" edge. 
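+//
+// A minimal usage sketch (illustrative, not generated; client, ctx, and logID
+// are assumed to be in scope):
+//
+//	sub, err := client.UsageLog.Query().
+//		Where(usagelog.IDEQ(logID)).
+//		QuerySubscription().
+//		Only(ctx)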
+func (_q *UsageLogQuery) QuerySubscription() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usagelog.Table, usagelog.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usagelog.SubscriptionTable, usagelog.SubscriptionColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UsageLog entity from the query. +// Returns a *NotFoundError when no UsageLog was found. +func (_q *UsageLogQuery) First(ctx context.Context) (*UsageLog, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{usagelog.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UsageLogQuery) FirstX(ctx context.Context) *UsageLog { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UsageLog ID from the query. +// Returns a *NotFoundError when no UsageLog ID was found. +func (_q *UsageLogQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{usagelog.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UsageLogQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UsageLog entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UsageLog entity is found. +// Returns a *NotFoundError when no UsageLog entities are found. +func (_q *UsageLogQuery) Only(ctx context.Context) (*UsageLog, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{usagelog.Label} + default: + return nil, &NotSingularError{usagelog.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UsageLogQuery) OnlyX(ctx context.Context) *UsageLog { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UsageLog ID in the query. +// Returns a *NotSingularError when more than one UsageLog ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UsageLogQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{usagelog.Label} + default: + err = &NotSingularError{usagelog.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. 
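+//
+// A minimal usage sketch (illustrative, not generated; reqID is an assumed
+// request-ID string, with client and ctx in scope):
+//
+//	logID := client.UsageLog.Query().
+//		Where(usagelog.RequestIDEQ(reqID)).
+//		OnlyIDX(ctx)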
+func (_q *UsageLogQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UsageLogs. +func (_q *UsageLogQuery) All(ctx context.Context) ([]*UsageLog, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UsageLog, *UsageLogQuery]() + return withInterceptors[[]*UsageLog](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UsageLogQuery) AllX(ctx context.Context) []*UsageLog { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UsageLog IDs. +func (_q *UsageLogQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(usagelog.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UsageLogQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UsageLogQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UsageLogQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UsageLogQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UsageLogQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UsageLogQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UsageLogQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UsageLogQuery) Clone() *UsageLogQuery { + if _q == nil { + return nil + } + return &UsageLogQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]usagelog.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UsageLog{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withAPIKey: _q.withAPIKey.Clone(), + withAccount: _q.withAccount.Clone(), + withGroup: _q.withGroup.Clone(), + withSubscription: _q.withSubscription.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. 
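+//
+// A minimal usage sketch (illustrative, not generated; assumes client and ctx
+// are in scope):
+//
+//	logs, err := client.UsageLog.Query().
+//		WithUser().
+//		WithAPIKey().
+//		All(ctx)
+//
+// Each returned UsageLog then carries the loaded entities on Edges.User and
+// Edges.APIKey.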
+func (_q *UsageLogQuery) WithUser(opts ...func(*UserQuery)) *UsageLogQuery {
+	query := (&UserClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withUser = query
+	return _q
+}
+
+// WithAPIKey tells the query-builder to eager-load the nodes that are connected to
+// the "api_key" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UsageLogQuery) WithAPIKey(opts ...func(*APIKeyQuery)) *UsageLogQuery {
+	query := (&APIKeyClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withAPIKey = query
+	return _q
+}
+
+// WithAccount tells the query-builder to eager-load the nodes that are connected to
+// the "account" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UsageLogQuery) WithAccount(opts ...func(*AccountQuery)) *UsageLogQuery {
+	query := (&AccountClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withAccount = query
+	return _q
+}
+
+// WithGroup tells the query-builder to eager-load the nodes that are connected to
+// the "group" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UsageLogQuery) WithGroup(opts ...func(*GroupQuery)) *UsageLogQuery {
+	query := (&GroupClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withGroup = query
+	return _q
+}
+
+// WithSubscription tells the query-builder to eager-load the nodes that are connected to
+// the "subscription" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UsageLogQuery) WithSubscription(opts ...func(*UserSubscriptionQuery)) *UsageLogQuery {
+	query := (&UserSubscriptionClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withSubscription = query
+	return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		UserID int64 `json:"user_id,omitempty"`
+//		Count  int   `json:"count,omitempty"`
+//	}
+//
+//	client.UsageLog.Query().
+//		GroupBy(usagelog.FieldUserID).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *UsageLogQuery) GroupBy(field string, fields ...string) *UsageLogGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &UsageLogGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = usagelog.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		UserID int64 `json:"user_id,omitempty"`
+//	}
+//
+//	client.UsageLog.Query().
+//		Select(usagelog.FieldUserID).
+//		Scan(ctx, &v)
+func (_q *UsageLogQuery) Select(fields ...string) *UsageLogSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &UsageLogSelect{UsageLogQuery: _q}
+	sbuild.label = usagelog.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a UsageLogSelect configured with the given aggregations.
+func (_q *UsageLogQuery) Aggregate(fns ...AggregateFunc) *UsageLogSelect {
+	return _q.Select().Aggregate(fns...)
+} + +func (_q *UsageLogQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !usagelog.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UsageLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UsageLog, error) { + var ( + nodes = []*UsageLog{} + _spec = _q.querySpec() + loadedTypes = [5]bool{ + _q.withUser != nil, + _q.withAPIKey != nil, + _q.withAccount != nil, + _q.withGroup != nil, + _q.withSubscription != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UsageLog).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UsageLog{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UsageLog, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withAPIKey; query != nil { + if err := _q.loadAPIKey(ctx, query, nodes, nil, + func(n *UsageLog, e *APIKey) { n.Edges.APIKey = e }); err != nil { + return nil, err + } + } + if query := _q.withAccount; query != nil { + if err := _q.loadAccount(ctx, query, nodes, nil, + func(n *UsageLog, e *Account) { n.Edges.Account = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *UsageLog, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := _q.withSubscription; query != nil { + if err := _q.loadSubscription(ctx, query, nodes, nil, + func(n *UsageLog, e *UserSubscription) { n.Edges.Subscription = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UsageLogQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UsageLog, init func(*UsageLog), assign func(*UsageLog, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UsageLog) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UsageLogQuery) loadAPIKey(ctx context.Context, query *APIKeyQuery, nodes []*UsageLog, init func(*UsageLog), assign func(*UsageLog, *APIKey)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UsageLog) + for i := range nodes { + fk := nodes[i].APIKeyID + 
if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(apikey.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "api_key_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UsageLogQuery) loadAccount(ctx context.Context, query *AccountQuery, nodes []*UsageLog, init func(*UsageLog), assign func(*UsageLog, *Account)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UsageLog) + for i := range nodes { + fk := nodes[i].AccountID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(account.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "account_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UsageLogQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*UsageLog, init func(*UsageLog), assign func(*UsageLog, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UsageLog) + for i := range nodes { + if nodes[i].GroupID == nil { + continue + } + fk := *nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UsageLogQuery) loadSubscription(ctx context.Context, query *UserSubscriptionQuery, nodes []*UsageLog, init func(*UsageLog), assign func(*UsageLog, *UserSubscription)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UsageLog) + for i := range nodes { + if nodes[i].SubscriptionID == nil { + continue + } + fk := *nodes[i].SubscriptionID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(usersubscription.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "subscription_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *UsageLogQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UsageLogQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(usagelog.Table, usagelog.Columns, sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + 
	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, usagelog.FieldID)
+		for i := range fields {
+			if fields[i] != usagelog.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+		if _q.withUser != nil {
+			_spec.Node.AddColumnOnce(usagelog.FieldUserID)
+		}
+		if _q.withAPIKey != nil {
+			_spec.Node.AddColumnOnce(usagelog.FieldAPIKeyID)
+		}
+		if _q.withAccount != nil {
+			_spec.Node.AddColumnOnce(usagelog.FieldAccountID)
+		}
+		if _q.withGroup != nil {
+			_spec.Node.AddColumnOnce(usagelog.FieldGroupID)
+		}
+		if _q.withSubscription != nil {
+			_spec.Node.AddColumnOnce(usagelog.FieldSubscriptionID)
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *UsageLogQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(usagelog.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = usagelog.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled back.
+func (_q *UsageLogQuery) ForUpdate(opts ...sql.LockOption) *UsageLogQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return _q
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (_q *UsageLogQuery) ForShare(opts ...sql.LockOption) *UsageLogQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return _q
+}
+
+// UsageLogGroupBy is the group-by builder for UsageLog entities.
+type UsageLogGroupBy struct {
+	selector
+	build *UsageLogQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *UsageLogGroupBy) Aggregate(fns ...AggregateFunc) *UsageLogGroupBy {
+	_g.fns = append(_g.fns, fns...)
+	return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
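+//
+// A minimal usage sketch (illustrative, not generated; sums total cost per
+// model, assuming client and ctx are in scope):
+//
+//	var rows []struct {
+//		Model     string  `json:"model"`
+//		TotalCost float64 `json:"total_cost"`
+//	}
+//	err := client.UsageLog.Query().
+//		GroupBy(usagelog.FieldModel).
+//		Aggregate(ent.As(ent.Sum(usagelog.FieldTotalCost), "total_cost")).
+//		Scan(ctx, &rows)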
+func (_g *UsageLogGroupBy) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
+	if err := _g.build.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*UsageLogQuery, *UsageLogGroupBy](ctx, _g.build, _g, _g.build.inters, v)
+}
+
+func (_g *UsageLogGroupBy) sqlScan(ctx context.Context, root *UsageLogQuery, v any) error {
+	selector := root.sqlQuery(ctx).Select()
+	aggregation := make([]string, 0, len(_g.fns))
+	for _, fn := range _g.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	if len(selector.SelectedColumns()) == 0 {
+		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
+		for _, f := range *_g.flds {
+			columns = append(columns, selector.C(f))
+		}
+		columns = append(columns, aggregation...)
+		selector.Select(columns...)
+	}
+	selector.GroupBy(selector.Columns(*_g.flds...)...)
+	if err := selector.Err(); err != nil {
+		return err
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
+
+// UsageLogSelect is the builder for selecting fields of UsageLog entities.
+type UsageLogSelect struct {
+	*UsageLogQuery
+	selector
+}
+
+// Aggregate adds the given aggregation functions to the selector query.
+func (_s *UsageLogSelect) Aggregate(fns ...AggregateFunc) *UsageLogSelect {
+	_s.fns = append(_s.fns, fns...)
+	return _s
+}
+
+// Scan applies the selector query and scans the result into the given value.
+func (_s *UsageLogSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
+	if err := _s.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*UsageLogQuery, *UsageLogSelect](ctx, _s.UsageLogQuery, _s, _s.inters, v)
+}
+
+func (_s *UsageLogSelect) sqlScan(ctx context.Context, root *UsageLogQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(_s.fns))
+	for _, fn := range _s.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*_s.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/backend/ent/usagelog_update.go b/backend/ent/usagelog_update.go
new file mode 100644
index 00000000..571a7b3c
--- /dev/null
+++ b/backend/ent/usagelog_update.go
@@ -0,0 +1,2112 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/account"
+	"github.com/Wei-Shaw/sub2api/ent/apikey"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// UsageLogUpdate is the builder for updating UsageLog entities.
+type UsageLogUpdate struct {
+	config
+	hooks    []Hook
+	mutation *UsageLogMutation
+}
+
+// Where appends a list of predicates to the UsageLogUpdate builder.
+func (_u *UsageLogUpdate) Where(ps ...predicate.UsageLog) *UsageLogUpdate {
+	_u.mutation.Where(ps...)
+ return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UsageLogUpdate) SetUserID(v int64) *UsageLogUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableUserID(v *int64) *UsageLogUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetAPIKeyID sets the "api_key_id" field. +func (_u *UsageLogUpdate) SetAPIKeyID(v int64) *UsageLogUpdate { + _u.mutation.SetAPIKeyID(v) + return _u +} + +// SetNillableAPIKeyID sets the "api_key_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableAPIKeyID(v *int64) *UsageLogUpdate { + if v != nil { + _u.SetAPIKeyID(*v) + } + return _u +} + +// SetAccountID sets the "account_id" field. +func (_u *UsageLogUpdate) SetAccountID(v int64) *UsageLogUpdate { + _u.mutation.SetAccountID(v) + return _u +} + +// SetNillableAccountID sets the "account_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableAccountID(v *int64) *UsageLogUpdate { + if v != nil { + _u.SetAccountID(*v) + } + return _u +} + +// SetRequestID sets the "request_id" field. +func (_u *UsageLogUpdate) SetRequestID(v string) *UsageLogUpdate { + _u.mutation.SetRequestID(v) + return _u +} + +// SetNillableRequestID sets the "request_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableRequestID(v *string) *UsageLogUpdate { + if v != nil { + _u.SetRequestID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *UsageLogUpdate) SetModel(v string) *UsageLogUpdate { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableModel(v *string) *UsageLogUpdate { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UsageLogUpdate) SetGroupID(v int64) *UsageLogUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableGroupID(v *int64) *UsageLogUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *UsageLogUpdate) ClearGroupID() *UsageLogUpdate { + _u.mutation.ClearGroupID() + return _u +} + +// SetSubscriptionID sets the "subscription_id" field. +func (_u *UsageLogUpdate) SetSubscriptionID(v int64) *UsageLogUpdate { + _u.mutation.SetSubscriptionID(v) + return _u +} + +// SetNillableSubscriptionID sets the "subscription_id" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableSubscriptionID(v *int64) *UsageLogUpdate { + if v != nil { + _u.SetSubscriptionID(*v) + } + return _u +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (_u *UsageLogUpdate) ClearSubscriptionID() *UsageLogUpdate { + _u.mutation.ClearSubscriptionID() + return _u +} + +// SetInputTokens sets the "input_tokens" field. +func (_u *UsageLogUpdate) SetInputTokens(v int) *UsageLogUpdate { + _u.mutation.ResetInputTokens() + _u.mutation.SetInputTokens(v) + return _u +} + +// SetNillableInputTokens sets the "input_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableInputTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetInputTokens(*v) + } + return _u +} + +// AddInputTokens adds value to the "input_tokens" field. 
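+//
+// Unlike SetInputTokens, this issues an in-place increment on save. A minimal
+// usage sketch (illustrative, not generated; client, ctx, and reqID are
+// assumed to be in scope):
+//
+//	n, err := client.UsageLog.Update().
+//		Where(usagelog.RequestIDEQ(reqID)).
+//		AddInputTokens(128).
+//		Save(ctx)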
+func (_u *UsageLogUpdate) AddInputTokens(v int) *UsageLogUpdate { + _u.mutation.AddInputTokens(v) + return _u +} + +// SetOutputTokens sets the "output_tokens" field. +func (_u *UsageLogUpdate) SetOutputTokens(v int) *UsageLogUpdate { + _u.mutation.ResetOutputTokens() + _u.mutation.SetOutputTokens(v) + return _u +} + +// SetNillableOutputTokens sets the "output_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableOutputTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetOutputTokens(*v) + } + return _u +} + +// AddOutputTokens adds value to the "output_tokens" field. +func (_u *UsageLogUpdate) AddOutputTokens(v int) *UsageLogUpdate { + _u.mutation.AddOutputTokens(v) + return _u +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (_u *UsageLogUpdate) SetCacheCreationTokens(v int) *UsageLogUpdate { + _u.mutation.ResetCacheCreationTokens() + _u.mutation.SetCacheCreationTokens(v) + return _u +} + +// SetNillableCacheCreationTokens sets the "cache_creation_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheCreationTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetCacheCreationTokens(*v) + } + return _u +} + +// AddCacheCreationTokens adds value to the "cache_creation_tokens" field. +func (_u *UsageLogUpdate) AddCacheCreationTokens(v int) *UsageLogUpdate { + _u.mutation.AddCacheCreationTokens(v) + return _u +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (_u *UsageLogUpdate) SetCacheReadTokens(v int) *UsageLogUpdate { + _u.mutation.ResetCacheReadTokens() + _u.mutation.SetCacheReadTokens(v) + return _u +} + +// SetNillableCacheReadTokens sets the "cache_read_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheReadTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetCacheReadTokens(*v) + } + return _u +} + +// AddCacheReadTokens adds value to the "cache_read_tokens" field. +func (_u *UsageLogUpdate) AddCacheReadTokens(v int) *UsageLogUpdate { + _u.mutation.AddCacheReadTokens(v) + return _u +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (_u *UsageLogUpdate) SetCacheCreation5mTokens(v int) *UsageLogUpdate { + _u.mutation.ResetCacheCreation5mTokens() + _u.mutation.SetCacheCreation5mTokens(v) + return _u +} + +// SetNillableCacheCreation5mTokens sets the "cache_creation_5m_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheCreation5mTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetCacheCreation5mTokens(*v) + } + return _u +} + +// AddCacheCreation5mTokens adds value to the "cache_creation_5m_tokens" field. +func (_u *UsageLogUpdate) AddCacheCreation5mTokens(v int) *UsageLogUpdate { + _u.mutation.AddCacheCreation5mTokens(v) + return _u +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (_u *UsageLogUpdate) SetCacheCreation1hTokens(v int) *UsageLogUpdate { + _u.mutation.ResetCacheCreation1hTokens() + _u.mutation.SetCacheCreation1hTokens(v) + return _u +} + +// SetNillableCacheCreation1hTokens sets the "cache_creation_1h_tokens" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheCreation1hTokens(v *int) *UsageLogUpdate { + if v != nil { + _u.SetCacheCreation1hTokens(*v) + } + return _u +} + +// AddCacheCreation1hTokens adds value to the "cache_creation_1h_tokens" field. 
+func (_u *UsageLogUpdate) AddCacheCreation1hTokens(v int) *UsageLogUpdate { + _u.mutation.AddCacheCreation1hTokens(v) + return _u +} + +// SetInputCost sets the "input_cost" field. +func (_u *UsageLogUpdate) SetInputCost(v float64) *UsageLogUpdate { + _u.mutation.ResetInputCost() + _u.mutation.SetInputCost(v) + return _u +} + +// SetNillableInputCost sets the "input_cost" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableInputCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetInputCost(*v) + } + return _u +} + +// AddInputCost adds value to the "input_cost" field. +func (_u *UsageLogUpdate) AddInputCost(v float64) *UsageLogUpdate { + _u.mutation.AddInputCost(v) + return _u +} + +// SetOutputCost sets the "output_cost" field. +func (_u *UsageLogUpdate) SetOutputCost(v float64) *UsageLogUpdate { + _u.mutation.ResetOutputCost() + _u.mutation.SetOutputCost(v) + return _u +} + +// SetNillableOutputCost sets the "output_cost" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableOutputCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetOutputCost(*v) + } + return _u +} + +// AddOutputCost adds value to the "output_cost" field. +func (_u *UsageLogUpdate) AddOutputCost(v float64) *UsageLogUpdate { + _u.mutation.AddOutputCost(v) + return _u +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (_u *UsageLogUpdate) SetCacheCreationCost(v float64) *UsageLogUpdate { + _u.mutation.ResetCacheCreationCost() + _u.mutation.SetCacheCreationCost(v) + return _u +} + +// SetNillableCacheCreationCost sets the "cache_creation_cost" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheCreationCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetCacheCreationCost(*v) + } + return _u +} + +// AddCacheCreationCost adds value to the "cache_creation_cost" field. +func (_u *UsageLogUpdate) AddCacheCreationCost(v float64) *UsageLogUpdate { + _u.mutation.AddCacheCreationCost(v) + return _u +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (_u *UsageLogUpdate) SetCacheReadCost(v float64) *UsageLogUpdate { + _u.mutation.ResetCacheReadCost() + _u.mutation.SetCacheReadCost(v) + return _u +} + +// SetNillableCacheReadCost sets the "cache_read_cost" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableCacheReadCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetCacheReadCost(*v) + } + return _u +} + +// AddCacheReadCost adds value to the "cache_read_cost" field. +func (_u *UsageLogUpdate) AddCacheReadCost(v float64) *UsageLogUpdate { + _u.mutation.AddCacheReadCost(v) + return _u +} + +// SetTotalCost sets the "total_cost" field. +func (_u *UsageLogUpdate) SetTotalCost(v float64) *UsageLogUpdate { + _u.mutation.ResetTotalCost() + _u.mutation.SetTotalCost(v) + return _u +} + +// SetNillableTotalCost sets the "total_cost" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableTotalCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetTotalCost(*v) + } + return _u +} + +// AddTotalCost adds value to the "total_cost" field. +func (_u *UsageLogUpdate) AddTotalCost(v float64) *UsageLogUpdate { + _u.mutation.AddTotalCost(v) + return _u +} + +// SetActualCost sets the "actual_cost" field. +func (_u *UsageLogUpdate) SetActualCost(v float64) *UsageLogUpdate { + _u.mutation.ResetActualCost() + _u.mutation.SetActualCost(v) + return _u +} + +// SetNillableActualCost sets the "actual_cost" field if the given value is not nil. 
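+//
+// This follows the SetNillable pattern: a nil pointer is a no-op, so optional
+// inputs can be applied without explicit nil checks. A minimal usage sketch
+// (illustrative, not generated; override is an assumed *float64 that may be
+// nil, with client, ctx, and logID in scope):
+//
+//	update := client.UsageLog.Update().Where(usagelog.IDEQ(logID))
+//	update.SetNillableActualCost(override) // skipped when override is nil
+//	err := update.Exec(ctx)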
+func (_u *UsageLogUpdate) SetNillableActualCost(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetActualCost(*v) + } + return _u +} + +// AddActualCost adds value to the "actual_cost" field. +func (_u *UsageLogUpdate) AddActualCost(v float64) *UsageLogUpdate { + _u.mutation.AddActualCost(v) + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *UsageLogUpdate) SetRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableRateMultiplier(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *UsageLogUpdate) AddRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) SetAccountRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.ResetAccountRateMultiplier() + _u.mutation.SetAccountRateMultiplier(v) + return _u +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetAccountRateMultiplier(*v) + } + return _u +} + +// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) AddAccountRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.AddAccountRateMultiplier(v) + return _u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) ClearAccountRateMultiplier() *UsageLogUpdate { + _u.mutation.ClearAccountRateMultiplier() + return _u +} + +// SetBillingType sets the "billing_type" field. +func (_u *UsageLogUpdate) SetBillingType(v int8) *UsageLogUpdate { + _u.mutation.ResetBillingType() + _u.mutation.SetBillingType(v) + return _u +} + +// SetNillableBillingType sets the "billing_type" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableBillingType(v *int8) *UsageLogUpdate { + if v != nil { + _u.SetBillingType(*v) + } + return _u +} + +// AddBillingType adds value to the "billing_type" field. +func (_u *UsageLogUpdate) AddBillingType(v int8) *UsageLogUpdate { + _u.mutation.AddBillingType(v) + return _u +} + +// SetStream sets the "stream" field. +func (_u *UsageLogUpdate) SetStream(v bool) *UsageLogUpdate { + _u.mutation.SetStream(v) + return _u +} + +// SetNillableStream sets the "stream" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableStream(v *bool) *UsageLogUpdate { + if v != nil { + _u.SetStream(*v) + } + return _u +} + +// SetDurationMs sets the "duration_ms" field. +func (_u *UsageLogUpdate) SetDurationMs(v int) *UsageLogUpdate { + _u.mutation.ResetDurationMs() + _u.mutation.SetDurationMs(v) + return _u +} + +// SetNillableDurationMs sets the "duration_ms" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableDurationMs(v *int) *UsageLogUpdate { + if v != nil { + _u.SetDurationMs(*v) + } + return _u +} + +// AddDurationMs adds value to the "duration_ms" field. +func (_u *UsageLogUpdate) AddDurationMs(v int) *UsageLogUpdate { + _u.mutation.AddDurationMs(v) + return _u +} + +// ClearDurationMs clears the value of the "duration_ms" field. 
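+//
+// Clearing writes NULL to the column on save. A minimal usage sketch
+// (illustrative, not generated; client, ctx, and logID are assumed to be in
+// scope):
+//
+//	err := client.UsageLog.Update().
+//		Where(usagelog.IDEQ(logID)).
+//		ClearDurationMs().
+//		Exec(ctx)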
+func (_u *UsageLogUpdate) ClearDurationMs() *UsageLogUpdate { + _u.mutation.ClearDurationMs() + return _u +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (_u *UsageLogUpdate) SetFirstTokenMs(v int) *UsageLogUpdate { + _u.mutation.ResetFirstTokenMs() + _u.mutation.SetFirstTokenMs(v) + return _u +} + +// SetNillableFirstTokenMs sets the "first_token_ms" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableFirstTokenMs(v *int) *UsageLogUpdate { + if v != nil { + _u.SetFirstTokenMs(*v) + } + return _u +} + +// AddFirstTokenMs adds value to the "first_token_ms" field. +func (_u *UsageLogUpdate) AddFirstTokenMs(v int) *UsageLogUpdate { + _u.mutation.AddFirstTokenMs(v) + return _u +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. +func (_u *UsageLogUpdate) ClearFirstTokenMs() *UsageLogUpdate { + _u.mutation.ClearFirstTokenMs() + return _u +} + +// SetUserAgent sets the "user_agent" field. +func (_u *UsageLogUpdate) SetUserAgent(v string) *UsageLogUpdate { + _u.mutation.SetUserAgent(v) + return _u +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableUserAgent(v *string) *UsageLogUpdate { + if v != nil { + _u.SetUserAgent(*v) + } + return _u +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (_u *UsageLogUpdate) ClearUserAgent() *UsageLogUpdate { + _u.mutation.ClearUserAgent() + return _u +} + +// SetIPAddress sets the "ip_address" field. +func (_u *UsageLogUpdate) SetIPAddress(v string) *UsageLogUpdate { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableIPAddress(v *string) *UsageLogUpdate { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (_u *UsageLogUpdate) ClearIPAddress() *UsageLogUpdate { + _u.mutation.ClearIPAddress() + return _u +} + +// SetImageCount sets the "image_count" field. +func (_u *UsageLogUpdate) SetImageCount(v int) *UsageLogUpdate { + _u.mutation.ResetImageCount() + _u.mutation.SetImageCount(v) + return _u +} + +// SetNillableImageCount sets the "image_count" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableImageCount(v *int) *UsageLogUpdate { + if v != nil { + _u.SetImageCount(*v) + } + return _u +} + +// AddImageCount adds value to the "image_count" field. +func (_u *UsageLogUpdate) AddImageCount(v int) *UsageLogUpdate { + _u.mutation.AddImageCount(v) + return _u +} + +// SetImageSize sets the "image_size" field. +func (_u *UsageLogUpdate) SetImageSize(v string) *UsageLogUpdate { + _u.mutation.SetImageSize(v) + return _u +} + +// SetNillableImageSize sets the "image_size" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableImageSize(v *string) *UsageLogUpdate { + if v != nil { + _u.SetImageSize(*v) + } + return _u +} + +// ClearImageSize clears the value of the "image_size" field. +func (_u *UsageLogUpdate) ClearImageSize() *UsageLogUpdate { + _u.mutation.ClearImageSize() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UsageLogUpdate) SetUser(v *User) *UsageLogUpdate { + return _u.SetUserID(v.ID) +} + +// SetAPIKey sets the "api_key" edge to the APIKey entity. +func (_u *UsageLogUpdate) SetAPIKey(v *APIKey) *UsageLogUpdate { + return _u.SetAPIKeyID(v.ID) +} + +// SetAccount sets the "account" edge to the Account entity. 
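+//
+// A minimal usage sketch (illustrative, not generated; oldAccountID and a
+// previously loaded *Account named newAcct are assumed, with client and ctx
+// in scope):
+//
+//	n, err := client.UsageLog.Update().
+//		Where(usagelog.AccountIDEQ(oldAccountID)).
+//		SetAccount(newAcct).
+//		Save(ctx)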
+func (_u *UsageLogUpdate) SetAccount(v *Account) *UsageLogUpdate { + return _u.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UsageLogUpdate) SetGroup(v *Group) *UsageLogUpdate { + return _u.SetGroupID(v.ID) +} + +// SetSubscription sets the "subscription" edge to the UserSubscription entity. +func (_u *UsageLogUpdate) SetSubscription(v *UserSubscription) *UsageLogUpdate { + return _u.SetSubscriptionID(v.ID) +} + +// Mutation returns the UsageLogMutation object of the builder. +func (_u *UsageLogUpdate) Mutation() *UsageLogMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UsageLogUpdate) ClearUser() *UsageLogUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearAPIKey clears the "api_key" edge to the APIKey entity. +func (_u *UsageLogUpdate) ClearAPIKey() *UsageLogUpdate { + _u.mutation.ClearAPIKey() + return _u +} + +// ClearAccount clears the "account" edge to the Account entity. +func (_u *UsageLogUpdate) ClearAccount() *UsageLogUpdate { + _u.mutation.ClearAccount() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UsageLogUpdate) ClearGroup() *UsageLogUpdate { + _u.mutation.ClearGroup() + return _u +} + +// ClearSubscription clears the "subscription" edge to the UserSubscription entity. +func (_u *UsageLogUpdate) ClearSubscription() *UsageLogUpdate { + _u.mutation.ClearSubscription() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UsageLogUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UsageLogUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UsageLogUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UsageLogUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UsageLogUpdate) check() error { + if v, ok := _u.mutation.RequestID(); ok { + if err := usagelog.RequestIDValidator(v); err != nil { + return &ValidationError{Name: "request_id", err: fmt.Errorf(`ent: validator failed for field "UsageLog.request_id": %w`, err)} + } + } + if v, ok := _u.mutation.Model(); ok { + if err := usagelog.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)} + } + } + if v, ok := _u.mutation.UserAgent(); ok { + if err := usagelog.UserAgentValidator(v); err != nil { + return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} + } + } + if v, ok := _u.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } + if v, ok := _u.mutation.ImageSize(); ok { + if err := usagelog.ImageSizeValidator(v); err != nil { + return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.user"`) + } + if _u.mutation.APIKeyCleared() && len(_u.mutation.APIKeyIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.api_key"`) + } + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.account"`) + } + return nil +} + +func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagelog.Table, usagelog.Columns, sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.RequestID(); ok { + _spec.SetField(usagelog.FieldRequestID, field.TypeString, value) + } + if value, ok := _u.mutation.Model(); ok { + _spec.SetField(usagelog.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.InputTokens(); ok { + _spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedInputTokens(); ok { + _spec.AddField(usagelog.FieldInputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.OutputTokens(); ok { + _spec.SetField(usagelog.FieldOutputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOutputTokens(); ok { + _spec.AddField(usagelog.FieldOutputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreationTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreationTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheCreationTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreationTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheReadTokens(); ok { + _spec.SetField(usagelog.FieldCacheReadTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheReadTokens(); ok { + _spec.AddField(usagelog.FieldCacheReadTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreation5mTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation5mTokens, field.TypeInt, value) + } + if value, ok := 
_u.mutation.AddedCacheCreation5mTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreation5mTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreation1hTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation1hTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheCreation1hTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreation1hTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.InputCost(); ok { + _spec.SetField(usagelog.FieldInputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedInputCost(); ok { + _spec.AddField(usagelog.FieldInputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.OutputCost(); ok { + _spec.SetField(usagelog.FieldOutputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedOutputCost(); ok { + _spec.AddField(usagelog.FieldOutputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.CacheCreationCost(); ok { + _spec.SetField(usagelog.FieldCacheCreationCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedCacheCreationCost(); ok { + _spec.AddField(usagelog.FieldCacheCreationCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.CacheReadCost(); ok { + _spec.SetField(usagelog.FieldCacheReadCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedCacheReadCost(); ok { + _spec.AddField(usagelog.FieldCacheReadCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.TotalCost(); ok { + _spec.SetField(usagelog.FieldTotalCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedTotalCost(); ok { + _spec.AddField(usagelog.FieldTotalCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.ActualCost(); ok { + _spec.SetField(usagelog.FieldActualCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedActualCost(); ok { + _spec.AddField(usagelog.FieldActualCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedAccountRateMultiplier(); ok { + _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if _u.mutation.AccountRateMultiplierCleared() { + _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64) + } + if value, ok := _u.mutation.BillingType(); ok { + _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) + } + if value, ok := _u.mutation.AddedBillingType(); ok { + _spec.AddField(usagelog.FieldBillingType, field.TypeInt8, value) + } + if value, ok := _u.mutation.Stream(); ok { + _spec.SetField(usagelog.FieldStream, field.TypeBool, value) + } + if value, ok := _u.mutation.DurationMs(); ok { + _spec.SetField(usagelog.FieldDurationMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDurationMs(); ok { + _spec.AddField(usagelog.FieldDurationMs, field.TypeInt, value) + } + if _u.mutation.DurationMsCleared() { + _spec.ClearField(usagelog.FieldDurationMs, field.TypeInt) + } + if value, ok := _u.mutation.FirstTokenMs(); ok { + _spec.SetField(usagelog.FieldFirstTokenMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedFirstTokenMs(); ok { + _spec.AddField(usagelog.FieldFirstTokenMs, 
field.TypeInt, value) + } + if _u.mutation.FirstTokenMsCleared() { + _spec.ClearField(usagelog.FieldFirstTokenMs, field.TypeInt) + } + if value, ok := _u.mutation.UserAgent(); ok { + _spec.SetField(usagelog.FieldUserAgent, field.TypeString, value) + } + if _u.mutation.UserAgentCleared() { + _spec.ClearField(usagelog.FieldUserAgent, field.TypeString) + } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + } + if _u.mutation.IPAddressCleared() { + _spec.ClearField(usagelog.FieldIPAddress, field.TypeString) + } + if value, ok := _u.mutation.ImageCount(); ok { + _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedImageCount(); ok { + _spec.AddField(usagelog.FieldImageCount, field.TypeInt, value) + } + if value, ok := _u.mutation.ImageSize(); ok { + _spec.SetField(usagelog.FieldImageSize, field.TypeString, value) + } + if _u.mutation.ImageSizeCleared() { + _spec.ClearField(usagelog.FieldImageSize, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.UserTable, + Columns: []string{usagelog.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.UserTable, + Columns: []string{usagelog.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.APIKeyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.APIKeyTable, + Columns: []string{usagelog.APIKeyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.APIKeyTable, + Columns: []string{usagelog.APIKeyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.AccountTable, + Columns: []string{usagelog.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.AccountTable, + Columns: []string{usagelog.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: 
sqlgraph.M2O, + Inverse: true, + Table: usagelog.GroupTable, + Columns: []string{usagelog.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.GroupTable, + Columns: []string{usagelog.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.SubscriptionTable, + Columns: []string{usagelog.SubscriptionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.SubscriptionTable, + Columns: []string{usagelog.SubscriptionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagelog.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UsageLogUpdateOne is the builder for updating a single UsageLog entity. +type UsageLogUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UsageLogMutation +} + +// SetUserID sets the "user_id" field. +func (_u *UsageLogUpdateOne) SetUserID(v int64) *UsageLogUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableUserID(v *int64) *UsageLogUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetAPIKeyID sets the "api_key_id" field. +func (_u *UsageLogUpdateOne) SetAPIKeyID(v int64) *UsageLogUpdateOne { + _u.mutation.SetAPIKeyID(v) + return _u +} + +// SetNillableAPIKeyID sets the "api_key_id" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableAPIKeyID(v *int64) *UsageLogUpdateOne { + if v != nil { + _u.SetAPIKeyID(*v) + } + return _u +} + +// SetAccountID sets the "account_id" field. +func (_u *UsageLogUpdateOne) SetAccountID(v int64) *UsageLogUpdateOne { + _u.mutation.SetAccountID(v) + return _u +} + +// SetNillableAccountID sets the "account_id" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableAccountID(v *int64) *UsageLogUpdateOne { + if v != nil { + _u.SetAccountID(*v) + } + return _u +} + +// SetRequestID sets the "request_id" field. +func (_u *UsageLogUpdateOne) SetRequestID(v string) *UsageLogUpdateOne { + _u.mutation.SetRequestID(v) + return _u +} + +// SetNillableRequestID sets the "request_id" field if the given value is not nil. 
+func (_u *UsageLogUpdateOne) SetNillableRequestID(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetRequestID(*v) + } + return _u +} + +// SetModel sets the "model" field. +func (_u *UsageLogUpdateOne) SetModel(v string) *UsageLogUpdateOne { + _u.mutation.SetModel(v) + return _u +} + +// SetNillableModel sets the "model" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableModel(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetModel(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UsageLogUpdateOne) SetGroupID(v int64) *UsageLogUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableGroupID(v *int64) *UsageLogUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// ClearGroupID clears the value of the "group_id" field. +func (_u *UsageLogUpdateOne) ClearGroupID() *UsageLogUpdateOne { + _u.mutation.ClearGroupID() + return _u +} + +// SetSubscriptionID sets the "subscription_id" field. +func (_u *UsageLogUpdateOne) SetSubscriptionID(v int64) *UsageLogUpdateOne { + _u.mutation.SetSubscriptionID(v) + return _u +} + +// SetNillableSubscriptionID sets the "subscription_id" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableSubscriptionID(v *int64) *UsageLogUpdateOne { + if v != nil { + _u.SetSubscriptionID(*v) + } + return _u +} + +// ClearSubscriptionID clears the value of the "subscription_id" field. +func (_u *UsageLogUpdateOne) ClearSubscriptionID() *UsageLogUpdateOne { + _u.mutation.ClearSubscriptionID() + return _u +} + +// SetInputTokens sets the "input_tokens" field. +func (_u *UsageLogUpdateOne) SetInputTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetInputTokens() + _u.mutation.SetInputTokens(v) + return _u +} + +// SetNillableInputTokens sets the "input_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableInputTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetInputTokens(*v) + } + return _u +} + +// AddInputTokens adds value to the "input_tokens" field. +func (_u *UsageLogUpdateOne) AddInputTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddInputTokens(v) + return _u +} + +// SetOutputTokens sets the "output_tokens" field. +func (_u *UsageLogUpdateOne) SetOutputTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetOutputTokens() + _u.mutation.SetOutputTokens(v) + return _u +} + +// SetNillableOutputTokens sets the "output_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableOutputTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetOutputTokens(*v) + } + return _u +} + +// AddOutputTokens adds value to the "output_tokens" field. +func (_u *UsageLogUpdateOne) AddOutputTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddOutputTokens(v) + return _u +} + +// SetCacheCreationTokens sets the "cache_creation_tokens" field. +func (_u *UsageLogUpdateOne) SetCacheCreationTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetCacheCreationTokens() + _u.mutation.SetCacheCreationTokens(v) + return _u +} + +// SetNillableCacheCreationTokens sets the "cache_creation_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheCreationTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheCreationTokens(*v) + } + return _u +} + +// AddCacheCreationTokens adds value to the "cache_creation_tokens" field. 
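The SetNillable* variants exist for PATCH-style updates: a nil pointer is a no-op, so callers can forward optional input without branching on every field. A minimal sketch, assuming a hypothetical partial-update struct (the struct and function names below are illustrative, not part of this patch):

	import "github.com/Wei-Shaw/sub2api/ent"

	// usageLogPatch is an illustrative partial-update payload.
	type usageLogPatch struct {
		Model   *string // nil means "leave unchanged"
		GroupID *int64
	}

	// applyUsageLogPatch copies only the non-nil fields onto the builder;
	// each SetNillable* call silently skips nil, leaving the column untouched.
	func applyUsageLogPatch(b *ent.UsageLogUpdateOne, p usageLogPatch) *ent.UsageLogUpdateOne {
		return b.
			SetNillableModel(p.Model).
			SetNillableGroupID(p.GroupID)
	}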
+func (_u *UsageLogUpdateOne) AddCacheCreationTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddCacheCreationTokens(v) + return _u +} + +// SetCacheReadTokens sets the "cache_read_tokens" field. +func (_u *UsageLogUpdateOne) SetCacheReadTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetCacheReadTokens() + _u.mutation.SetCacheReadTokens(v) + return _u +} + +// SetNillableCacheReadTokens sets the "cache_read_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheReadTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheReadTokens(*v) + } + return _u +} + +// AddCacheReadTokens adds value to the "cache_read_tokens" field. +func (_u *UsageLogUpdateOne) AddCacheReadTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddCacheReadTokens(v) + return _u +} + +// SetCacheCreation5mTokens sets the "cache_creation_5m_tokens" field. +func (_u *UsageLogUpdateOne) SetCacheCreation5mTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetCacheCreation5mTokens() + _u.mutation.SetCacheCreation5mTokens(v) + return _u +} + +// SetNillableCacheCreation5mTokens sets the "cache_creation_5m_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheCreation5mTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheCreation5mTokens(*v) + } + return _u +} + +// AddCacheCreation5mTokens adds value to the "cache_creation_5m_tokens" field. +func (_u *UsageLogUpdateOne) AddCacheCreation5mTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddCacheCreation5mTokens(v) + return _u +} + +// SetCacheCreation1hTokens sets the "cache_creation_1h_tokens" field. +func (_u *UsageLogUpdateOne) SetCacheCreation1hTokens(v int) *UsageLogUpdateOne { + _u.mutation.ResetCacheCreation1hTokens() + _u.mutation.SetCacheCreation1hTokens(v) + return _u +} + +// SetNillableCacheCreation1hTokens sets the "cache_creation_1h_tokens" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheCreation1hTokens(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheCreation1hTokens(*v) + } + return _u +} + +// AddCacheCreation1hTokens adds value to the "cache_creation_1h_tokens" field. +func (_u *UsageLogUpdateOne) AddCacheCreation1hTokens(v int) *UsageLogUpdateOne { + _u.mutation.AddCacheCreation1hTokens(v) + return _u +} + +// SetInputCost sets the "input_cost" field. +func (_u *UsageLogUpdateOne) SetInputCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetInputCost() + _u.mutation.SetInputCost(v) + return _u +} + +// SetNillableInputCost sets the "input_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableInputCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetInputCost(*v) + } + return _u +} + +// AddInputCost adds value to the "input_cost" field. +func (_u *UsageLogUpdateOne) AddInputCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddInputCost(v) + return _u +} + +// SetOutputCost sets the "output_cost" field. +func (_u *UsageLogUpdateOne) SetOutputCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetOutputCost() + _u.mutation.SetOutputCost(v) + return _u +} + +// SetNillableOutputCost sets the "output_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableOutputCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetOutputCost(*v) + } + return _u +} + +// AddOutputCost adds value to the "output_cost" field. 
+func (_u *UsageLogUpdateOne) AddOutputCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddOutputCost(v) + return _u +} + +// SetCacheCreationCost sets the "cache_creation_cost" field. +func (_u *UsageLogUpdateOne) SetCacheCreationCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetCacheCreationCost() + _u.mutation.SetCacheCreationCost(v) + return _u +} + +// SetNillableCacheCreationCost sets the "cache_creation_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheCreationCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheCreationCost(*v) + } + return _u +} + +// AddCacheCreationCost adds value to the "cache_creation_cost" field. +func (_u *UsageLogUpdateOne) AddCacheCreationCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddCacheCreationCost(v) + return _u +} + +// SetCacheReadCost sets the "cache_read_cost" field. +func (_u *UsageLogUpdateOne) SetCacheReadCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetCacheReadCost() + _u.mutation.SetCacheReadCost(v) + return _u +} + +// SetNillableCacheReadCost sets the "cache_read_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableCacheReadCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetCacheReadCost(*v) + } + return _u +} + +// AddCacheReadCost adds value to the "cache_read_cost" field. +func (_u *UsageLogUpdateOne) AddCacheReadCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddCacheReadCost(v) + return _u +} + +// SetTotalCost sets the "total_cost" field. +func (_u *UsageLogUpdateOne) SetTotalCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetTotalCost() + _u.mutation.SetTotalCost(v) + return _u +} + +// SetNillableTotalCost sets the "total_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableTotalCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetTotalCost(*v) + } + return _u +} + +// AddTotalCost adds value to the "total_cost" field. +func (_u *UsageLogUpdateOne) AddTotalCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddTotalCost(v) + return _u +} + +// SetActualCost sets the "actual_cost" field. +func (_u *UsageLogUpdateOne) SetActualCost(v float64) *UsageLogUpdateOne { + _u.mutation.ResetActualCost() + _u.mutation.SetActualCost(v) + return _u +} + +// SetNillableActualCost sets the "actual_cost" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableActualCost(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetActualCost(*v) + } + return _u +} + +// AddActualCost adds value to the "actual_cost" field. +func (_u *UsageLogUpdateOne) AddActualCost(v float64) *UsageLogUpdateOne { + _u.mutation.AddActualCost(v) + return _u +} + +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *UsageLogUpdateOne) SetRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableRateMultiplier(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *UsageLogUpdateOne) AddRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.AddRateMultiplier(v) + return _u +} + +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. 
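Each numeric Set* here has an Add* twin: Add* records a delta that sqlSave later turns into an additive update (field = field + ?) via _spec.AddField, so counters can be bumped without a read-modify-write round trip. A sketch, with client, ctx, logID, and the deltas assumed from surrounding application code:

	import (
		"context"

		"github.com/Wei-Shaw/sub2api/ent"
	)

	// addUsage folds late-arriving token usage into an existing row in one UPDATE.
	func addUsage(ctx context.Context, client *ent.Client, logID int64) error {
		return client.UsageLog.
			UpdateOneID(logID). // int64 primary key
			AddOutputTokens(128).  // illustrative deltas
			AddTotalCost(0.0042).
			Exec(ctx)
	}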
+func (_u *UsageLogUpdateOne) SetAccountRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.ResetAccountRateMultiplier() + _u.mutation.SetAccountRateMultiplier(v) + return _u +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetAccountRateMultiplier(*v) + } + return _u +} + +// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field. +func (_u *UsageLogUpdateOne) AddAccountRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.AddAccountRateMultiplier(v) + return _u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (_u *UsageLogUpdateOne) ClearAccountRateMultiplier() *UsageLogUpdateOne { + _u.mutation.ClearAccountRateMultiplier() + return _u +} + +// SetBillingType sets the "billing_type" field. +func (_u *UsageLogUpdateOne) SetBillingType(v int8) *UsageLogUpdateOne { + _u.mutation.ResetBillingType() + _u.mutation.SetBillingType(v) + return _u +} + +// SetNillableBillingType sets the "billing_type" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableBillingType(v *int8) *UsageLogUpdateOne { + if v != nil { + _u.SetBillingType(*v) + } + return _u +} + +// AddBillingType adds value to the "billing_type" field. +func (_u *UsageLogUpdateOne) AddBillingType(v int8) *UsageLogUpdateOne { + _u.mutation.AddBillingType(v) + return _u +} + +// SetStream sets the "stream" field. +func (_u *UsageLogUpdateOne) SetStream(v bool) *UsageLogUpdateOne { + _u.mutation.SetStream(v) + return _u +} + +// SetNillableStream sets the "stream" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableStream(v *bool) *UsageLogUpdateOne { + if v != nil { + _u.SetStream(*v) + } + return _u +} + +// SetDurationMs sets the "duration_ms" field. +func (_u *UsageLogUpdateOne) SetDurationMs(v int) *UsageLogUpdateOne { + _u.mutation.ResetDurationMs() + _u.mutation.SetDurationMs(v) + return _u +} + +// SetNillableDurationMs sets the "duration_ms" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableDurationMs(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetDurationMs(*v) + } + return _u +} + +// AddDurationMs adds value to the "duration_ms" field. +func (_u *UsageLogUpdateOne) AddDurationMs(v int) *UsageLogUpdateOne { + _u.mutation.AddDurationMs(v) + return _u +} + +// ClearDurationMs clears the value of the "duration_ms" field. +func (_u *UsageLogUpdateOne) ClearDurationMs() *UsageLogUpdateOne { + _u.mutation.ClearDurationMs() + return _u +} + +// SetFirstTokenMs sets the "first_token_ms" field. +func (_u *UsageLogUpdateOne) SetFirstTokenMs(v int) *UsageLogUpdateOne { + _u.mutation.ResetFirstTokenMs() + _u.mutation.SetFirstTokenMs(v) + return _u +} + +// SetNillableFirstTokenMs sets the "first_token_ms" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableFirstTokenMs(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetFirstTokenMs(*v) + } + return _u +} + +// AddFirstTokenMs adds value to the "first_token_ms" field. +func (_u *UsageLogUpdateOne) AddFirstTokenMs(v int) *UsageLogUpdateOne { + _u.mutation.AddFirstTokenMs(v) + return _u +} + +// ClearFirstTokenMs clears the value of the "first_token_ms" field. 
+func (_u *UsageLogUpdateOne) ClearFirstTokenMs() *UsageLogUpdateOne { + _u.mutation.ClearFirstTokenMs() + return _u +} + +// SetUserAgent sets the "user_agent" field. +func (_u *UsageLogUpdateOne) SetUserAgent(v string) *UsageLogUpdateOne { + _u.mutation.SetUserAgent(v) + return _u +} + +// SetNillableUserAgent sets the "user_agent" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableUserAgent(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetUserAgent(*v) + } + return _u +} + +// ClearUserAgent clears the value of the "user_agent" field. +func (_u *UsageLogUpdateOne) ClearUserAgent() *UsageLogUpdateOne { + _u.mutation.ClearUserAgent() + return _u +} + +// SetIPAddress sets the "ip_address" field. +func (_u *UsageLogUpdateOne) SetIPAddress(v string) *UsageLogUpdateOne { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableIPAddress(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (_u *UsageLogUpdateOne) ClearIPAddress() *UsageLogUpdateOne { + _u.mutation.ClearIPAddress() + return _u +} + +// SetImageCount sets the "image_count" field. +func (_u *UsageLogUpdateOne) SetImageCount(v int) *UsageLogUpdateOne { + _u.mutation.ResetImageCount() + _u.mutation.SetImageCount(v) + return _u +} + +// SetNillableImageCount sets the "image_count" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableImageCount(v *int) *UsageLogUpdateOne { + if v != nil { + _u.SetImageCount(*v) + } + return _u +} + +// AddImageCount adds value to the "image_count" field. +func (_u *UsageLogUpdateOne) AddImageCount(v int) *UsageLogUpdateOne { + _u.mutation.AddImageCount(v) + return _u +} + +// SetImageSize sets the "image_size" field. +func (_u *UsageLogUpdateOne) SetImageSize(v string) *UsageLogUpdateOne { + _u.mutation.SetImageSize(v) + return _u +} + +// SetNillableImageSize sets the "image_size" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableImageSize(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetImageSize(*v) + } + return _u +} + +// ClearImageSize clears the value of the "image_size" field. +func (_u *UsageLogUpdateOne) ClearImageSize() *UsageLogUpdateOne { + _u.mutation.ClearImageSize() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UsageLogUpdateOne) SetUser(v *User) *UsageLogUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetAPIKey sets the "api_key" edge to the APIKey entity. +func (_u *UsageLogUpdateOne) SetAPIKey(v *APIKey) *UsageLogUpdateOne { + return _u.SetAPIKeyID(v.ID) +} + +// SetAccount sets the "account" edge to the Account entity. +func (_u *UsageLogUpdateOne) SetAccount(v *Account) *UsageLogUpdateOne { + return _u.SetAccountID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UsageLogUpdateOne) SetGroup(v *Group) *UsageLogUpdateOne { + return _u.SetGroupID(v.ID) +} + +// SetSubscription sets the "subscription" edge to the UserSubscription entity. +func (_u *UsageLogUpdateOne) SetSubscription(v *UserSubscription) *UsageLogUpdateOne { + return _u.SetSubscriptionID(v.ID) +} + +// Mutation returns the UsageLogMutation object of the builder. +func (_u *UsageLogUpdateOne) Mutation() *UsageLogMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. 
+func (_u *UsageLogUpdateOne) ClearUser() *UsageLogUpdateOne {
+	_u.mutation.ClearUser()
+	return _u
+}
+
+// ClearAPIKey clears the "api_key" edge to the APIKey entity.
+func (_u *UsageLogUpdateOne) ClearAPIKey() *UsageLogUpdateOne {
+	_u.mutation.ClearAPIKey()
+	return _u
+}
+
+// ClearAccount clears the "account" edge to the Account entity.
+func (_u *UsageLogUpdateOne) ClearAccount() *UsageLogUpdateOne {
+	_u.mutation.ClearAccount()
+	return _u
+}
+
+// ClearGroup clears the "group" edge to the Group entity.
+func (_u *UsageLogUpdateOne) ClearGroup() *UsageLogUpdateOne {
+	_u.mutation.ClearGroup()
+	return _u
+}
+
+// ClearSubscription clears the "subscription" edge to the UserSubscription entity.
+func (_u *UsageLogUpdateOne) ClearSubscription() *UsageLogUpdateOne {
+	_u.mutation.ClearSubscription()
+	return _u
+}
+
+// Where appends a list of predicates to the UsageLogUpdateOne builder.
+func (_u *UsageLogUpdateOne) Where(ps ...predicate.UsageLog) *UsageLogUpdateOne {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (_u *UsageLogUpdateOne) Select(field string, fields ...string) *UsageLogUpdateOne {
+	_u.fields = append([]string{field}, fields...)
+	return _u
+}
+
+// Save executes the query and returns the updated UsageLog entity.
+func (_u *UsageLogUpdateOne) Save(ctx context.Context) (*UsageLog, error) {
+	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *UsageLogUpdateOne) SaveX(ctx context.Context) *UsageLog {
+	node, err := _u.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (_u *UsageLogUpdateOne) Exec(ctx context.Context) error {
+	_, err := _u.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_u *UsageLogUpdateOne) ExecX(ctx context.Context) {
+	if err := _u.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// check runs all checks and user-defined validators on the builder.
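Save is the only method above that touches the database: withHooks wraps sqlSave with any hooks registered on the schema, and sqlSave (below) calls check() before writing. The X variants panic on error, which fits tests and one-off scripts better than request handlers. A usage sketch; the usagelog.Stream predicate is assumed from ent/usagelog/where.go (also added by this patch), and logID/durationMs come from the caller:

	import (
		"context"

		"github.com/Wei-Shaw/sub2api/ent"
		"github.com/Wei-Shaw/sub2api/ent/usagelog"
	)

	func finishUsageLog(ctx context.Context, client *ent.Client, logID int64, durationMs int) error {
		_, err := client.UsageLog.
			UpdateOneID(logID).
			Where(usagelog.Stream(true)). // guard: only update streaming rows
			SetDurationMs(durationMs).
			Save(ctx)
		if ent.IsNotFound(err) {
			// Row missing, or filtered out by the Where guard.
		}
		return err
	}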
+func (_u *UsageLogUpdateOne) check() error { + if v, ok := _u.mutation.RequestID(); ok { + if err := usagelog.RequestIDValidator(v); err != nil { + return &ValidationError{Name: "request_id", err: fmt.Errorf(`ent: validator failed for field "UsageLog.request_id": %w`, err)} + } + } + if v, ok := _u.mutation.Model(); ok { + if err := usagelog.ModelValidator(v); err != nil { + return &ValidationError{Name: "model", err: fmt.Errorf(`ent: validator failed for field "UsageLog.model": %w`, err)} + } + } + if v, ok := _u.mutation.UserAgent(); ok { + if err := usagelog.UserAgentValidator(v); err != nil { + return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} + } + } + if v, ok := _u.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } + if v, ok := _u.mutation.ImageSize(); ok { + if err := usagelog.ImageSizeValidator(v); err != nil { + return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.user"`) + } + if _u.mutation.APIKeyCleared() && len(_u.mutation.APIKeyIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.api_key"`) + } + if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UsageLog.account"`) + } + return nil +} + +func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usagelog.Table, usagelog.Columns, sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UsageLog.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usagelog.FieldID) + for _, f := range fields { + if !usagelog.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != usagelog.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.RequestID(); ok { + _spec.SetField(usagelog.FieldRequestID, field.TypeString, value) + } + if value, ok := _u.mutation.Model(); ok { + _spec.SetField(usagelog.FieldModel, field.TypeString, value) + } + if value, ok := _u.mutation.InputTokens(); ok { + _spec.SetField(usagelog.FieldInputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedInputTokens(); ok { + _spec.AddField(usagelog.FieldInputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.OutputTokens(); ok { + _spec.SetField(usagelog.FieldOutputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedOutputTokens(); ok { + _spec.AddField(usagelog.FieldOutputTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreationTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreationTokens, 
field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheCreationTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreationTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheReadTokens(); ok { + _spec.SetField(usagelog.FieldCacheReadTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheReadTokens(); ok { + _spec.AddField(usagelog.FieldCacheReadTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreation5mTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation5mTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheCreation5mTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreation5mTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.CacheCreation1hTokens(); ok { + _spec.SetField(usagelog.FieldCacheCreation1hTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedCacheCreation1hTokens(); ok { + _spec.AddField(usagelog.FieldCacheCreation1hTokens, field.TypeInt, value) + } + if value, ok := _u.mutation.InputCost(); ok { + _spec.SetField(usagelog.FieldInputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedInputCost(); ok { + _spec.AddField(usagelog.FieldInputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.OutputCost(); ok { + _spec.SetField(usagelog.FieldOutputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedOutputCost(); ok { + _spec.AddField(usagelog.FieldOutputCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.CacheCreationCost(); ok { + _spec.SetField(usagelog.FieldCacheCreationCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedCacheCreationCost(); ok { + _spec.AddField(usagelog.FieldCacheCreationCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.CacheReadCost(); ok { + _spec.SetField(usagelog.FieldCacheReadCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedCacheReadCost(); ok { + _spec.AddField(usagelog.FieldCacheReadCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.TotalCost(); ok { + _spec.SetField(usagelog.FieldTotalCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedTotalCost(); ok { + _spec.AddField(usagelog.FieldTotalCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.ActualCost(); ok { + _spec.SetField(usagelog.FieldActualCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedActualCost(); ok { + _spec.AddField(usagelog.FieldActualCost, field.TypeFloat64, value) + } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedAccountRateMultiplier(); ok { + _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if _u.mutation.AccountRateMultiplierCleared() { + _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64) + } + if value, ok := _u.mutation.BillingType(); ok { + _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) + } + if value, ok := _u.mutation.AddedBillingType(); ok { + _spec.AddField(usagelog.FieldBillingType, field.TypeInt8, value) + } + if value, ok := _u.mutation.Stream(); ok { + _spec.SetField(usagelog.FieldStream, 
field.TypeBool, value) + } + if value, ok := _u.mutation.DurationMs(); ok { + _spec.SetField(usagelog.FieldDurationMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDurationMs(); ok { + _spec.AddField(usagelog.FieldDurationMs, field.TypeInt, value) + } + if _u.mutation.DurationMsCleared() { + _spec.ClearField(usagelog.FieldDurationMs, field.TypeInt) + } + if value, ok := _u.mutation.FirstTokenMs(); ok { + _spec.SetField(usagelog.FieldFirstTokenMs, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedFirstTokenMs(); ok { + _spec.AddField(usagelog.FieldFirstTokenMs, field.TypeInt, value) + } + if _u.mutation.FirstTokenMsCleared() { + _spec.ClearField(usagelog.FieldFirstTokenMs, field.TypeInt) + } + if value, ok := _u.mutation.UserAgent(); ok { + _spec.SetField(usagelog.FieldUserAgent, field.TypeString, value) + } + if _u.mutation.UserAgentCleared() { + _spec.ClearField(usagelog.FieldUserAgent, field.TypeString) + } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + } + if _u.mutation.IPAddressCleared() { + _spec.ClearField(usagelog.FieldIPAddress, field.TypeString) + } + if value, ok := _u.mutation.ImageCount(); ok { + _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedImageCount(); ok { + _spec.AddField(usagelog.FieldImageCount, field.TypeInt, value) + } + if value, ok := _u.mutation.ImageSize(); ok { + _spec.SetField(usagelog.FieldImageSize, field.TypeString, value) + } + if _u.mutation.ImageSizeCleared() { + _spec.ClearField(usagelog.FieldImageSize, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.UserTable, + Columns: []string{usagelog.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.UserTable, + Columns: []string{usagelog.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.APIKeyCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.APIKeyTable, + Columns: []string{usagelog.APIKeyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeyIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.APIKeyTable, + Columns: []string{usagelog.APIKeyColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AccountCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.AccountTable, + Columns: []string{usagelog.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + 
} + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.AccountTable, + Columns: []string{usagelog.AccountColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.GroupTable, + Columns: []string{usagelog.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.GroupTable, + Columns: []string{usagelog.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.SubscriptionTable, + Columns: []string{usagelog.SubscriptionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usagelog.SubscriptionTable, + Columns: []string{usagelog.SubscriptionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UsageLog{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usagelog.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/user.go b/backend/ent/user.go new file mode 100644 index 00000000..0b9a48cc --- /dev/null +++ b/backend/ent/user.go @@ -0,0 +1,375 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Email holds the value of the "email" field. 
+ Email string `json:"email,omitempty"` + // PasswordHash holds the value of the "password_hash" field. + PasswordHash string `json:"password_hash,omitempty"` + // Role holds the value of the "role" field. + Role string `json:"role,omitempty"` + // Balance holds the value of the "balance" field. + Balance float64 `json:"balance,omitempty"` + // Concurrency holds the value of the "concurrency" field. + Concurrency int `json:"concurrency,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // Username holds the value of the "username" field. + Username string `json:"username,omitempty"` + // Notes holds the value of the "notes" field. + Notes string `json:"notes,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserQuery when eager-loading is set. + Edges UserEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserEdges holds the relations/edges for other nodes in the graph. +type UserEdges struct { + // APIKeys holds the value of the api_keys edge. + APIKeys []*APIKey `json:"api_keys,omitempty"` + // RedeemCodes holds the value of the redeem_codes edge. + RedeemCodes []*RedeemCode `json:"redeem_codes,omitempty"` + // Subscriptions holds the value of the subscriptions edge. + Subscriptions []*UserSubscription `json:"subscriptions,omitempty"` + // AssignedSubscriptions holds the value of the assigned_subscriptions edge. + AssignedSubscriptions []*UserSubscription `json:"assigned_subscriptions,omitempty"` + // AllowedGroups holds the value of the allowed_groups edge. + AllowedGroups []*Group `json:"allowed_groups,omitempty"` + // UsageLogs holds the value of the usage_logs edge. + UsageLogs []*UsageLog `json:"usage_logs,omitempty"` + // AttributeValues holds the value of the attribute_values edge. + AttributeValues []*UserAttributeValue `json:"attribute_values,omitempty"` + // PromoCodeUsages holds the value of the promo_code_usages edge. + PromoCodeUsages []*PromoCodeUsage `json:"promo_code_usages,omitempty"` + // UserAllowedGroups holds the value of the user_allowed_groups edge. + UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [9]bool +} + +// APIKeysOrErr returns the APIKeys value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) APIKeysOrErr() ([]*APIKey, error) { + if e.loadedTypes[0] { + return e.APIKeys, nil + } + return nil, &NotLoadedError{edge: "api_keys"} +} + +// RedeemCodesOrErr returns the RedeemCodes value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) RedeemCodesOrErr() ([]*RedeemCode, error) { + if e.loadedTypes[1] { + return e.RedeemCodes, nil + } + return nil, &NotLoadedError{edge: "redeem_codes"} +} + +// SubscriptionsOrErr returns the Subscriptions value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) SubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[2] { + return e.Subscriptions, nil + } + return nil, &NotLoadedError{edge: "subscriptions"} +} + +// AssignedSubscriptionsOrErr returns the AssignedSubscriptions value or an error if the edge +// was not loaded in eager-loading. 
+func (e UserEdges) AssignedSubscriptionsOrErr() ([]*UserSubscription, error) { + if e.loadedTypes[3] { + return e.AssignedSubscriptions, nil + } + return nil, &NotLoadedError{edge: "assigned_subscriptions"} +} + +// AllowedGroupsOrErr returns the AllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AllowedGroupsOrErr() ([]*Group, error) { + if e.loadedTypes[4] { + return e.AllowedGroups, nil + } + return nil, &NotLoadedError{edge: "allowed_groups"} +} + +// UsageLogsOrErr returns the UsageLogs value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) UsageLogsOrErr() ([]*UsageLog, error) { + if e.loadedTypes[5] { + return e.UsageLogs, nil + } + return nil, &NotLoadedError{edge: "usage_logs"} +} + +// AttributeValuesOrErr returns the AttributeValues value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { + if e.loadedTypes[6] { + return e.AttributeValues, nil + } + return nil, &NotLoadedError{edge: "attribute_values"} +} + +// PromoCodeUsagesOrErr returns the PromoCodeUsages value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { + if e.loadedTypes[7] { + return e.PromoCodeUsages, nil + } + return nil, &NotLoadedError{edge: "promo_code_usages"} +} + +// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { + if e.loadedTypes[8] { + return e.UserAllowedGroups, nil + } + return nil, &NotLoadedError{edge: "user_allowed_groups"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case user.FieldBalance: + values[i] = new(sql.NullFloat64) + case user.FieldID, user.FieldConcurrency: + values[i] = new(sql.NullInt64) + case user.FieldEmail, user.FieldPasswordHash, user.FieldRole, user.FieldStatus, user.FieldUsername, user.FieldNotes: + values[i] = new(sql.NullString) + case user.FieldCreatedAt, user.FieldUpdatedAt, user.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the User fields. 
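The *OrErr accessors make eager-loading explicit: an edge is populated only when the query requests it with the matching With* loader, and OrErr distinguishes "loaded but empty" from "never loaded" (a *NotLoadedError). A sketch, assuming the standard generated predicates in ent/user/where.go and the With* loaders on UserQuery:

	import (
		"context"

		"github.com/Wei-Shaw/sub2api/ent"
		"github.com/Wei-Shaw/sub2api/ent/user"
	)

	func loadUserKeys(ctx context.Context, client *ent.Client, email string) ([]*ent.APIKey, error) {
		u, err := client.User.Query().
			Where(user.EmailEQ(email)).
			WithAPIKeys(). // populates Edges.APIKeys
			Only(ctx)
		if err != nil {
			return nil, err
		}
		// Returns *NotLoadedError if WithAPIKeys had been omitted above.
		return u.Edges.APIKeysOrErr()
	}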
+func (_m *User) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case user.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case user.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case user.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case user.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case user.FieldEmail: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field email", values[i]) + } else if value.Valid { + _m.Email = value.String + } + case user.FieldPasswordHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password_hash", values[i]) + } else if value.Valid { + _m.PasswordHash = value.String + } + case user.FieldRole: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field role", values[i]) + } else if value.Valid { + _m.Role = value.String + } + case user.FieldBalance: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field balance", values[i]) + } else if value.Valid { + _m.Balance = value.Float64 + } + case user.FieldConcurrency: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field concurrency", values[i]) + } else if value.Valid { + _m.Concurrency = int(value.Int64) + } + case user.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case user.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + _m.Username = value.String + } + case user.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (_m *User) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryAPIKeys queries the "api_keys" edge of the User entity. +func (_m *User) QueryAPIKeys() *APIKeyQuery { + return NewUserClient(_m.config).QueryAPIKeys(_m) +} + +// QueryRedeemCodes queries the "redeem_codes" edge of the User entity. +func (_m *User) QueryRedeemCodes() *RedeemCodeQuery { + return NewUserClient(_m.config).QueryRedeemCodes(_m) +} + +// QuerySubscriptions queries the "subscriptions" edge of the User entity. 
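The Query* methods here start a fresh traversal from an already-loaded entity, so related rows can be filtered or aggregated on demand instead of being eager-loaded up front. A two-line sketch, reusing a loaded u *ent.User and a ctx from the caller:

	// Count this user's API keys without materializing the rows.
	n, err := u.QueryAPIKeys().Count(ctx)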
+func (_m *User) QuerySubscriptions() *UserSubscriptionQuery { + return NewUserClient(_m.config).QuerySubscriptions(_m) +} + +// QueryAssignedSubscriptions queries the "assigned_subscriptions" edge of the User entity. +func (_m *User) QueryAssignedSubscriptions() *UserSubscriptionQuery { + return NewUserClient(_m.config).QueryAssignedSubscriptions(_m) +} + +// QueryAllowedGroups queries the "allowed_groups" edge of the User entity. +func (_m *User) QueryAllowedGroups() *GroupQuery { + return NewUserClient(_m.config).QueryAllowedGroups(_m) +} + +// QueryUsageLogs queries the "usage_logs" edge of the User entity. +func (_m *User) QueryUsageLogs() *UsageLogQuery { + return NewUserClient(_m.config).QueryUsageLogs(_m) +} + +// QueryAttributeValues queries the "attribute_values" edge of the User entity. +func (_m *User) QueryAttributeValues() *UserAttributeValueQuery { + return NewUserClient(_m.config).QueryAttributeValues(_m) +} + +// QueryPromoCodeUsages queries the "promo_code_usages" edge of the User entity. +func (_m *User) QueryPromoCodeUsages() *PromoCodeUsageQuery { + return NewUserClient(_m.config).QueryPromoCodeUsages(_m) +} + +// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the User entity. +func (_m *User) QueryUserAllowedGroups() *UserAllowedGroupQuery { + return NewUserClient(_m.config).QueryUserAllowedGroups(_m) +} + +// Update returns a builder for updating this User. +// Note that you need to call User.Unwrap() before calling this method if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *User) Update() *UserUpdateOne { + return NewUserClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the User entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *User) Unwrap() *User { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("email=") + builder.WriteString(_m.Email) + builder.WriteString(", ") + builder.WriteString("password_hash=") + builder.WriteString(_m.PasswordHash) + builder.WriteString(", ") + builder.WriteString("role=") + builder.WriteString(_m.Role) + builder.WriteString(", ") + builder.WriteString("balance=") + builder.WriteString(fmt.Sprintf("%v", _m.Balance)) + builder.WriteString(", ") + builder.WriteString("concurrency=") + builder.WriteString(fmt.Sprintf("%v", _m.Concurrency)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("username=") + builder.WriteString(_m.Username) + builder.WriteString(", ") + builder.WriteString("notes=") + builder.WriteString(_m.Notes) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. 
+type Users []*User
diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go
new file mode 100644
index 00000000..1be1d871
--- /dev/null
+++ b/backend/ent/user/user.go
@@ -0,0 +1,443 @@
+// Code generated by ent, DO NOT EDIT.
+
+package user
+
+import (
+	"time"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+)
+
+const (
+	// Label holds the string label denoting the user type in the database.
+	Label = "user"
+	// FieldID holds the string denoting the id field in the database.
+	FieldID = "id"
+	// FieldCreatedAt holds the string denoting the created_at field in the database.
+	FieldCreatedAt = "created_at"
+	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
+	FieldUpdatedAt = "updated_at"
+	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
+	FieldDeletedAt = "deleted_at"
+	// FieldEmail holds the string denoting the email field in the database.
+	FieldEmail = "email"
+	// FieldPasswordHash holds the string denoting the password_hash field in the database.
+	FieldPasswordHash = "password_hash"
+	// FieldRole holds the string denoting the role field in the database.
+	FieldRole = "role"
+	// FieldBalance holds the string denoting the balance field in the database.
+	FieldBalance = "balance"
+	// FieldConcurrency holds the string denoting the concurrency field in the database.
+	FieldConcurrency = "concurrency"
+	// FieldStatus holds the string denoting the status field in the database.
+	FieldStatus = "status"
+	// FieldUsername holds the string denoting the username field in the database.
+	FieldUsername = "username"
+	// FieldNotes holds the string denoting the notes field in the database.
+	FieldNotes = "notes"
+	// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
+	EdgeAPIKeys = "api_keys"
+	// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
+	EdgeRedeemCodes = "redeem_codes"
+	// EdgeSubscriptions holds the string denoting the subscriptions edge name in mutations.
+	EdgeSubscriptions = "subscriptions"
+	// EdgeAssignedSubscriptions holds the string denoting the assigned_subscriptions edge name in mutations.
+	EdgeAssignedSubscriptions = "assigned_subscriptions"
+	// EdgeAllowedGroups holds the string denoting the allowed_groups edge name in mutations.
+	EdgeAllowedGroups = "allowed_groups"
+	// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
+	EdgeUsageLogs = "usage_logs"
+	// EdgeAttributeValues holds the string denoting the attribute_values edge name in mutations.
+	EdgeAttributeValues = "attribute_values"
+	// EdgePromoCodeUsages holds the string denoting the promo_code_usages edge name in mutations.
+	EdgePromoCodeUsages = "promo_code_usages"
+	// EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations.
+	EdgeUserAllowedGroups = "user_allowed_groups"
+	// Table holds the table name of the user in the database.
+	Table = "users"
+	// APIKeysTable is the table that holds the api_keys relation/edge.
+	APIKeysTable = "api_keys"
+	// APIKeysInverseTable is the table name for the APIKey entity.
+	// It exists in this package in order to avoid circular dependency with the "apikey" package.
+	APIKeysInverseTable = "api_keys"
+	// APIKeysColumn is the table column denoting the api_keys relation/edge.
+	APIKeysColumn = "user_id"
+	// RedeemCodesTable is the table that holds the redeem_codes relation/edge.
+ RedeemCodesTable = "redeem_codes" + // RedeemCodesInverseTable is the table name for the RedeemCode entity. + // It exists in this package in order to avoid circular dependency with the "redeemcode" package. + RedeemCodesInverseTable = "redeem_codes" + // RedeemCodesColumn is the table column denoting the redeem_codes relation/edge. + RedeemCodesColumn = "used_by" + // SubscriptionsTable is the table that holds the subscriptions relation/edge. + SubscriptionsTable = "user_subscriptions" + // SubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + SubscriptionsInverseTable = "user_subscriptions" + // SubscriptionsColumn is the table column denoting the subscriptions relation/edge. + SubscriptionsColumn = "user_id" + // AssignedSubscriptionsTable is the table that holds the assigned_subscriptions relation/edge. + AssignedSubscriptionsTable = "user_subscriptions" + // AssignedSubscriptionsInverseTable is the table name for the UserSubscription entity. + // It exists in this package in order to avoid circular dependency with the "usersubscription" package. + AssignedSubscriptionsInverseTable = "user_subscriptions" + // AssignedSubscriptionsColumn is the table column denoting the assigned_subscriptions relation/edge. + AssignedSubscriptionsColumn = "assigned_by" + // AllowedGroupsTable is the table that holds the allowed_groups relation/edge. The primary key declared below. + AllowedGroupsTable = "user_allowed_groups" + // AllowedGroupsInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + AllowedGroupsInverseTable = "groups" + // UsageLogsTable is the table that holds the usage_logs relation/edge. + UsageLogsTable = "usage_logs" + // UsageLogsInverseTable is the table name for the UsageLog entity. + // It exists in this package in order to avoid circular dependency with the "usagelog" package. + UsageLogsInverseTable = "usage_logs" + // UsageLogsColumn is the table column denoting the usage_logs relation/edge. + UsageLogsColumn = "user_id" + // AttributeValuesTable is the table that holds the attribute_values relation/edge. + AttributeValuesTable = "user_attribute_values" + // AttributeValuesInverseTable is the table name for the UserAttributeValue entity. + // It exists in this package in order to avoid circular dependency with the "userattributevalue" package. + AttributeValuesInverseTable = "user_attribute_values" + // AttributeValuesColumn is the table column denoting the attribute_values relation/edge. + AttributeValuesColumn = "user_id" + // PromoCodeUsagesTable is the table that holds the promo_code_usages relation/edge. + PromoCodeUsagesTable = "promo_code_usages" + // PromoCodeUsagesInverseTable is the table name for the PromoCodeUsage entity. + // It exists in this package in order to avoid circular dependency with the "promocodeusage" package. + PromoCodeUsagesInverseTable = "promo_code_usages" + // PromoCodeUsagesColumn is the table column denoting the promo_code_usages relation/edge. + PromoCodeUsagesColumn = "user_id" + // UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge. + UserAllowedGroupsTable = "user_allowed_groups" + // UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity. + // It exists in this package in order to avoid circular dependency with the "userallowedgroup" package. 
+ UserAllowedGroupsInverseTable = "user_allowed_groups" + // UserAllowedGroupsColumn is the table column denoting the user_allowed_groups relation/edge. + UserAllowedGroupsColumn = "user_id" +) + +// Columns holds all SQL columns for user fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldEmail, + FieldPasswordHash, + FieldRole, + FieldBalance, + FieldConcurrency, + FieldStatus, + FieldUsername, + FieldNotes, +} + +var ( + // AllowedGroupsPrimaryKey and AllowedGroupsColumn2 are the table columns denoting the + // primary key for the allowed_groups relation (M2M). + AllowedGroupsPrimaryKey = []string{"user_id", "group_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // EmailValidator is a validator for the "email" field. It is called by the builders before save. + EmailValidator func(string) error + // PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save. + PasswordHashValidator func(string) error + // DefaultRole holds the default value on creation for the "role" field. + DefaultRole string + // RoleValidator is a validator for the "role" field. It is called by the builders before save. + RoleValidator func(string) error + // DefaultBalance holds the default value on creation for the "balance" field. + DefaultBalance float64 + // DefaultConcurrency holds the default value on creation for the "concurrency" field. + DefaultConcurrency int + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultUsername holds the default value on creation for the "username" field. + DefaultUsername string + // UsernameValidator is a validator for the "username" field. It is called by the builders before save. + UsernameValidator func(string) error + // DefaultNotes holds the default value on creation for the "notes" field. + DefaultNotes string +) + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
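+
+// If the blank import noted above is forgotten, the failure surfaces at run
+// time rather than compile time: the builders' defaults() step reports, e.g.,
+//
+//	ent: uninitialized user.DefaultCreatedAt (forgotten import ent/runtime?)
+//
+// so the safe pattern is to add the import once in package main.
+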
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByEmail orders the results by the email field. +func ByEmail(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEmail, opts...).ToFunc() +} + +// ByPasswordHash orders the results by the password_hash field. +func ByPasswordHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPasswordHash, opts...).ToFunc() +} + +// ByRole orders the results by the role field. +func ByRole(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRole, opts...).ToFunc() +} + +// ByBalance orders the results by the balance field. +func ByBalance(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBalance, opts...).ToFunc() +} + +// ByConcurrency orders the results by the concurrency field. +func ByConcurrency(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldConcurrency, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByAPIKeysCount orders the results by api_keys count. +func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAPIKeysStep(), opts...) + } +} + +// ByAPIKeys orders the results by api_keys terms. +func ByAPIKeys(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAPIKeysStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByRedeemCodesCount orders the results by redeem_codes count. +func ByRedeemCodesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newRedeemCodesStep(), opts...) + } +} + +// ByRedeemCodes orders the results by redeem_codes terms. +func ByRedeemCodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newRedeemCodesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// BySubscriptionsCount orders the results by subscriptions count. +func BySubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newSubscriptionsStep(), opts...) + } +} + +// BySubscriptions orders the results by subscriptions terms. +func BySubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAssignedSubscriptionsCount orders the results by assigned_subscriptions count. 
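+
+// As a hedged usage sketch (assuming a *ent.Client named client and a
+// context.Context named ctx), these options compose through Query().Order,
+// including ordering by a relation count:
+//
+//	users, err := client.User.Query().
+//		Order(
+//			user.ByCreatedAt(sql.OrderDesc()),
+//			user.ByAPIKeysCount(sql.OrderDesc()),
+//		).
+//		All(ctx)
+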
+func ByAssignedSubscriptionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAssignedSubscriptionsStep(), opts...) + } +} + +// ByAssignedSubscriptions orders the results by assigned_subscriptions terms. +func ByAssignedSubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAssignedSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAllowedGroupsCount orders the results by allowed_groups count. +func ByAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAllowedGroupsStep(), opts...) + } +} + +// ByAllowedGroups orders the results by allowed_groups terms. +func ByAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUsageLogsCount orders the results by usage_logs count. +func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...) + } +} + +// ByUsageLogs orders the results by usage_logs terms. +func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByAttributeValuesCount orders the results by attribute_values count. +func ByAttributeValuesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAttributeValuesStep(), opts...) + } +} + +// ByAttributeValues orders the results by attribute_values terms. +func ByAttributeValues(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAttributeValuesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByPromoCodeUsagesCount orders the results by promo_code_usages count. +func ByPromoCodeUsagesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPromoCodeUsagesStep(), opts...) + } +} + +// ByPromoCodeUsages orders the results by promo_code_usages terms. +func ByPromoCodeUsages(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPromoCodeUsagesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByUserAllowedGroupsCount orders the results by user_allowed_groups count. +func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUserAllowedGroupsStep(), opts...) + } +} + +// ByUserAllowedGroups orders the results by user_allowed_groups terms. +func ByUserAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newAPIKeysStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(APIKeysInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) +} +func newRedeemCodesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(RedeemCodesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) +} +func newSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(SubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) +} +func newAssignedSubscriptionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AssignedSubscriptionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), + ) +} +func newAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AllowedGroupsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowedGroupsTable, AllowedGroupsPrimaryKey...), + ) +} +func newUsageLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) +} +func newAttributeValuesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttributeValuesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttributeValuesTable, AttributeValuesColumn), + ) +} +func newPromoCodeUsagesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PromoCodeUsagesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn), + ) +} +func newUserAllowedGroupsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserAllowedGroupsInverseTable, UserAllowedGroupsColumn), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) +} diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go new file mode 100644 index 00000000..6a460f10 --- /dev/null +++ b/backend/ent/user/where.go @@ -0,0 +1,933 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.User { + return predicate.User(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.User { + return predicate.User(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.User { + return predicate.User(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.User { + return predicate.User(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
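+
+// A hedged sketch of applying these ID predicates (client and ctx assumed):
+//
+//	u, err := client.User.Query().
+//		Where(user.IDGT(100), user.IDLTE(200)).
+//		First(ctx)
+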
+func IDGTE(id int64) predicate.User { + return predicate.User(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.User { + return predicate.User(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.User { + return predicate.User(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Email applies equality check predicate on the "email" field. It's identical to EmailEQ. +func Email(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// PasswordHash applies equality check predicate on the "password_hash" field. It's identical to PasswordHashEQ. +func PasswordHash(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// Role applies equality check predicate on the "role" field. It's identical to RoleEQ. +func Role(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldRole, v)) +} + +// Balance applies equality check predicate on the "balance" field. It's identical to BalanceEQ. +func Balance(v float64) predicate.User { + return predicate.User(sql.FieldEQ(FieldBalance, v)) +} + +// Concurrency applies equality check predicate on the "concurrency" field. It's identical to ConcurrencyEQ. +func Concurrency(v int) predicate.User { + return predicate.User(sql.FieldEQ(FieldConcurrency, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldStatus, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. +func Username(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. 
+func DeletedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldDeletedAt)) +} + +// EmailEQ applies the EQ predicate on the "email" field. +func EmailEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldEmail, v)) +} + +// EmailNEQ applies the NEQ predicate on the "email" field. +func EmailNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldEmail, v)) +} + +// EmailIn applies the In predicate on the "email" field. +func EmailIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldEmail, vs...)) +} + +// EmailNotIn applies the NotIn predicate on the "email" field. +func EmailNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldEmail, vs...)) +} + +// EmailGT applies the GT predicate on the "email" field. +func EmailGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldEmail, v)) +} + +// EmailGTE applies the GTE predicate on the "email" field. +func EmailGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldEmail, v)) +} + +// EmailLT applies the LT predicate on the "email" field. +func EmailLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldEmail, v)) +} + +// EmailLTE applies the LTE predicate on the "email" field. +func EmailLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldEmail, v)) +} + +// EmailContains applies the Contains predicate on the "email" field. +func EmailContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldEmail, v)) +} + +// EmailHasPrefix applies the HasPrefix predicate on the "email" field. +func EmailHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldEmail, v)) +} + +// EmailHasSuffix applies the HasSuffix predicate on the "email" field. +func EmailHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldEmail, v)) +} + +// EmailEqualFold applies the EqualFold predicate on the "email" field. +func EmailEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldEmail, v)) +} + +// EmailContainsFold applies the ContainsFold predicate on the "email" field. +func EmailContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldEmail, v)) +} + +// PasswordHashEQ applies the EQ predicate on the "password_hash" field. +func PasswordHashEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// PasswordHashNEQ applies the NEQ predicate on the "password_hash" field. +func PasswordHashNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldPasswordHash, v)) +} + +// PasswordHashIn applies the In predicate on the "password_hash" field. +func PasswordHashIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldPasswordHash, vs...)) +} + +// PasswordHashNotIn applies the NotIn predicate on the "password_hash" field. +func PasswordHashNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldPasswordHash, vs...)) +} + +// PasswordHashGT applies the GT predicate on the "password_hash" field. 
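+
+// For example (a sketch, not prescribed by the schema), EqualFold gives a
+// case-insensitive match, the usual choice when looking a user up by e-mail,
+// combined here with the soft-delete guard:
+//
+//	u, err := client.User.Query().
+//		Where(
+//			user.EmailEqualFold("Alice@Example.com"),
+//			user.DeletedAtIsNil(),
+//		).
+//		Only(ctx)
+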
+func PasswordHashGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldPasswordHash, v)) +} + +// PasswordHashGTE applies the GTE predicate on the "password_hash" field. +func PasswordHashGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldPasswordHash, v)) +} + +// PasswordHashLT applies the LT predicate on the "password_hash" field. +func PasswordHashLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldPasswordHash, v)) +} + +// PasswordHashLTE applies the LTE predicate on the "password_hash" field. +func PasswordHashLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldPasswordHash, v)) +} + +// PasswordHashContains applies the Contains predicate on the "password_hash" field. +func PasswordHashContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldPasswordHash, v)) +} + +// PasswordHashHasPrefix applies the HasPrefix predicate on the "password_hash" field. +func PasswordHashHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldPasswordHash, v)) +} + +// PasswordHashHasSuffix applies the HasSuffix predicate on the "password_hash" field. +func PasswordHashHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldPasswordHash, v)) +} + +// PasswordHashEqualFold applies the EqualFold predicate on the "password_hash" field. +func PasswordHashEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldPasswordHash, v)) +} + +// PasswordHashContainsFold applies the ContainsFold predicate on the "password_hash" field. +func PasswordHashContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldPasswordHash, v)) +} + +// RoleEQ applies the EQ predicate on the "role" field. +func RoleEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldRole, v)) +} + +// RoleNEQ applies the NEQ predicate on the "role" field. +func RoleNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldRole, v)) +} + +// RoleIn applies the In predicate on the "role" field. +func RoleIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldRole, vs...)) +} + +// RoleNotIn applies the NotIn predicate on the "role" field. +func RoleNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldRole, vs...)) +} + +// RoleGT applies the GT predicate on the "role" field. +func RoleGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldRole, v)) +} + +// RoleGTE applies the GTE predicate on the "role" field. +func RoleGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldRole, v)) +} + +// RoleLT applies the LT predicate on the "role" field. +func RoleLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldRole, v)) +} + +// RoleLTE applies the LTE predicate on the "role" field. +func RoleLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldRole, v)) +} + +// RoleContains applies the Contains predicate on the "role" field. +func RoleContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldRole, v)) +} + +// RoleHasPrefix applies the HasPrefix predicate on the "role" field. +func RoleHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldRole, v)) +} + +// RoleHasSuffix applies the HasSuffix predicate on the "role" field. 
+func RoleHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldRole, v)) +} + +// RoleEqualFold applies the EqualFold predicate on the "role" field. +func RoleEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldRole, v)) +} + +// RoleContainsFold applies the ContainsFold predicate on the "role" field. +func RoleContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldRole, v)) +} + +// BalanceEQ applies the EQ predicate on the "balance" field. +func BalanceEQ(v float64) predicate.User { + return predicate.User(sql.FieldEQ(FieldBalance, v)) +} + +// BalanceNEQ applies the NEQ predicate on the "balance" field. +func BalanceNEQ(v float64) predicate.User { + return predicate.User(sql.FieldNEQ(FieldBalance, v)) +} + +// BalanceIn applies the In predicate on the "balance" field. +func BalanceIn(vs ...float64) predicate.User { + return predicate.User(sql.FieldIn(FieldBalance, vs...)) +} + +// BalanceNotIn applies the NotIn predicate on the "balance" field. +func BalanceNotIn(vs ...float64) predicate.User { + return predicate.User(sql.FieldNotIn(FieldBalance, vs...)) +} + +// BalanceGT applies the GT predicate on the "balance" field. +func BalanceGT(v float64) predicate.User { + return predicate.User(sql.FieldGT(FieldBalance, v)) +} + +// BalanceGTE applies the GTE predicate on the "balance" field. +func BalanceGTE(v float64) predicate.User { + return predicate.User(sql.FieldGTE(FieldBalance, v)) +} + +// BalanceLT applies the LT predicate on the "balance" field. +func BalanceLT(v float64) predicate.User { + return predicate.User(sql.FieldLT(FieldBalance, v)) +} + +// BalanceLTE applies the LTE predicate on the "balance" field. +func BalanceLTE(v float64) predicate.User { + return predicate.User(sql.FieldLTE(FieldBalance, v)) +} + +// ConcurrencyEQ applies the EQ predicate on the "concurrency" field. +func ConcurrencyEQ(v int) predicate.User { + return predicate.User(sql.FieldEQ(FieldConcurrency, v)) +} + +// ConcurrencyNEQ applies the NEQ predicate on the "concurrency" field. +func ConcurrencyNEQ(v int) predicate.User { + return predicate.User(sql.FieldNEQ(FieldConcurrency, v)) +} + +// ConcurrencyIn applies the In predicate on the "concurrency" field. +func ConcurrencyIn(vs ...int) predicate.User { + return predicate.User(sql.FieldIn(FieldConcurrency, vs...)) +} + +// ConcurrencyNotIn applies the NotIn predicate on the "concurrency" field. +func ConcurrencyNotIn(vs ...int) predicate.User { + return predicate.User(sql.FieldNotIn(FieldConcurrency, vs...)) +} + +// ConcurrencyGT applies the GT predicate on the "concurrency" field. +func ConcurrencyGT(v int) predicate.User { + return predicate.User(sql.FieldGT(FieldConcurrency, v)) +} + +// ConcurrencyGTE applies the GTE predicate on the "concurrency" field. +func ConcurrencyGTE(v int) predicate.User { + return predicate.User(sql.FieldGTE(FieldConcurrency, v)) +} + +// ConcurrencyLT applies the LT predicate on the "concurrency" field. +func ConcurrencyLT(v int) predicate.User { + return predicate.User(sql.FieldLT(FieldConcurrency, v)) +} + +// ConcurrencyLTE applies the LTE predicate on the "concurrency" field. +func ConcurrencyLTE(v int) predicate.User { + return predicate.User(sql.FieldLTE(FieldConcurrency, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. 
+func StatusNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldStatus, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. +func UsernameEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. 
+func UsernameContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. +func UsernameEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldUsername, v)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldNotes, v)) +} + +// HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. 
+func HasAPIKeys() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAPIKeysWith applies the HasEdge predicate on the "api_keys" edge with a given conditions (other predicates). +func HasAPIKeysWith(preds ...predicate.APIKey) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAPIKeysStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasRedeemCodes applies the HasEdge predicate on the "redeem_codes" edge. +func HasRedeemCodes() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasRedeemCodesWith applies the HasEdge predicate on the "redeem_codes" edge with a given conditions (other predicates). +func HasRedeemCodesWith(preds ...predicate.RedeemCode) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newRedeemCodesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasSubscriptions applies the HasEdge predicate on the "subscriptions" edge. +func HasSubscriptions() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasSubscriptionsWith applies the HasEdge predicate on the "subscriptions" edge with a given conditions (other predicates). +func HasSubscriptionsWith(preds ...predicate.UserSubscription) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAssignedSubscriptions applies the HasEdge predicate on the "assigned_subscriptions" edge. +func HasAssignedSubscriptions() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AssignedSubscriptionsTable, AssignedSubscriptionsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAssignedSubscriptionsWith applies the HasEdge predicate on the "assigned_subscriptions" edge with a given conditions (other predicates). +func HasAssignedSubscriptionsWith(preds ...predicate.UserSubscription) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAssignedSubscriptionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAllowedGroups applies the HasEdge predicate on the "allowed_groups" edge. +func HasAllowedGroups() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, AllowedGroupsTable, AllowedGroupsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAllowedGroupsWith applies the HasEdge predicate on the "allowed_groups" edge with a given conditions (other predicates). 
+func HasAllowedGroupsWith(preds ...predicate.Group) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAllowedGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge. +func HasUsageLogs() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with a given conditions (other predicates). +func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newUsageLogsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAttributeValues applies the HasEdge predicate on the "attribute_values" edge. +func HasAttributeValues() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttributeValuesTable, AttributeValuesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAttributeValuesWith applies the HasEdge predicate on the "attribute_values" edge with a given conditions (other predicates). +func HasAttributeValuesWith(preds ...predicate.UserAttributeValue) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAttributeValuesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasPromoCodeUsages applies the HasEdge predicate on the "promo_code_usages" edge. +func HasPromoCodeUsages() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPromoCodeUsagesWith applies the HasEdge predicate on the "promo_code_usages" edge with a given conditions (other predicates). +func HasPromoCodeUsagesWith(preds ...predicate.PromoCodeUsage) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newPromoCodeUsagesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge. +func HasUserAllowedGroups() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserAllowedGroupsWith applies the HasEdge predicate on the "user_allowed_groups" edge with a given conditions (other predicates). +func HasUserAllowedGroupsWith(preds ...predicate.UserAllowedGroup) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newUserAllowedGroupsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. 
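+
+// The combinators below nest with the field and edge predicates above; a
+// hedged sketch (client and ctx assumed; the role/status literals are
+// illustrative, not values defined by this schema):
+//
+//	n, err := client.User.Query().
+//		Where(user.Or(
+//			user.RoleEQ("admin"),
+//			user.And(user.HasAPIKeys(), user.Not(user.StatusEQ("disabled"))),
+//		)).
+//		Count(ctx)
+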
+func And(predicates ...predicate.User) predicate.User { + return predicate.User(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.User) predicate.User { + return predicate.User(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.User) predicate.User { + return predicate.User(sql.NotPredicates(p)) +} diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go new file mode 100644 index 00000000..e12e476c --- /dev/null +++ b/backend/ent/user_create.go @@ -0,0 +1,1417 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserCreate is the builder for creating a User entity. +type UserCreate struct { + config + mutation *UserMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserCreate) SetCreatedAt(v time.Time) *UserCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableCreatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserCreate) SetUpdatedAt(v time.Time) *UserCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableUpdatedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *UserCreate) SetDeletedAt(v time.Time) *UserCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *UserCreate) SetNillableDeletedAt(v *time.Time) *UserCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetEmail sets the "email" field. +func (_c *UserCreate) SetEmail(v string) *UserCreate { + _c.mutation.SetEmail(v) + return _c +} + +// SetPasswordHash sets the "password_hash" field. +func (_c *UserCreate) SetPasswordHash(v string) *UserCreate { + _c.mutation.SetPasswordHash(v) + return _c +} + +// SetRole sets the "role" field. +func (_c *UserCreate) SetRole(v string) *UserCreate { + _c.mutation.SetRole(v) + return _c +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (_c *UserCreate) SetNillableRole(v *string) *UserCreate { + if v != nil { + _c.SetRole(*v) + } + return _c +} + +// SetBalance sets the "balance" field. +func (_c *UserCreate) SetBalance(v float64) *UserCreate { + _c.mutation.SetBalance(v) + return _c +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_c *UserCreate) SetNillableBalance(v *float64) *UserCreate { + if v != nil { + _c.SetBalance(*v) + } + return _c +} + +// SetConcurrency sets the "concurrency" field. 
+func (_c *UserCreate) SetConcurrency(v int) *UserCreate { + _c.mutation.SetConcurrency(v) + return _c +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_c *UserCreate) SetNillableConcurrency(v *int) *UserCreate { + if v != nil { + _c.SetConcurrency(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *UserCreate) SetStatus(v string) *UserCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *UserCreate) SetNillableStatus(v *string) *UserCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetUsername sets the "username" field. +func (_c *UserCreate) SetUsername(v string) *UserCreate { + _c.mutation.SetUsername(v) + return _c +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_c *UserCreate) SetNillableUsername(v *string) *UserCreate { + if v != nil { + _c.SetUsername(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *UserCreate) SetNotes(v string) *UserCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *UserCreate) SetNillableNotes(v *string) *UserCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_c *UserCreate) AddAPIKeyIDs(ids ...int64) *UserCreate { + _c.mutation.AddAPIKeyIDs(ids...) + return _c +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. +func (_c *UserCreate) AddAPIKeys(v ...*APIKey) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_c *UserCreate) AddRedeemCodeIDs(ids ...int64) *UserCreate { + _c.mutation.AddRedeemCodeIDs(ids...) + return _c +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_c *UserCreate) AddRedeemCodes(v ...*RedeemCode) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_c *UserCreate) AddSubscriptionIDs(ids ...int64) *UserCreate { + _c.mutation.AddSubscriptionIDs(ids...) + return _c +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_c *UserCreate) AddSubscriptions(v ...*UserSubscription) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_c *UserCreate) AddAssignedSubscriptionIDs(ids ...int64) *UserCreate { + _c.mutation.AddAssignedSubscriptionIDs(ids...) + return _c +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. +func (_c *UserCreate) AddAssignedSubscriptions(v ...*UserSubscription) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_c *UserCreate) AddAllowedGroupIDs(ids ...int64) *UserCreate { + _c.mutation.AddAllowedGroupIDs(ids...) 
+ return _c +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_c *UserCreate) AddAllowedGroups(v ...*Group) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAllowedGroupIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_c *UserCreate) AddUsageLogIDs(ids ...int64) *UserCreate { + _c.mutation.AddUsageLogIDs(ids...) + return _c +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_c *UserCreate) AddUsageLogs(v ...*UsageLog) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageLogIDs(ids...) +} + +// AddAttributeValueIDs adds the "attribute_values" edge to the UserAttributeValue entity by IDs. +func (_c *UserCreate) AddAttributeValueIDs(ids ...int64) *UserCreate { + _c.mutation.AddAttributeValueIDs(ids...) + return _c +} + +// AddAttributeValues adds the "attribute_values" edges to the UserAttributeValue entity. +func (_c *UserCreate) AddAttributeValues(v ...*UserAttributeValue) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddAttributeValueIDs(ids...) +} + +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_c *UserCreate) AddPromoCodeUsageIDs(ids ...int64) *UserCreate { + _c.mutation.AddPromoCodeUsageIDs(ids...) + return _c +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_c *UserCreate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddPromoCodeUsageIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_c *UserCreate) Mutation() *UserMutation { + return _c.mutation +} + +// Save creates the User in the database. +func (_c *UserCreate) Save(ctx context.Context) (*User, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserCreate) SaveX(ctx context.Context) *User { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
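+
+// A hedged end-to-end sketch of the builder (client, ctx and a password hash
+// assumed; role, status, balance and concurrency fall back to the defaults
+// registered by ent/runtime):
+//
+//	u, err := client.User.Create().
+//		SetEmail("alice@example.com").
+//		SetPasswordHash(hash).
+//		SetUsername("alice").
+//		Save(ctx)
+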
+func (_c *UserCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if user.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized user.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := user.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if user.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized user.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := user.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Role(); !ok { + v := user.DefaultRole + _c.mutation.SetRole(v) + } + if _, ok := _c.mutation.Balance(); !ok { + v := user.DefaultBalance + _c.mutation.SetBalance(v) + } + if _, ok := _c.mutation.Concurrency(); !ok { + v := user.DefaultConcurrency + _c.mutation.SetConcurrency(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := user.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.Username(); !ok { + v := user.DefaultUsername + _c.mutation.SetUsername(v) + } + if _, ok := _c.mutation.Notes(); !ok { + v := user.DefaultNotes + _c.mutation.SetNotes(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UserCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} + } + if _, ok := _c.mutation.Email(); !ok { + return &ValidationError{Name: "email", err: errors.New(`ent: missing required field "User.email"`)} + } + if v, ok := _c.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if _, ok := _c.mutation.PasswordHash(); !ok { + return &ValidationError{Name: "password_hash", err: errors.New(`ent: missing required field "User.password_hash"`)} + } + if v, ok := _c.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if _, ok := _c.mutation.Role(); !ok { + return &ValidationError{Name: "role", err: errors.New(`ent: missing required field "User.role"`)} + } + if v, ok := _c.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if _, ok := _c.mutation.Balance(); !ok { + return &ValidationError{Name: "balance", err: errors.New(`ent: missing required field "User.balance"`)} + } + if _, ok := _c.mutation.Concurrency(); !ok { + return &ValidationError{Name: "concurrency", err: errors.New(`ent: missing required field "User.concurrency"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "User.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if _, ok := _c.mutation.Username(); !ok { + return &ValidationError{Name: "username", err: errors.New(`ent: missing required field "User.username"`)} + } 
+ if v, ok := _c.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if _, ok := _c.mutation.Notes(); !ok { + return &ValidationError{Name: "notes", err: errors.New(`ent: missing required field "User.notes"`)} + } + return nil +} + +func (_c *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { + var ( + _node = &User{config: _c.config} + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + _node.Email = value + } + if value, ok := _c.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + _node.PasswordHash = value + } + if value, ok := _c.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + _node.Role = value + } + if value, ok := _c.mutation.Balance(); ok { + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + _node.Balance = value + } + if value, ok := _c.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + _node.Concurrency = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + _node.Username = value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(user.FieldNotes, field.TypeString, value) + _node.Notes = value + } + if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := 
_c.mutation.SubscriptionsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.SubscriptionsTable,
+ Columns: []string{user.SubscriptionsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := _c.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.AssignedSubscriptionsTable,
+ Columns: []string{user.AssignedSubscriptionsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := _c.mutation.AllowedGroupsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.M2M,
+ Inverse: false,
+ Table: user.AllowedGroupsTable,
+ Columns: user.AllowedGroupsPrimaryKey,
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ createE := &UserAllowedGroupCreate{config: _c.config, mutation: newUserAllowedGroupMutation(_c.config, OpCreate)}
+ createE.defaults()
+ _, specE := createE.createSpec()
+ edge.Target.Fields = specE.Fields
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := _c.mutation.UsageLogsIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.UsageLogsTable,
+ Columns: []string{user.UsageLogsColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := _c.mutation.AttributeValuesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.AttributeValuesTable,
+ Columns: []string{user.AttributeValuesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ if nodes := _c.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 {
+ edge := &sqlgraph.EdgeSpec{
+ Rel: sqlgraph.O2M,
+ Inverse: false,
+ Table: user.PromoCodeUsagesTable,
+ Columns: []string{user.PromoCodeUsagesColumn},
+ Bidi: false,
+ Target: &sqlgraph.EdgeTarget{
+ IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
+ },
+ }
+ for _, k := range nodes {
+ edge.Target.Nodes = append(edge.Target.Nodes, k)
+ }
+ _spec.Edges = append(_spec.Edges, edge)
+ }
+ return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+// client.User.Create().
+// SetCreatedAt(v).
+// OnConflict(
+// // Update the row with the new values
+// // that were proposed for insertion.
+// sql.ResolveWithNewValues(),
+// ).
+// // Override some of the fields with custom
+// // update values.
+// Update(func(u *ent.UserUpsert) {
+// u.SetUpdatedAt(v)
+// }).
+// Exec(ctx) +func (_c *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { + _c.conflict = opts + return &UserUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertOne{ + create: _c, + } +} + +type ( + // UserUpsertOne is the builder for "upsert"-ing + // one User node. + UserUpsertOne struct { + create *UserCreate + } + + // UserUpsert is the "OnConflict" setter. + UserUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsert) SetUpdatedAt(v time.Time) *UserUpsert { + u.Set(user.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsert) UpdateUpdatedAt() *UserUpsert { + u.SetExcluded(user.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserUpsert) SetDeletedAt(v time.Time) *UserUpsert { + u.Set(user.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserUpsert) UpdateDeletedAt() *UserUpsert { + u.SetExcluded(user.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsert) ClearDeletedAt() *UserUpsert { + u.SetNull(user.FieldDeletedAt) + return u +} + +// SetEmail sets the "email" field. +func (u *UserUpsert) SetEmail(v string) *UserUpsert { + u.Set(user.FieldEmail, v) + return u +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsert) UpdateEmail() *UserUpsert { + u.SetExcluded(user.FieldEmail) + return u +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsert) SetPasswordHash(v string) *UserUpsert { + u.Set(user.FieldPasswordHash, v) + return u +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsert) UpdatePasswordHash() *UserUpsert { + u.SetExcluded(user.FieldPasswordHash) + return u +} + +// SetRole sets the "role" field. +func (u *UserUpsert) SetRole(v string) *UserUpsert { + u.Set(user.FieldRole, v) + return u +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsert) UpdateRole() *UserUpsert { + u.SetExcluded(user.FieldRole) + return u +} + +// SetBalance sets the "balance" field. +func (u *UserUpsert) SetBalance(v float64) *UserUpsert { + u.Set(user.FieldBalance, v) + return u +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsert) UpdateBalance() *UserUpsert { + u.SetExcluded(user.FieldBalance) + return u +} + +// AddBalance adds v to the "balance" field. +func (u *UserUpsert) AddBalance(v float64) *UserUpsert { + u.Add(user.FieldBalance, v) + return u +} + +// SetConcurrency sets the "concurrency" field. +func (u *UserUpsert) SetConcurrency(v int) *UserUpsert { + u.Set(user.FieldConcurrency, v) + return u +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. 
+func (u *UserUpsert) UpdateConcurrency() *UserUpsert { + u.SetExcluded(user.FieldConcurrency) + return u +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsert) AddConcurrency(v int) *UserUpsert { + u.Add(user.FieldConcurrency, v) + return u +} + +// SetStatus sets the "status" field. +func (u *UserUpsert) SetStatus(v string) *UserUpsert { + u.Set(user.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsert) UpdateStatus() *UserUpsert { + u.SetExcluded(user.FieldStatus) + return u +} + +// SetUsername sets the "username" field. +func (u *UserUpsert) SetUsername(v string) *UserUpsert { + u.Set(user.FieldUsername, v) + return u +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsert) UpdateUsername() *UserUpsert { + u.SetExcluded(user.FieldUsername) + return u +} + +// SetNotes sets the "notes" field. +func (u *UserUpsert) SetNotes(v string) *UserUpsert { + u.Set(user.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserUpsert) UpdateNotes() *UserUpsert { + u.SetExcluded(user.FieldNotes) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserUpsertOne) UpdateNewValues() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertOne) Ignore() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertOne) DoNothing() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreate.OnConflict +// documentation for more info. +func (u *UserUpsertOne) Update(set func(*UserUpsert)) *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertOne) SetUpdatedAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUpdatedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserUpsertOne) SetDeletedAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. 
+func (u *UserUpsertOne) UpdateDeletedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsertOne) ClearDeletedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearDeletedAt() + }) +} + +// SetEmail sets the "email" field. +func (u *UserUpsertOne) SetEmail(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetEmail(v) + }) +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateEmail() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateEmail() + }) +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsertOne) SetPasswordHash(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePasswordHash() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// SetRole sets the "role" field. +func (u *UserUpsertOne) SetRole(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetRole(v) + }) +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateRole() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateRole() + }) +} + +// SetBalance sets the "balance" field. +func (u *UserUpsertOne) SetBalance(v float64) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetBalance(v) + }) +} + +// AddBalance adds v to the "balance" field. +func (u *UserUpsertOne) AddBalance(v float64) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.AddBalance(v) + }) +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateBalance() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateBalance() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *UserUpsertOne) SetConcurrency(v int) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsertOne) AddConcurrency(v int) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateConcurrency() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateConcurrency() + }) +} + +// SetStatus sets the "status" field. +func (u *UserUpsertOne) SetStatus(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateStatus() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateStatus() + }) +} + +// SetUsername sets the "username" field. +func (u *UserUpsertOne) SetUsername(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUsername() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetNotes sets the "notes" field. 
+func (u *UserUpsertOne) SetNotes(v string) *UserUpsertOne {
+ return u.Update(func(s *UserUpsert) {
+ s.SetNotes(v)
+ })
+}
+
+// UpdateNotes sets the "notes" field to the value that was provided on create.
+func (u *UserUpsertOne) UpdateNotes() *UserUpsertOne {
+ return u.Update(func(s *UserUpsert) {
+ s.UpdateNotes()
+ })
+}
+
+// Exec executes the query.
+func (u *UserUpsertOne) Exec(ctx context.Context) error {
+ if len(u.create.conflict) == 0 {
+ return errors.New("ent: missing options for UserCreate.OnConflict")
+ }
+ return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserUpsertOne) ExecX(ctx context.Context) {
+ if err := u.create.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *UserUpsertOne) ID(ctx context.Context) (id int64, err error) {
+ node, err := u.create.Save(ctx)
+ if err != nil {
+ return id, err
+ }
+ return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *UserUpsertOne) IDX(ctx context.Context) int64 {
+ id, err := u.ID(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return id
+}
+
+// UserCreateBulk is the builder for creating many User entities in bulk.
+type UserCreateBulk struct {
+ config
+ err error
+ builders []*UserCreate
+ conflict []sql.ConflictOption
+}
+
+// Save creates the User entities in the database.
+func (_c *UserCreateBulk) Save(ctx context.Context) ([]*User, error) {
+ if _c.err != nil {
+ return nil, _c.err
+ }
+ specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+ nodes := make([]*User, len(_c.builders))
+ mutators := make([]Mutator, len(_c.builders))
+ for i := range _c.builders {
+ func(i int, root context.Context) {
+ builder := _c.builders[i]
+ builder.defaults()
+ var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+ mutation, ok := m.(*UserMutation)
+ if !ok {
+ return nil, fmt.Errorf("unexpected mutation type %T", m)
+ }
+ if err := builder.check(); err != nil {
+ return nil, err
+ }
+ builder.mutation = mutation
+ var err error
+ nodes[i], specs[i] = builder.createSpec()
+ if i < len(mutators)-1 {
+ _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+ } else {
+ spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+ spec.OnConflict = _c.conflict
+ // Invoke the actual operation on the latest mutation in the chain.
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+ if sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ mutation.id = &nodes[i].ID
+ if specs[i].ID.Value != nil {
+ id := specs[i].ID.Value.(int64)
+ nodes[i].ID = int64(id)
+ }
+ mutation.done = true
+ return nodes[i], nil
+ })
+ for i := len(builder.hooks) - 1; i >= 0; i-- {
+ mut = builder.hooks[i](mut)
+ }
+ mutators[i] = mut
+ }(i, ctx)
+ }
+ if len(mutators) > 0 {
+ if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+ return nil, err
+ }
+ }
+ return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UserCreateBulk) SaveX(ctx context.Context) []*User {
+ v, err := _c.Save(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+// Exec executes the query.
+func (_c *UserCreateBulk) Exec(ctx context.Context) error {
+ _, err := _c.Save(ctx)
+ return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
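+// For example, an illustrative bulk insert (the email values and the
+// precomputed hash h are hypothetical):
+//
+// client.User.CreateBulk(
+// client.User.Create().SetEmail("a@example.com").SetPasswordHash(h),
+// client.User.Create().SetEmail("b@example.com").SetPasswordHash(h),
+// ).ExecX(ctx)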
+func (_c *UserCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { + _c.conflict = opts + return &UserUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertBulk{ + create: _c, + } +} + +// UserUpsertBulk is the builder for "upsert"-ing +// a bulk of User nodes. +type UserUpsertBulk struct { + create *UserCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserUpsertBulk) UpdateNewValues() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertBulk) Ignore() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertBulk) DoNothing() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreateBulk.OnConflict +// documentation for more info. +func (u *UserUpsertBulk) Update(set func(*UserUpsert)) *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertBulk) SetUpdatedAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUpdatedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserUpsertBulk) SetDeletedAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdateDeletedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserUpsertBulk) ClearDeletedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearDeletedAt() + }) +} + +// SetEmail sets the "email" field. +func (u *UserUpsertBulk) SetEmail(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetEmail(v) + }) +} + +// UpdateEmail sets the "email" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateEmail() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateEmail() + }) +} + +// SetPasswordHash sets the "password_hash" field. +func (u *UserUpsertBulk) SetPasswordHash(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "password_hash" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdatePasswordHash() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// SetRole sets the "role" field. +func (u *UserUpsertBulk) SetRole(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetRole(v) + }) +} + +// UpdateRole sets the "role" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateRole() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateRole() + }) +} + +// SetBalance sets the "balance" field. +func (u *UserUpsertBulk) SetBalance(v float64) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetBalance(v) + }) +} + +// AddBalance adds v to the "balance" field. +func (u *UserUpsertBulk) AddBalance(v float64) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.AddBalance(v) + }) +} + +// UpdateBalance sets the "balance" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateBalance() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateBalance() + }) +} + +// SetConcurrency sets the "concurrency" field. +func (u *UserUpsertBulk) SetConcurrency(v int) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetConcurrency(v) + }) +} + +// AddConcurrency adds v to the "concurrency" field. +func (u *UserUpsertBulk) AddConcurrency(v int) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.AddConcurrency(v) + }) +} + +// UpdateConcurrency sets the "concurrency" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateConcurrency() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateConcurrency() + }) +} + +// SetStatus sets the "status" field. +func (u *UserUpsertBulk) SetStatus(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateStatus() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateStatus() + }) +} + +// SetUsername sets the "username" field. +func (u *UserUpsertBulk) SetUsername(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUsername() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetNotes sets the "notes" field. 
+func (u *UserUpsertBulk) SetNotes(v string) *UserUpsertBulk {
+ return u.Update(func(s *UserUpsert) {
+ s.SetNotes(v)
+ })
+}
+
+// UpdateNotes sets the "notes" field to the value that was provided on create.
+func (u *UserUpsertBulk) UpdateNotes() *UserUpsertBulk {
+ return u.Update(func(s *UserUpsert) {
+ s.UpdateNotes()
+ })
+}
+
+// Exec executes the query.
+func (u *UserUpsertBulk) Exec(ctx context.Context) error {
+ if u.create.err != nil {
+ return u.create.err
+ }
+ for i, b := range u.create.builders {
+ if len(b.conflict) != 0 {
+ return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserCreateBulk instead", i)
+ }
+ }
+ if len(u.create.conflict) == 0 {
+ return errors.New("ent: missing options for UserCreateBulk.OnConflict")
+ }
+ return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserUpsertBulk) ExecX(ctx context.Context) {
+ if err := u.create.Exec(ctx); err != nil {
+ panic(err)
+ }
+}
diff --git a/backend/ent/user_delete.go b/backend/ent/user_delete.go
new file mode 100644
index 00000000..002ef1cf
--- /dev/null
+++ b/backend/ent/user_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+ "context"
+
+ "entgo.io/ent/dialect/sql"
+ "entgo.io/ent/dialect/sql/sqlgraph"
+ "entgo.io/ent/schema/field"
+ "github.com/Wei-Shaw/sub2api/ent/predicate"
+ "github.com/Wei-Shaw/sub2api/ent/user"
+)
+
+// UserDelete is the builder for deleting a User entity.
+type UserDelete struct {
+ config
+ hooks []Hook
+ mutation *UserMutation
+}
+
+// Where appends a list of predicates to the UserDelete builder.
+func (_d *UserDelete) Where(ps ...predicate.User) *UserDelete {
+ _d.mutation.Where(ps...)
+ return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *UserDelete) Exec(ctx context.Context) (int, error) {
+ return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserDelete) ExecX(ctx context.Context) int {
+ n, err := _d.Exec(ctx)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (_d *UserDelete) sqlExec(ctx context.Context) (int, error) {
+ _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64))
+ if ps := _d.mutation.predicates; len(ps) > 0 {
+ _spec.Predicate = func(selector *sql.Selector) {
+ for i := range ps {
+ ps[i](selector)
+ }
+ }
+ }
+ affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+ if err != nil && sqlgraph.IsConstraintError(err) {
+ err = &ConstraintError{msg: err.Error(), wrap: err}
+ }
+ _d.mutation.done = true
+ return affected, err
+}
+
+// UserDeleteOne is the builder for deleting a single User entity.
+type UserDeleteOne struct {
+ _d *UserDelete
+}
+
+// Where appends a list of predicates to the UserDeleteOne builder.
+func (_d *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne {
+ _d._d.mutation.Where(ps...)
+ return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UserDeleteOne) Exec(ctx context.Context) error {
+ n, err := _d._d.Exec(ctx)
+ switch {
+ case err != nil:
+ return err
+ case n == 0:
+ return &NotFoundError{user.Label}
+ default:
+ return nil
+ }
+}
+
+// ExecX is like Exec, but panics if an error occurs.
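+// For example, an illustrative single-row delete (assumes id was obtained
+// earlier and that the generated client exposes the usual DeleteOneID helper):
+//
+// client.User.DeleteOneID(id).ExecX(ctx)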
+func (_d *UserDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go new file mode 100644 index 00000000..e66e2dc8 --- /dev/null +++ b/backend/ent/user_query.go @@ -0,0 +1,1273 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserQuery is the builder for querying User entities. +type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withAPIKeys *APIKeyQuery + withRedeemCodes *RedeemCodeQuery + withSubscriptions *UserSubscriptionQuery + withAssignedSubscriptions *UserSubscriptionQuery + withAllowedGroups *GroupQuery + withUsageLogs *UsageLogQuery + withAttributeValues *UserAttributeValueQuery + withPromoCodeUsages *PromoCodeUsageQuery + withUserAllowedGroups *UserAllowedGroupQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (_q *UserQuery) Where(ps ...predicate.User) *UserQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserQuery) Limit(limit int) *UserQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserQuery) Offset(offset int) *UserQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserQuery) Unique(unique bool) *UserQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserQuery) Order(o ...user.OrderOption) *UserQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryAPIKeys chains the current query on the "api_keys" edge. +func (_q *UserQuery) QueryAPIKeys() *APIKeyQuery { + query := (&APIKeyClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(apikey.Table, apikey.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.APIKeysTable, user.APIKeysColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryRedeemCodes chains the current query on the "redeem_codes" edge. 
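+// For example, an illustrative traversal (the email value is hypothetical):
+//
+// codes, err := client.User.Query().
+// Where(user.EmailEQ("a@example.com")).
+// QueryRedeemCodes().
+// All(ctx)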
+func (_q *UserQuery) QueryRedeemCodes() *RedeemCodeQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(redeemcode.Table, redeemcode.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.RedeemCodesTable, user.RedeemCodesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QuerySubscriptions chains the current query on the "subscriptions" edge. +func (_q *UserQuery) QuerySubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.SubscriptionsTable, user.SubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAssignedSubscriptions chains the current query on the "assigned_subscriptions" edge. +func (_q *UserQuery) QueryAssignedSubscriptions() *UserSubscriptionQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(usersubscription.Table, usersubscription.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AssignedSubscriptionsTable, user.AssignedSubscriptionsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAllowedGroups chains the current query on the "allowed_groups" edge. +func (_q *UserQuery) QueryAllowedGroups() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, user.AllowedGroupsTable, user.AllowedGroupsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsageLogs chains the current query on the "usage_logs" edge. 
+func (_q *UserQuery) QueryUsageLogs() *UsageLogQuery { + query := (&UsageLogClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.UsageLogsTable, user.UsageLogsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAttributeValues chains the current query on the "attribute_values" edge. +func (_q *UserQuery) QueryAttributeValues() *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.AttributeValuesTable, user.AttributeValuesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryPromoCodeUsages chains the current query on the "promo_code_usages" edge. +func (_q *UserQuery) QueryPromoCodeUsages() *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge. +func (_q *UserQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery { + query := (&UserAllowedGroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(userallowedgroup.Table, userallowedgroup.UserColumn), + sqlgraph.Edge(sqlgraph.O2M, true, user.UserAllowedGroupsTable, user.UserAllowedGroupsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (_q *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *UserQuery) FirstX(ctx context.Context) *User { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (_q *UserQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (_q *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserQuery) OnlyX(ctx context.Context) *User { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (_q *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (_q *UserQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
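+// For example, an illustrative count (the status value is hypothetical):
+//
+// n, err := client.User.Query().
+// Where(user.StatusEQ("active")).
+// Count(ctx)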
+func (_q *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserQuery) Clone() *UserQuery { + if _q == nil { + return nil + } + return &UserQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]user.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.User{}, _q.predicates...), + withAPIKeys: _q.withAPIKeys.Clone(), + withRedeemCodes: _q.withRedeemCodes.Clone(), + withSubscriptions: _q.withSubscriptions.Clone(), + withAssignedSubscriptions: _q.withAssignedSubscriptions.Clone(), + withAllowedGroups: _q.withAllowedGroups.Clone(), + withUsageLogs: _q.withUsageLogs.Clone(), + withAttributeValues: _q.withAttributeValues.Clone(), + withPromoCodeUsages: _q.withPromoCodeUsages.Clone(), + withUserAllowedGroups: _q.withUserAllowedGroups.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithAPIKeys tells the query-builder to eager-load the nodes that are connected to +// the "api_keys" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithAPIKeys(opts ...func(*APIKeyQuery)) *UserQuery { + query := (&APIKeyClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAPIKeys = query + return _q +} + +// WithRedeemCodes tells the query-builder to eager-load the nodes that are connected to +// the "redeem_codes" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithRedeemCodes(opts ...func(*RedeemCodeQuery)) *UserQuery { + query := (&RedeemCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withRedeemCodes = query + return _q +} + +// WithSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "subscriptions" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithSubscriptions(opts ...func(*UserSubscriptionQuery)) *UserQuery { + query := (&UserSubscriptionClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withSubscriptions = query + return _q +} + +// WithAssignedSubscriptions tells the query-builder to eager-load the nodes that are connected to +// the "assigned_subscriptions" edge. 
The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithAssignedSubscriptions(opts ...func(*UserSubscriptionQuery)) *UserQuery {
+ query := (&UserSubscriptionClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withAssignedSubscriptions = query
+ return _q
+}
+
+// WithAllowedGroups tells the query-builder to eager-load the nodes that are connected to
+// the "allowed_groups" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithAllowedGroups(opts ...func(*GroupQuery)) *UserQuery {
+ query := (&GroupClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withAllowedGroups = query
+ return _q
+}
+
+// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to
+// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *UserQuery {
+ query := (&UsageLogClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withUsageLogs = query
+ return _q
+}
+
+// WithAttributeValues tells the query-builder to eager-load the nodes that are connected to
+// the "attribute_values" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithAttributeValues(opts ...func(*UserAttributeValueQuery)) *UserQuery {
+ query := (&UserAttributeValueClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withAttributeValues = query
+ return _q
+}
+
+// WithPromoCodeUsages tells the query-builder to eager-load the nodes that are connected to
+// the "promo_code_usages" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithPromoCodeUsages(opts ...func(*PromoCodeUsageQuery)) *UserQuery {
+ query := (&PromoCodeUsageClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withPromoCodeUsages = query
+ return _q
+}
+
+// WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to
+// the "user_allowed_groups" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *UserQuery {
+ query := (&UserAllowedGroupClient{config: _q.config}).Query()
+ for _, opt := range opts {
+ opt(query)
+ }
+ _q.withUserAllowedGroups = query
+ return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// Count int `json:"count,omitempty"`
+// }
+//
+// client.User.Query().
+// GroupBy(user.FieldCreatedAt).
+// Aggregate(ent.Count()).
+// Scan(ctx, &v)
+func (_q *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy {
+ _q.ctx.Fields = append([]string{field}, fields...)
+ grbuild := &UserGroupBy{build: _q}
+ grbuild.flds = &_q.ctx.Fields
+ grbuild.label = user.Label
+ grbuild.scan = grbuild.Scan
+ return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+// var v []struct {
+// CreatedAt time.Time `json:"created_at,omitempty"`
+// }
+//
+// client.User.Query().
+// Select(user.FieldCreatedAt).
+// Scan(ctx, &v) +func (_q *UserQuery) Select(fields ...string) *UserSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserSelect{UserQuery: _q} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (_q *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + _spec = _q.querySpec() + loadedTypes = [9]bool{ + _q.withAPIKeys != nil, + _q.withRedeemCodes != nil, + _q.withSubscriptions != nil, + _q.withAssignedSubscriptions != nil, + _q.withAllowedGroups != nil, + _q.withUsageLogs != nil, + _q.withAttributeValues != nil, + _q.withPromoCodeUsages != nil, + _q.withUserAllowedGroups != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withAPIKeys; query != nil { + if err := _q.loadAPIKeys(ctx, query, nodes, + func(n *User) { n.Edges.APIKeys = []*APIKey{} }, + func(n *User, e *APIKey) { n.Edges.APIKeys = append(n.Edges.APIKeys, e) }); err != nil { + return nil, err + } + } + if query := _q.withRedeemCodes; query != nil { + if err := _q.loadRedeemCodes(ctx, query, nodes, + func(n *User) { n.Edges.RedeemCodes = []*RedeemCode{} }, + func(n *User, e *RedeemCode) { n.Edges.RedeemCodes = append(n.Edges.RedeemCodes, e) }); err != nil { + return nil, err + } + } + if query := _q.withSubscriptions; query != nil { + if err := _q.loadSubscriptions(ctx, query, nodes, + func(n *User) { n.Edges.Subscriptions = []*UserSubscription{} }, + func(n *User, e *UserSubscription) { n.Edges.Subscriptions = append(n.Edges.Subscriptions, e) }); err != nil { + return nil, err + } + } + if query := _q.withAssignedSubscriptions; query != nil { + if err := _q.loadAssignedSubscriptions(ctx, query, nodes, + func(n *User) { n.Edges.AssignedSubscriptions = []*UserSubscription{} }, + func(n *User, e *UserSubscription) { + n.Edges.AssignedSubscriptions = append(n.Edges.AssignedSubscriptions, e) + }); err != nil { + return nil, err + } + } + if query := _q.withAllowedGroups; query != nil { + if err := _q.loadAllowedGroups(ctx, query, nodes, + func(n *User) { n.Edges.AllowedGroups = []*Group{} }, + func(n *User, e *Group) { n.Edges.AllowedGroups = append(n.Edges.AllowedGroups, 
e) }); err != nil { + return nil, err + } + } + if query := _q.withUsageLogs; query != nil { + if err := _q.loadUsageLogs(ctx, query, nodes, + func(n *User) { n.Edges.UsageLogs = []*UsageLog{} }, + func(n *User, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil { + return nil, err + } + } + if query := _q.withAttributeValues; query != nil { + if err := _q.loadAttributeValues(ctx, query, nodes, + func(n *User) { n.Edges.AttributeValues = []*UserAttributeValue{} }, + func(n *User, e *UserAttributeValue) { n.Edges.AttributeValues = append(n.Edges.AttributeValues, e) }); err != nil { + return nil, err + } + } + if query := _q.withPromoCodeUsages; query != nil { + if err := _q.loadPromoCodeUsages(ctx, query, nodes, + func(n *User) { n.Edges.PromoCodeUsages = []*PromoCodeUsage{} }, + func(n *User, e *PromoCodeUsage) { n.Edges.PromoCodeUsages = append(n.Edges.PromoCodeUsages, e) }); err != nil { + return nil, err + } + } + if query := _q.withUserAllowedGroups; query != nil { + if err := _q.loadUserAllowedGroups(ctx, query, nodes, + func(n *User) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} }, + func(n *User, e *UserAllowedGroup) { n.Edges.UserAllowedGroups = append(n.Edges.UserAllowedGroups, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserQuery) loadAPIKeys(ctx context.Context, query *APIKeyQuery, nodes []*User, init func(*User), assign func(*User, *APIKey)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(apikey.FieldUserID) + } + query.Where(predicate.APIKey(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.APIKeysColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadRedeemCodes(ctx context.Context, query *RedeemCodeQuery, nodes []*User, init func(*User), assign func(*User, *RedeemCode)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(redeemcode.FieldUsedBy) + } + query.Where(predicate.RedeemCode(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.RedeemCodesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UsedBy + if fk == nil { + return fmt.Errorf(`foreign-key "used_by" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "used_by" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*User, init func(*User), assign func(*User, *UserSubscription)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if 
len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usersubscription.FieldUserID) + } + query.Where(predicate.UserSubscription(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.SubscriptionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadAssignedSubscriptions(ctx context.Context, query *UserSubscriptionQuery, nodes []*User, init func(*User), assign func(*User, *UserSubscription)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usersubscription.FieldAssignedBy) + } + query.Where(predicate.UserSubscription(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AssignedSubscriptionsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AssignedBy + if fk == nil { + return fmt.Errorf(`foreign-key "assigned_by" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "assigned_by" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadAllowedGroups(ctx context.Context, query *GroupQuery, nodes []*User, init func(*User), assign func(*User, *Group)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[int64]*User) + nids := make(map[int64]map[*User]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(user.AllowedGroupsTable) + s.Join(joinT).On(s.C(group.FieldID), joinT.C(user.AllowedGroupsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(user.AllowedGroupsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(user.AllowedGroupsPrimaryKey[0])) + s.AppendSelect(columns...) 
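+ // The user-side join column (user.AllowedGroupsPrimaryKey[0]) is prepended to the
+ // selection so the scan hooks installed below can strip it off again and map each
+ // Group row back to its owning User; distinct is disabled on the next line so one
+ // row survives per (user, group) pair.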
+ s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(sql.NullInt64)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := values[0].(*sql.NullInt64).Int64 + inValue := values[1].(*sql.NullInt64).Int64 + if nids[inValue] == nil { + nids[inValue] = map[*User]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "allowed_groups" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (_q *UserQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*User, init func(*User), assign func(*User, *UsageLog)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usagelog.FieldUserID) + } + query.Where(predicate.UsageLog(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.UsageLogsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadAttributeValues(ctx context.Context, query *UserAttributeValueQuery, nodes []*User, init func(*User), assign func(*User, *UserAttributeValue)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userattributevalue.FieldUserID) + } + query.Where(predicate.UserAttributeValue(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AttributeValuesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadPromoCodeUsages(ctx context.Context, query *PromoCodeUsageQuery, nodes []*User, init func(*User), assign func(*User, *PromoCodeUsage)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(promocodeusage.FieldUserID) + } + query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) { + 
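+ // Batched eager loading: the IDs of all parent users were collected into fks, and a
+ // single query with an IN (...) predicate fetches the PromoCodeUsage rows for the
+ // whole batch before they are assigned back through the user_id foreign key.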
s.Where(sql.InValues(s.C(user.PromoCodeUsagesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} +func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*User, init func(*User), assign func(*User, *UserAllowedGroup)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userallowedgroup.FieldUserID) + } + query.Where(predicate.UserAllowedGroup(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.UserAllowedGroupsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed.
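+ // Some dialects (MySQL and SQLite, for example) reject an OFFSET without a LIMIT,
+ // hence the math.MaxInt32 sentinel when only an offset was set on the query.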
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevents them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled back. +func (_q *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value.
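+//
+// A minimal usage sketch (client and ctx here are illustrative values, not part of this file):
+//
+//	var emails []string
+//	err := client.User.Query().
+//		Select(user.FieldEmail).
+//		Scan(ctx, &emails)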
+func (_s *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, _s.UserQuery, _s, _s.inters, v) +} + +func (_s *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go new file mode 100644 index 00000000..cf189fea --- /dev/null +++ b/backend/ent/user_update.go @@ -0,0 +1,1981 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list of predicates to the UserUpdate builder. +func (_u *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserUpdate) SetUpdatedAt(v time.Time) *UserUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserUpdate) SetDeletedAt(v time.Time) *UserUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserUpdate) SetNillableDeletedAt(v *time.Time) *UserUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserUpdate) ClearDeletedAt() *UserUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetEmail sets the "email" field. +func (_u *UserUpdate) SetEmail(v string) *UserUpdate { + _u.mutation.SetEmail(v) + return _u +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (_u *UserUpdate) SetNillableEmail(v *string) *UserUpdate { + if v != nil { + _u.SetEmail(*v) + } + return _u +} + +// SetPasswordHash sets the "password_hash" field. +func (_u *UserUpdate) SetPasswordHash(v string) *UserUpdate { + _u.mutation.SetPasswordHash(v) + return _u +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (_u *UserUpdate) SetNillablePasswordHash(v *string) *UserUpdate { + if v != nil { + _u.SetPasswordHash(*v) + } + return _u +} + +// SetRole sets the "role" field.
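+// The value is validated by user.RoleValidator when the update is saved (see check).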
+func (_u *UserUpdate) SetRole(v string) *UserUpdate { + _u.mutation.SetRole(v) + return _u +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (_u *UserUpdate) SetNillableRole(v *string) *UserUpdate { + if v != nil { + _u.SetRole(*v) + } + return _u +} + +// SetBalance sets the "balance" field. +func (_u *UserUpdate) SetBalance(v float64) *UserUpdate { + _u.mutation.ResetBalance() + _u.mutation.SetBalance(v) + return _u +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_u *UserUpdate) SetNillableBalance(v *float64) *UserUpdate { + if v != nil { + _u.SetBalance(*v) + } + return _u +} + +// AddBalance adds value to the "balance" field. +func (_u *UserUpdate) AddBalance(v float64) *UserUpdate { + _u.mutation.AddBalance(v) + return _u +} + +// SetConcurrency sets the "concurrency" field. +func (_u *UserUpdate) SetConcurrency(v int) *UserUpdate { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *UserUpdate) SetNillableConcurrency(v *int) *UserUpdate { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *UserUpdate) AddConcurrency(v int) *UserUpdate { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserUpdate) SetStatus(v string) *UserUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserUpdate) SetNillableStatus(v *string) *UserUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsername sets the "username" field. +func (_u *UserUpdate) SetUsername(v string) *UserUpdate { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *UserUpdate) SetNillableUsername(v *string) *UserUpdate { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserUpdate) SetNotes(v string) *UserUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserUpdate) SetNillableNotes(v *string) *UserUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_u *UserUpdate) AddAPIKeyIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. +func (_u *UserUpdate) AddAPIKeys(v ...*APIKey) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *UserUpdate) AddRedeemCodeIDs(ids ...int64) *UserUpdate { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdate) AddRedeemCodes(v ...*RedeemCode) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdate) AddSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.AddSubscriptionIDs(ids...) 
+ return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) AddSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdate) AddAssignedSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAssignedSubscriptionIDs(ids...) + return _u +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_u *UserUpdate) AddAllowedGroupIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAllowedGroupIDs(ids...) + return _u +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_u *UserUpdate) AddAllowedGroups(v ...*Group) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedGroupIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *UserUpdate) AddUsageLogIDs(ids ...int64) *UserUpdate { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *UserUpdate) AddUsageLogs(v ...*UsageLog) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// AddAttributeValueIDs adds the "attribute_values" edge to the UserAttributeValue entity by IDs. +func (_u *UserUpdate) AddAttributeValueIDs(ids ...int64) *UserUpdate { + _u.mutation.AddAttributeValueIDs(ids...) + return _u +} + +// AddAttributeValues adds the "attribute_values" edges to the UserAttributeValue entity. +func (_u *UserUpdate) AddAttributeValues(v ...*UserAttributeValue) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAttributeValueIDs(ids...) +} + +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_u *UserUpdate) AddPromoCodeUsageIDs(ids ...int64) *UserUpdate { + _u.mutation.AddPromoCodeUsageIDs(ids...) + return _u +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddPromoCodeUsageIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_u *UserUpdate) Mutation() *UserMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the APIKey entity. +func (_u *UserUpdate) ClearAPIKeys() *UserUpdate { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to APIKey entities by IDs. +func (_u *UserUpdate) RemoveAPIKeyIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to APIKey entities. 
+func (_u *UserUpdate) RemoveAPIKeys(v ...*APIKey) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdate) ClearRedeemCodes() *UserUpdate { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. +func (_u *UserUpdate) RemoveRedeemCodeIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. +func (_u *UserUpdate) RemoveRedeemCodes(v ...*RedeemCode) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) ClearSubscriptions() *UserUpdate { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdate) RemoveSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *UserUpdate) RemoveSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) +} + +// ClearAssignedSubscriptions clears all "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdate) ClearAssignedSubscriptions() *UserUpdate { + _u.mutation.ClearAssignedSubscriptions() + return _u +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdate) RemoveAssignedSubscriptionIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAssignedSubscriptionIDs(ids...) + return _u +} + +// RemoveAssignedSubscriptions removes "assigned_subscriptions" edges to UserSubscription entities. +func (_u *UserUpdate) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAssignedSubscriptionIDs(ids...) +} + +// ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. +func (_u *UserUpdate) ClearAllowedGroups() *UserUpdate { + _u.mutation.ClearAllowedGroups() + return _u +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to Group entities by IDs. +func (_u *UserUpdate) RemoveAllowedGroupIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAllowedGroupIDs(ids...) + return _u +} + +// RemoveAllowedGroups removes "allowed_groups" edges to Group entities. +func (_u *UserUpdate) RemoveAllowedGroups(v ...*Group) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedGroupIDs(ids...) +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *UserUpdate) ClearUsageLogs() *UserUpdate { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *UserUpdate) RemoveUsageLogIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. 
+func (_u *UserUpdate) RemoveUsageLogs(v ...*UsageLog) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// ClearAttributeValues clears all "attribute_values" edges to the UserAttributeValue entity. +func (_u *UserUpdate) ClearAttributeValues() *UserUpdate { + _u.mutation.ClearAttributeValues() + return _u +} + +// RemoveAttributeValueIDs removes the "attribute_values" edge to UserAttributeValue entities by IDs. +func (_u *UserUpdate) RemoveAttributeValueIDs(ids ...int64) *UserUpdate { + _u.mutation.RemoveAttributeValueIDs(ids...) + return _u +} + +// RemoveAttributeValues removes "attribute_values" edges to UserAttributeValue entities. +func (_u *UserUpdate) RemoveAttributeValues(v ...*UserAttributeValue) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAttributeValueIDs(ids...) +} + +// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdate) ClearPromoCodeUsages() *UserUpdate { + _u.mutation.ClearPromoCodeUsages() + return _u +} + +// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs. +func (_u *UserUpdate) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdate { + _u.mutation.RemovePromoCodeUsageIDs(ids...) + return _u +} + +// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities. +func (_u *UserUpdate) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemovePromoCodeUsageIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if user.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized user.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := user.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
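+// Each failed validator is reported as a *ValidationError that names the offending field.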
+func (_u *UserUpdate) check() error { + if v, ok := _u.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := _u.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := _u.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + return nil +} + +func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(user.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := _u.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := _u.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + } + if value, ok := _u.mutation.Balance(); ok { + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBalance(); ok { + _spec.AddField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(user.FieldNotes, field.TypeString, value) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: 
user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if 
_u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAssignedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedGroupsIDs(); len(nodes) > 0 && !_u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedGroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + 
Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AttributeValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAttributeValuesIDs(); len(nodes) > 0 && !_u.mutation.AttributeValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AttributeValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = 
append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserUpdateOne) SetUpdatedAt(v time.Time) *UserUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserUpdateOne) SetDeletedAt(v time.Time) *UserUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableDeletedAt(v *time.Time) *UserUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserUpdateOne) ClearDeletedAt() *UserUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetEmail sets the "email" field. +func (_u *UserUpdateOne) SetEmail(v string) *UserUpdateOne { + _u.mutation.SetEmail(v) + return _u +} + +// SetNillableEmail sets the "email" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableEmail(v *string) *UserUpdateOne { + if v != nil { + _u.SetEmail(*v) + } + return _u +} + +// SetPasswordHash sets the "password_hash" field. +func (_u *UserUpdateOne) SetPasswordHash(v string) *UserUpdateOne { + _u.mutation.SetPasswordHash(v) + return _u +} + +// SetNillablePasswordHash sets the "password_hash" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillablePasswordHash(v *string) *UserUpdateOne { + if v != nil { + _u.SetPasswordHash(*v) + } + return _u +} + +// SetRole sets the "role" field. +func (_u *UserUpdateOne) SetRole(v string) *UserUpdateOne { + _u.mutation.SetRole(v) + return _u +} + +// SetNillableRole sets the "role" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableRole(v *string) *UserUpdateOne { + if v != nil { + _u.SetRole(*v) + } + return _u +} + +// SetBalance sets the "balance" field. +func (_u *UserUpdateOne) SetBalance(v float64) *UserUpdateOne { + _u.mutation.ResetBalance() + _u.mutation.SetBalance(v) + return _u +} + +// SetNillableBalance sets the "balance" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableBalance(v *float64) *UserUpdateOne { + if v != nil { + _u.SetBalance(*v) + } + return _u +} + +// AddBalance adds value to the "balance" field. +func (_u *UserUpdateOne) AddBalance(v float64) *UserUpdateOne { + _u.mutation.AddBalance(v) + return _u +} + +// SetConcurrency sets the "concurrency" field. 
+func (_u *UserUpdateOne) SetConcurrency(v int) *UserUpdateOne { + _u.mutation.ResetConcurrency() + _u.mutation.SetConcurrency(v) + return _u +} + +// SetNillableConcurrency sets the "concurrency" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableConcurrency(v *int) *UserUpdateOne { + if v != nil { + _u.SetConcurrency(*v) + } + return _u +} + +// AddConcurrency adds value to the "concurrency" field. +func (_u *UserUpdateOne) AddConcurrency(v int) *UserUpdateOne { + _u.mutation.AddConcurrency(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserUpdateOne) SetStatus(v string) *UserUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableStatus(v *string) *UserUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetUsername sets the "username" field. +func (_u *UserUpdateOne) SetUsername(v string) *UserUpdateOne { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableUsername(v *string) *UserUpdateOne { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserUpdateOne) SetNotes(v string) *UserUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserUpdateOne) SetNillableNotes(v *string) *UserUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. +func (_u *UserUpdateOne) AddAPIKeyIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAPIKeyIDs(ids...) + return _u +} + +// AddAPIKeys adds the "api_keys" edges to the APIKey entity. +func (_u *UserUpdateOne) AddAPIKeys(v ...*APIKey) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAPIKeyIDs(ids...) +} + +// AddRedeemCodeIDs adds the "redeem_codes" edge to the RedeemCode entity by IDs. +func (_u *UserUpdateOne) AddRedeemCodeIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddRedeemCodeIDs(ids...) + return _u +} + +// AddRedeemCodes adds the "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdateOne) AddRedeemCodes(v ...*RedeemCode) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddRedeemCodeIDs(ids...) +} + +// AddSubscriptionIDs adds the "subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdateOne) AddSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddSubscriptionIDs(ids...) + return _u +} + +// AddSubscriptions adds the "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) AddSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddSubscriptionIDs(ids...) +} + +// AddAssignedSubscriptionIDs adds the "assigned_subscriptions" edge to the UserSubscription entity by IDs. +func (_u *UserUpdateOne) AddAssignedSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAssignedSubscriptionIDs(ids...) + return _u +} + +// AddAssignedSubscriptions adds the "assigned_subscriptions" edges to the UserSubscription entity. 
+func (_u *UserUpdateOne) AddAssignedSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAssignedSubscriptionIDs(ids...) +} + +// AddAllowedGroupIDs adds the "allowed_groups" edge to the Group entity by IDs. +func (_u *UserUpdateOne) AddAllowedGroupIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAllowedGroupIDs(ids...) + return _u +} + +// AddAllowedGroups adds the "allowed_groups" edges to the Group entity. +func (_u *UserUpdateOne) AddAllowedGroups(v ...*Group) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAllowedGroupIDs(ids...) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *UserUpdateOne) AddUsageLogIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *UserUpdateOne) AddUsageLogs(v ...*UsageLog) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// AddAttributeValueIDs adds the "attribute_values" edge to the UserAttributeValue entity by IDs. +func (_u *UserUpdateOne) AddAttributeValueIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddAttributeValueIDs(ids...) + return _u +} + +// AddAttributeValues adds the "attribute_values" edges to the UserAttributeValue entity. +func (_u *UserUpdateOne) AddAttributeValues(v ...*UserAttributeValue) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddAttributeValueIDs(ids...) +} + +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_u *UserUpdateOne) AddPromoCodeUsageIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddPromoCodeUsageIDs(ids...) + return _u +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdateOne) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddPromoCodeUsageIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (_u *UserUpdateOne) Mutation() *UserMutation { + return _u.mutation +} + +// ClearAPIKeys clears all "api_keys" edges to the APIKey entity. +func (_u *UserUpdateOne) ClearAPIKeys() *UserUpdateOne { + _u.mutation.ClearAPIKeys() + return _u +} + +// RemoveAPIKeyIDs removes the "api_keys" edge to APIKey entities by IDs. +func (_u *UserUpdateOne) RemoveAPIKeyIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAPIKeyIDs(ids...) + return _u +} + +// RemoveAPIKeys removes "api_keys" edges to APIKey entities. +func (_u *UserUpdateOne) RemoveAPIKeys(v ...*APIKey) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAPIKeyIDs(ids...) +} + +// ClearRedeemCodes clears all "redeem_codes" edges to the RedeemCode entity. +func (_u *UserUpdateOne) ClearRedeemCodes() *UserUpdateOne { + _u.mutation.ClearRedeemCodes() + return _u +} + +// RemoveRedeemCodeIDs removes the "redeem_codes" edge to RedeemCode entities by IDs. +func (_u *UserUpdateOne) RemoveRedeemCodeIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveRedeemCodeIDs(ids...) + return _u +} + +// RemoveRedeemCodes removes "redeem_codes" edges to RedeemCode entities. 
+func (_u *UserUpdateOne) RemoveRedeemCodes(v ...*RedeemCode) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveRedeemCodeIDs(ids...) +} + +// ClearSubscriptions clears all "subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) ClearSubscriptions() *UserUpdateOne { + _u.mutation.ClearSubscriptions() + return _u +} + +// RemoveSubscriptionIDs removes the "subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdateOne) RemoveSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveSubscriptionIDs(ids...) + return _u +} + +// RemoveSubscriptions removes "subscriptions" edges to UserSubscription entities. +func (_u *UserUpdateOne) RemoveSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveSubscriptionIDs(ids...) +} + +// ClearAssignedSubscriptions clears all "assigned_subscriptions" edges to the UserSubscription entity. +func (_u *UserUpdateOne) ClearAssignedSubscriptions() *UserUpdateOne { + _u.mutation.ClearAssignedSubscriptions() + return _u +} + +// RemoveAssignedSubscriptionIDs removes the "assigned_subscriptions" edge to UserSubscription entities by IDs. +func (_u *UserUpdateOne) RemoveAssignedSubscriptionIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAssignedSubscriptionIDs(ids...) + return _u +} + +// RemoveAssignedSubscriptions removes "assigned_subscriptions" edges to UserSubscription entities. +func (_u *UserUpdateOne) RemoveAssignedSubscriptions(v ...*UserSubscription) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAssignedSubscriptionIDs(ids...) +} + +// ClearAllowedGroups clears all "allowed_groups" edges to the Group entity. +func (_u *UserUpdateOne) ClearAllowedGroups() *UserUpdateOne { + _u.mutation.ClearAllowedGroups() + return _u +} + +// RemoveAllowedGroupIDs removes the "allowed_groups" edge to Group entities by IDs. +func (_u *UserUpdateOne) RemoveAllowedGroupIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveAllowedGroupIDs(ids...) + return _u +} + +// RemoveAllowedGroups removes "allowed_groups" edges to Group entities. +func (_u *UserUpdateOne) RemoveAllowedGroups(v ...*Group) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveAllowedGroupIDs(ids...) +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *UserUpdateOne) ClearUsageLogs() *UserUpdateOne { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *UserUpdateOne) RemoveUsageLogIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *UserUpdateOne) RemoveUsageLogs(v ...*UsageLog) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// ClearAttributeValues clears all "attribute_values" edges to the UserAttributeValue entity. +func (_u *UserUpdateOne) ClearAttributeValues() *UserUpdateOne { + _u.mutation.ClearAttributeValues() + return _u +} + +// RemoveAttributeValueIDs removes the "attribute_values" edge to UserAttributeValue entities by IDs. 
+func (_u *UserUpdateOne) RemoveAttributeValueIDs(ids ...int64) *UserUpdateOne {
+	_u.mutation.RemoveAttributeValueIDs(ids...)
+	return _u
+}
+
+// RemoveAttributeValues removes "attribute_values" edges to UserAttributeValue entities.
+func (_u *UserUpdateOne) RemoveAttributeValues(v ...*UserAttributeValue) *UserUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveAttributeValueIDs(ids...)
+}
+
+// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity.
+func (_u *UserUpdateOne) ClearPromoCodeUsages() *UserUpdateOne {
+	_u.mutation.ClearPromoCodeUsages()
+	return _u
+}
+
+// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs.
+func (_u *UserUpdateOne) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdateOne {
+	_u.mutation.RemovePromoCodeUsageIDs(ids...)
+	return _u
+}
+
+// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities.
+func (_u *UserUpdateOne) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemovePromoCodeUsageIDs(ids...)
+}
+
+// Where appends a list of predicates to the UserUpdateOne builder.
+func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (_u *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne {
+	_u.fields = append([]string{field}, fields...)
+	return _u
+}
+
+// Save executes the query and returns the updated User entity.
+func (_u *UserUpdateOne) Save(ctx context.Context) (*User, error) {
+	if err := _u.defaults(); err != nil {
+		return nil, err
+	}
+	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *UserUpdateOne) SaveX(ctx context.Context) *User {
+	node, err := _u.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return node
+}
+
+// Exec executes the query on the entity.
+func (_u *UserUpdateOne) Exec(ctx context.Context) error {
+	_, err := _u.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_u *UserUpdateOne) ExecX(ctx context.Context) {
+	if err := _u.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// defaults sets the default values of the builder before save.
+func (_u *UserUpdateOne) defaults() error {
+	if _, ok := _u.mutation.UpdatedAt(); !ok {
+		if user.UpdateDefaultUpdatedAt == nil {
+			return fmt.Errorf("ent: uninitialized user.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
+		}
+		v := user.UpdateDefaultUpdatedAt()
+		_u.mutation.SetUpdatedAt(v)
+	}
+	return nil
+}
+
+// check runs all checks and user-defined validators on the builder.
+func (_u *UserUpdateOne) check() error { + if v, ok := _u.mutation.Email(); ok { + if err := user.EmailValidator(v); err != nil { + return &ValidationError{Name: "email", err: fmt.Errorf(`ent: validator failed for field "User.email": %w`, err)} + } + } + if v, ok := _u.mutation.PasswordHash(); ok { + if err := user.PasswordHashValidator(v); err != nil { + return &ValidationError{Name: "password_hash", err: fmt.Errorf(`ent: validator failed for field "User.password_hash": %w`, err)} + } + } + if v, ok := _u.mutation.Role(); ok { + if err := user.RoleValidator(v); err != nil { + return &ValidationError{Name: "role", err: fmt.Errorf(`ent: validator failed for field "User.role": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := user.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "User.status": %w`, err)} + } + } + if v, ok := _u.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + return nil +} + +func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(user.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(user.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Email(); ok { + _spec.SetField(user.FieldEmail, field.TypeString, value) + } + if value, ok := _u.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeString, value) + } + if value, ok := _u.mutation.Role(); ok { + _spec.SetField(user.FieldRole, field.TypeString, value) + } + if value, ok := _u.mutation.Balance(); ok { + _spec.SetField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBalance(); ok { + _spec.AddField(user.FieldBalance, field.TypeFloat64, value) + } + if value, ok := _u.mutation.Concurrency(); ok { + _spec.SetField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedConcurrency(); ok { + _spec.AddField(user.FieldConcurrency, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(user.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + } + if value, ok := _u.mutation.Notes(); ok { + 
_spec.SetField(user.FieldNotes, field.TypeString, value) + } + if _u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAPIKeysIDs(); len(nodes) > 0 && !_u.mutation.APIKeysCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.APIKeysIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.APIKeysTable, + Columns: []string{user.APIKeysColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedRedeemCodesIDs(); len(nodes) > 0 && !_u.mutation.RedeemCodesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RedeemCodesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.RedeemCodesTable, + Columns: []string{user.RedeemCodesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.SubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.SubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.SubscriptionsTable, + Columns: []string{user.SubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAssignedSubscriptionsIDs(); len(nodes) > 0 && !_u.mutation.AssignedSubscriptionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedSubscriptionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AssignedSubscriptionsTable, + Columns: []string{user.AssignedSubscriptionsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAllowedGroupsIDs(); len(nodes) > 0 && !_u.mutation.AllowedGroupsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AllowedGroupsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: user.AllowedGroupsTable, + Columns: user.AllowedGroupsPrimaryKey, + 
Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + createE := &UserAllowedGroupCreate{config: _u.config, mutation: newUserAllowedGroupMutation(_u.config, OpCreate)} + createE.defaults() + _, specE := createE.createSpec() + edge.Target.Fields = specE.Fields + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.UsageLogsTable, + Columns: []string{user.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AttributeValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedAttributeValuesIDs(); len(nodes) > 0 && !_u.mutation.AttributeValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AttributeValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.AttributeValuesTable, + Columns: []string{user.AttributeValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + 
_spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &User{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/userallowedgroup.go b/backend/ent/userallowedgroup.go new file mode 100644 index 00000000..93cbd374 --- /dev/null +++ b/backend/ent/userallowedgroup.go @@ -0,0 +1,165 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroup is the model entity for the UserAllowedGroup schema. +type UserAllowedGroup struct { + config `json:"-"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAllowedGroupQuery when eager-loading is set. + Edges UserAllowedGroupEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAllowedGroupEdges holds the relations/edges for other nodes in the graph. +type UserAllowedGroupEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
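+//
+// Illustrative sketch (client and ctx are placeholders): the edge must be
+// requested with WithUser during the query for UserOrErr to succeed:
+//
+//	uag := client.UserAllowedGroup.Query().
+//		WithUser().
+//		FirstX(ctx)
+//	owner, err := uag.Edges.UserOrErr()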
+func (e UserAllowedGroupEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAllowedGroupEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UserAllowedGroup) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userallowedgroup.FieldUserID, userallowedgroup.FieldGroupID: + values[i] = new(sql.NullInt64) + case userallowedgroup.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAllowedGroup fields. +func (_m *UserAllowedGroup) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userallowedgroup.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case userallowedgroup.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case userallowedgroup.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserAllowedGroup. +// This includes values selected through modifiers, order, etc. +func (_m *UserAllowedGroup) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserAllowedGroup entity. +func (_m *UserAllowedGroup) QueryUser() *UserQuery { + return NewUserAllowedGroupClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the UserAllowedGroup entity. +func (_m *UserAllowedGroup) QueryGroup() *GroupQuery { + return NewUserAllowedGroupClient(_m.config).QueryGroup(_m) +} + +// Update returns a builder for updating this UserAllowedGroup. +// Note that you need to call UserAllowedGroup.Unwrap() before calling this method if this UserAllowedGroup +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserAllowedGroup) Update() *UserAllowedGroupUpdateOne { + return NewUserAllowedGroupClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAllowedGroup entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. 
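+//
+// Illustrative sketch (transaction setup and error handling elided):
+//
+//	tx, _ := client.Tx(ctx)
+//	uag := tx.UserAllowedGroup.Query().FirstX(ctx)
+//	_ = tx.Commit()
+//	uag = uag.Unwrap() // rebinds uag to the underlying driver, e.g. before uag.Update()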
+func (_m *UserAllowedGroup) Unwrap() *UserAllowedGroup { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAllowedGroup is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserAllowedGroup) String() string { + var builder strings.Builder + builder.WriteString("UserAllowedGroup(") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// UserAllowedGroups is a parsable slice of UserAllowedGroup. +type UserAllowedGroups []*UserAllowedGroup diff --git a/backend/ent/userallowedgroup/userallowedgroup.go b/backend/ent/userallowedgroup/userallowedgroup.go new file mode 100644 index 00000000..56d604c8 --- /dev/null +++ b/backend/ent/userallowedgroup/userallowedgroup.go @@ -0,0 +1,113 @@ +// Code generated by ent, DO NOT EDIT. + +package userallowedgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userallowedgroup type in the database. + Label = "user_allowed_group" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // UserFieldID holds the string denoting the ID field of the User. + UserFieldID = "id" + // GroupFieldID holds the string denoting the ID field of the Group. + GroupFieldID = "id" + // Table holds the table name of the userallowedgroup in the database. + Table = "user_allowed_groups" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_allowed_groups" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "user_allowed_groups" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" +) + +// Columns holds all SQL columns for userallowedgroup fields. +var Columns = []string{ + FieldUserID, + FieldGroupID, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// OrderOption defines the ordering options for the UserAllowedGroup queries. 
+type OrderOption func(*sql.Selector) + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, UserColumn), + sqlgraph.To(UserInverseTable, UserFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, GroupColumn), + sqlgraph.To(GroupInverseTable, GroupFieldID), + sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn), + ) +} diff --git a/backend/ent/userallowedgroup/where.go b/backend/ent/userallowedgroup/where.go new file mode 100644 index 00000000..0951201b --- /dev/null +++ b/backend/ent/userallowedgroup/where.go @@ -0,0 +1,167 @@ +// Code generated by ent, DO NOT EDIT. + +package userallowedgroup + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldUserID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldGroupID, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.FieldNotIn(FieldUserID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. 
+func GroupIDEQ(v int64) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldEQ(FieldGroupID, v))
+}
+
+// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
+func GroupIDNEQ(v int64) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldNEQ(FieldGroupID, v))
+}
+
+// GroupIDIn applies the In predicate on the "group_id" field.
+func GroupIDIn(vs ...int64) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldIn(FieldGroupID, vs...))
+}
+
+// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
+func GroupIDNotIn(vs ...int64) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldNotIn(FieldGroupID, vs...))
+}
+
+// CreatedAtEQ applies the EQ predicate on the "created_at" field.
+func CreatedAtEQ(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
+func CreatedAtNEQ(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldNEQ(FieldCreatedAt, v))
+}
+
+// CreatedAtIn applies the In predicate on the "created_at" field.
+func CreatedAtIn(vs ...time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
+func CreatedAtNotIn(vs ...time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldNotIn(FieldCreatedAt, vs...))
+}
+
+// CreatedAtGT applies the GT predicate on the "created_at" field.
+func CreatedAtGT(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldGT(FieldCreatedAt, v))
+}
+
+// CreatedAtGTE applies the GTE predicate on the "created_at" field.
+func CreatedAtGTE(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldGTE(FieldCreatedAt, v))
+}
+
+// CreatedAtLT applies the LT predicate on the "created_at" field.
+func CreatedAtLT(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldLT(FieldCreatedAt, v))
+}
+
+// CreatedAtLTE applies the LTE predicate on the "created_at" field.
+func CreatedAtLTE(v time.Time) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(sql.FieldLTE(FieldCreatedAt, v))
+}
+
+// HasUser applies the HasEdge predicate on the "user" edge.
+func HasUser() predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, UserColumn),
+			sqlgraph.Edge(sqlgraph.M2O, false, UserTable, UserColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(func(s *sql.Selector) {
+		step := newUserStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasGroup applies the HasEdge predicate on the "group" edge.
+func HasGroup() predicate.UserAllowedGroup {
+	return predicate.UserAllowedGroup(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, GroupColumn),
+			sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasGroupWith applies the HasEdge predicate on the "group" edge with given conditions (other predicates).
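+//
+// Illustrative sketch (client and ctx are placeholders; any predicate from the
+// group package works here):
+//
+//	rows, err := client.UserAllowedGroup.Query().
+//		Where(HasGroupWith(group.IDEQ(7))).
+//		All(ctx)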
+func HasGroupWith(preds ...predicate.Group) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserAllowedGroup) predicate.UserAllowedGroup { + return predicate.UserAllowedGroup(sql.NotPredicates(p)) +} diff --git a/backend/ent/userallowedgroup_create.go b/backend/ent/userallowedgroup_create.go new file mode 100644 index 00000000..2b04a757 --- /dev/null +++ b/backend/ent/userallowedgroup_create.go @@ -0,0 +1,568 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupCreate is the builder for creating a UserAllowedGroup entity. +type UserAllowedGroupCreate struct { + config + mutation *UserAllowedGroupMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetUserID sets the "user_id" field. +func (_c *UserAllowedGroupCreate) SetUserID(v int64) *UserAllowedGroupCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *UserAllowedGroupCreate) SetGroupID(v int64) *UserAllowedGroupCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserAllowedGroupCreate) SetCreatedAt(v time.Time) *UserAllowedGroupCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserAllowedGroupCreate) SetNillableCreatedAt(v *time.Time) *UserAllowedGroupCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UserAllowedGroupCreate) SetUser(v *User) *UserAllowedGroupCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *UserAllowedGroupCreate) SetGroup(v *Group) *UserAllowedGroupCreate { + return _c.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. +func (_c *UserAllowedGroupCreate) Mutation() *UserAllowedGroupMutation { + return _c.mutation +} + +// Save creates the UserAllowedGroup in the database. +func (_c *UserAllowedGroupCreate) Save(ctx context.Context) (*UserAllowedGroup, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAllowedGroupCreate) SaveX(ctx context.Context) *UserAllowedGroup { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *UserAllowedGroupCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAllowedGroupCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAllowedGroupCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := userallowedgroup.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *UserAllowedGroupCreate) check() error { + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserAllowedGroup.user_id"`)} + } + if _, ok := _c.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "UserAllowedGroup.group_id"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAllowedGroup.created_at"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserAllowedGroup.user"`)} + } + if len(_c.mutation.GroupIDs()) == 0 { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "UserAllowedGroup.group"`)} + } + return nil +} + +func (_c *UserAllowedGroupCreate) sqlSave(ctx context.Context) (*UserAllowedGroup, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + return _node, nil +} + +func (_c *UserAllowedGroupCreate) createSpec() (*UserAllowedGroup, *sqlgraph.CreateSpec) { + var ( + _node = &UserAllowedGroup{config: _c.config} + _spec = sqlgraph.NewCreateSpec(userallowedgroup.Table, nil) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(userallowedgroup.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.UserAllowedGroup.Create(). +// SetUserID(v). 
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAllowedGroupUpsert) {
+//			u.SetUserID(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *UserAllowedGroupCreate) OnConflict(opts ...sql.ConflictOption) *UserAllowedGroupUpsertOne {
+	_c.conflict = opts
+	return &UserAllowedGroupUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserAllowedGroupCreate) OnConflictColumns(columns ...string) *UserAllowedGroupUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserAllowedGroupUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// UserAllowedGroupUpsertOne is the builder for "upsert"-ing
+	//  one UserAllowedGroup node.
+	UserAllowedGroupUpsertOne struct {
+		create *UserAllowedGroupCreate
+	}
+
+	// UserAllowedGroupUpsert is the "OnConflict" setter.
+	UserAllowedGroupUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUserID sets the "user_id" field.
+func (u *UserAllowedGroupUpsert) SetUserID(v int64) *UserAllowedGroupUpsert {
+	u.Set(userallowedgroup.FieldUserID, v)
+	return u
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *UserAllowedGroupUpsert) UpdateUserID() *UserAllowedGroupUpsert {
+	u.SetExcluded(userallowedgroup.FieldUserID)
+	return u
+}
+
+// SetGroupID sets the "group_id" field.
+func (u *UserAllowedGroupUpsert) SetGroupID(v int64) *UserAllowedGroupUpsert {
+	u.Set(userallowedgroup.FieldGroupID, v)
+	return u
+}
+
+// UpdateGroupID sets the "group_id" field to the value that was provided on create.
+func (u *UserAllowedGroupUpsert) UpdateGroupID() *UserAllowedGroupUpsert {
+	u.SetExcluded(userallowedgroup.FieldGroupID)
+	return u
+}
+
+// UpdateNewValues updates the mutable fields using the new values that were set on create.
+// Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+//		Exec(ctx)
+func (u *UserAllowedGroupUpsertOne) UpdateNewValues() *UserAllowedGroupUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		if _, exists := u.create.mutation.CreatedAt(); exists {
+			s.SetIgnore(userallowedgroup.FieldCreatedAt)
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *UserAllowedGroupUpsertOne) Ignore() *UserAllowedGroupUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *UserAllowedGroupUpsertOne) DoNothing() *UserAllowedGroupUpsertOne {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding the fields' `UPDATE` values. See the UserAllowedGroupCreate.OnConflict
+// documentation for more info.
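+//
+// Illustrative sketch (ctx, uid, and gid are placeholders; it assumes a unique
+// index over (user_id, group_id) as the conflict target):
+//
+//	client.UserAllowedGroup.Create().
+//		SetUserID(uid).
+//		SetGroupID(gid).
+//		OnConflictColumns(userallowedgroup.FieldUserID, userallowedgroup.FieldGroupID).
+//		Update(func(u *ent.UserAllowedGroupUpsert) {
+//			u.UpdateGroupID()
+//		}).
+//		Exec(ctx)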
+func (u *UserAllowedGroupUpsertOne) Update(set func(*UserAllowedGroupUpsert)) *UserAllowedGroupUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAllowedGroupUpsert{UpdateSet: update}) + })) + return u +} + +// SetUserID sets the "user_id" field. +func (u *UserAllowedGroupUpsertOne) SetUserID(v int64) *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertOne) UpdateUserID() *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserAllowedGroupUpsertOne) SetGroupID(v int64) *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserAllowedGroupUpsertOne) UpdateGroupID() *UserAllowedGroupUpsertOne { + return u.Update(func(s *UserAllowedGroupUpsert) { + s.UpdateGroupID() + }) +} + +// Exec executes the query. +func (u *UserAllowedGroupUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAllowedGroupCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAllowedGroupUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// UserAllowedGroupCreateBulk is the builder for creating many UserAllowedGroup entities in bulk. +type UserAllowedGroupCreateBulk struct { + config + err error + builders []*UserAllowedGroupCreate + conflict []sql.ConflictOption +} + +// Save creates the UserAllowedGroup entities in the database. +func (_c *UserAllowedGroupCreateBulk) Save(ctx context.Context) ([]*UserAllowedGroup, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*UserAllowedGroup, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserAllowedGroupMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_c *UserAllowedGroupCreateBulk) SaveX(ctx context.Context) []*UserAllowedGroup {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UserAllowedGroupCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *UserAllowedGroupCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserAllowedGroup.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAllowedGroupUpsert) {
+//			u.SetUserID(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *UserAllowedGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAllowedGroupUpsertBulk {
+	_c.conflict = opts
+	return &UserAllowedGroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserAllowedGroupCreateBulk) OnConflictColumns(columns ...string) *UserAllowedGroupUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserAllowedGroupUpsertBulk{
+		create: _c,
+	}
+}
+
+// UserAllowedGroupUpsertBulk is the builder for "upsert"-ing
+// a bulk of UserAllowedGroup nodes.
+type UserAllowedGroupUpsertBulk struct {
+	create *UserAllowedGroupCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+//		Exec(ctx)
+func (u *UserAllowedGroupUpsertBulk) UpdateNewValues() *UserAllowedGroupUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		for _, b := range u.create.builders {
+			if _, exists := b.mutation.CreatedAt(); exists {
+				s.SetIgnore(userallowedgroup.FieldCreatedAt)
+			}
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.UserAllowedGroup.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *UserAllowedGroupUpsertBulk) Ignore() *UserAllowedGroupUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *UserAllowedGroupUpsertBulk) DoNothing() *UserAllowedGroupUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding the fields' `UPDATE` values. See the UserAllowedGroupCreateBulk.OnConflict
+// documentation for more info.
+func (u *UserAllowedGroupUpsertBulk) Update(set func(*UserAllowedGroupUpsert)) *UserAllowedGroupUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
+		set(&UserAllowedGroupUpsert{UpdateSet: update})
+	}))
+	return u
+}
+
+// SetUserID sets the "user_id" field.
+func (u *UserAllowedGroupUpsertBulk) SetUserID(v int64) *UserAllowedGroupUpsertBulk {
+	return u.Update(func(s *UserAllowedGroupUpsert) {
+		s.SetUserID(v)
+	})
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *UserAllowedGroupUpsertBulk) UpdateUserID() *UserAllowedGroupUpsertBulk {
+	return u.Update(func(s *UserAllowedGroupUpsert) {
+		s.UpdateUserID()
+	})
+}
+
+// SetGroupID sets the "group_id" field.
+func (u *UserAllowedGroupUpsertBulk) SetGroupID(v int64) *UserAllowedGroupUpsertBulk {
+	return u.Update(func(s *UserAllowedGroupUpsert) {
+		s.SetGroupID(v)
+	})
+}
+
+// UpdateGroupID sets the "group_id" field to the value that was provided on create.
+func (u *UserAllowedGroupUpsertBulk) UpdateGroupID() *UserAllowedGroupUpsertBulk {
+	return u.Update(func(s *UserAllowedGroupUpsert) {
+		s.UpdateGroupID()
+	})
+}
+
+// Exec executes the query.
+func (u *UserAllowedGroupUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAllowedGroupCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserAllowedGroupCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserAllowedGroupUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/userallowedgroup_delete.go b/backend/ent/userallowedgroup_delete.go
new file mode 100644
index 00000000..e366ea97
--- /dev/null
+++ b/backend/ent/userallowedgroup_delete.go
@@ -0,0 +1,87 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
+)
+
+// UserAllowedGroupDelete is the builder for deleting a UserAllowedGroup entity.
+type UserAllowedGroupDelete struct {
+	config
+	hooks    []Hook
+	mutation *UserAllowedGroupMutation
+}
+
+// Where appends a list of predicates to the UserAllowedGroupDelete builder.
+func (_d *UserAllowedGroupDelete) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *UserAllowedGroupDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserAllowedGroupDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *UserAllowedGroupDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(userallowedgroup.Table, nil)
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// UserAllowedGroupDeleteOne is the builder for deleting a single UserAllowedGroup entity.
+type UserAllowedGroupDeleteOne struct {
+	_d *UserAllowedGroupDelete
+}
+
+// Where appends a list of predicates to the UserAllowedGroupDelete builder.
+func (_d *UserAllowedGroupDeleteOne) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UserAllowedGroupDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{userallowedgroup.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserAllowedGroupDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/userallowedgroup_query.go b/backend/ent/userallowedgroup_query.go
new file mode 100644
index 00000000..527ddc77
--- /dev/null
+++ b/backend/ent/userallowedgroup_query.go
@@ -0,0 +1,640 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
+)
+
+// UserAllowedGroupQuery is the builder for querying UserAllowedGroup entities.
+type UserAllowedGroupQuery struct {
+	config
+	ctx        *QueryContext
+	order      []userallowedgroup.OrderOption
+	inters     []Interceptor
+	predicates []predicate.UserAllowedGroup
+	withUser   *UserQuery
+	withGroup  *GroupQuery
+	modifiers  []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the UserAllowedGroupQuery builder.
+func (_q *UserAllowedGroupQuery) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *UserAllowedGroupQuery) Limit(limit int) *UserAllowedGroupQuery {
+	_q.ctx.Limit = &limit
+	return _q
+}
+
+// Offset to start from.
+func (_q *UserAllowedGroupQuery) Offset(offset int) *UserAllowedGroupQuery {
+	_q.ctx.Offset = &offset
+	return _q
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
+func (_q *UserAllowedGroupQuery) Unique(unique bool) *UserAllowedGroupQuery {
+	_q.ctx.Unique = &unique
+	return _q
+}
+
+// Order specifies how the records should be ordered.
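+//
+// Illustrative sketch (client and ctx are placeholders): newest grants first.
+//
+//	rows, err := client.UserAllowedGroup.Query().
+//		Order(userallowedgroup.ByCreatedAt(sql.OrderDesc())).
+//		All(ctx)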
+func (_q *UserAllowedGroupQuery) Order(o ...userallowedgroup.OrderOption) *UserAllowedGroupQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UserAllowedGroupQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userallowedgroup.Table, userallowedgroup.UserColumn, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, userallowedgroup.UserTable, userallowedgroup.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *UserAllowedGroupQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userallowedgroup.Table, userallowedgroup.GroupColumn, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, userallowedgroup.GroupTable, userallowedgroup.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAllowedGroup entity from the query. +// Returns a *NotFoundError when no UserAllowedGroup was found. +func (_q *UserAllowedGroupQuery) First(ctx context.Context) (*UserAllowedGroup, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userallowedgroup.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) FirstX(ctx context.Context) *UserAllowedGroup { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// Only returns a single UserAllowedGroup entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserAllowedGroup entity is found. +// Returns a *NotFoundError when no UserAllowedGroup entities are found. +func (_q *UserAllowedGroupQuery) Only(ctx context.Context) (*UserAllowedGroup, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userallowedgroup.Label} + default: + return nil, &NotSingularError{userallowedgroup.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) OnlyX(ctx context.Context) *UserAllowedGroup { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// All executes the query and returns a list of UserAllowedGroups. 
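+//
+// A minimal eager-loading sketch (assuming a generated Client named `client`
+// and a caller-provided `ctx`):
+//
+//	items, err := client.UserAllowedGroup.Query().
+//	    WithUser().
+//	    WithGroup().
+//	    All(ctx)
+//	// On success, items[i].Edges.User and items[i].Edges.Group are
+//	// populated via batched neighbor queries rather than per-row lookups.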
+func (_q *UserAllowedGroupQuery) All(ctx context.Context) ([]*UserAllowedGroup, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAllowedGroup, *UserAllowedGroupQuery]() + return withInterceptors[[]*UserAllowedGroup](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) AllX(ctx context.Context) []*UserAllowedGroup { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// Count returns the count of the given query. +func (_q *UserAllowedGroupQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAllowedGroupQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserAllowedGroupQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.First(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserAllowedGroupQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserAllowedGroupQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserAllowedGroupQuery) Clone() *UserAllowedGroupQuery { + if _q == nil { + return nil + } + return &UserAllowedGroupQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]userallowedgroup.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserAllowedGroup{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAllowedGroupQuery) WithUser(opts ...func(*UserQuery)) *UserAllowedGroupQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserAllowedGroupQuery) WithGroup(opts ...func(*GroupQuery)) *UserAllowedGroupQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// UserID int64 `json:"user_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserAllowedGroup.Query(). +// GroupBy(userallowedgroup.FieldUserID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserAllowedGroupQuery) GroupBy(field string, fields ...string) *UserAllowedGroupGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserAllowedGroupGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = userallowedgroup.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// UserID int64 `json:"user_id,omitempty"` +// } +// +// client.UserAllowedGroup.Query(). +// Select(userallowedgroup.FieldUserID). +// Scan(ctx, &v) +func (_q *UserAllowedGroupQuery) Select(fields ...string) *UserAllowedGroupSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserAllowedGroupSelect{UserAllowedGroupQuery: _q} + sbuild.label = userallowedgroup.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserAllowedGroupSelect configured with the given aggregations. +func (_q *UserAllowedGroupQuery) Aggregate(fns ...AggregateFunc) *UserAllowedGroupSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAllowedGroupQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userallowedgroup.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAllowedGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAllowedGroup, error) { + var ( + nodes = []*UserAllowedGroup{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withGroup != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAllowedGroup).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAllowedGroup{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserAllowedGroup, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *UserAllowedGroup, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAllowedGroupQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserAllowedGroup, init func(*UserAllowedGroup), assign func(*UserAllowedGroup, *User)) error { + ids := 
make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAllowedGroup) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserAllowedGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*UserAllowedGroup, init func(*UserAllowedGroup), assign func(*UserAllowedGroup, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAllowedGroup) + for i := range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *UserAllowedGroupQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Unique = false + _spec.Node.Columns = nil + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserAllowedGroupQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(userallowedgroup.Table, userallowedgroup.Columns, nil) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + for i := range fields { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(userallowedgroup.FieldUserID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(userallowedgroup.FieldGroupID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(userallowedgroup.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = userallowedgroup.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserAllowedGroupQuery) ForUpdate(opts ...sql.LockOption) *UserAllowedGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserAllowedGroupQuery) ForShare(opts ...sql.LockOption) *UserAllowedGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UserAllowedGroupGroupBy is the group-by builder for UserAllowedGroup entities. +type UserAllowedGroupGroupBy struct { + selector + build *UserAllowedGroupQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserAllowedGroupGroupBy) Aggregate(fns ...AggregateFunc) *UserAllowedGroupGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserAllowedGroupGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAllowedGroupQuery, *UserAllowedGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAllowedGroupGroupBy) sqlScan(ctx context.Context, root *UserAllowedGroupQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAllowedGroupSelect is the builder for selecting fields of UserAllowedGroup entities. +type UserAllowedGroupSelect struct { + *UserAllowedGroupQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAllowedGroupSelect) Aggregate(fns ...AggregateFunc) *UserAllowedGroupSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *UserAllowedGroupSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAllowedGroupQuery, *UserAllowedGroupSelect](ctx, _s.UserAllowedGroupQuery, _s, _s.inters, v) +} + +func (_s *UserAllowedGroupSelect) sqlScan(ctx context.Context, root *UserAllowedGroupQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/userallowedgroup_update.go b/backend/ent/userallowedgroup_update.go new file mode 100644 index 00000000..27071b18 --- /dev/null +++ b/backend/ent/userallowedgroup_update.go @@ -0,0 +1,423 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" +) + +// UserAllowedGroupUpdate is the builder for updating UserAllowedGroup entities. +type UserAllowedGroupUpdate struct { + config + hooks []Hook + mutation *UserAllowedGroupMutation +} + +// Where appends a list predicates to the UserAllowedGroupUpdate builder. +func (_u *UserAllowedGroupUpdate) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserAllowedGroupUpdate) SetUserID(v int64) *UserAllowedGroupUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdate) SetNillableUserID(v *int64) *UserAllowedGroupUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserAllowedGroupUpdate) SetGroupID(v int64) *UserAllowedGroupUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdate) SetNillableGroupID(v *int64) *UserAllowedGroupUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdate) SetUser(v *User) *UserAllowedGroupUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdate) SetGroup(v *Group) *UserAllowedGroupUpdate { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. +func (_u *UserAllowedGroupUpdate) Mutation() *UserAllowedGroupMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdate) ClearUser() *UserAllowedGroupUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. 
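+//
+// A minimal bulk-update sketch (assuming a generated Client named `client`,
+// a caller-provided `ctx`, and the userallowedgroup.GroupID predicate from
+// the generated where.go):
+//
+//	n, err := client.UserAllowedGroup.Update().
+//	    Where(userallowedgroup.GroupID(1)).
+//	    SetGroupID(2).
+//	    Save(ctx)
+//	// n holds the number of rows updated.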
+func (_u *UserAllowedGroupUpdate) ClearGroup() *UserAllowedGroupUpdate { + _u.mutation.ClearGroup() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserAllowedGroupUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAllowedGroupUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAllowedGroupUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAllowedGroupUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAllowedGroupUpdate) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.group"`) + } + return nil +} + +func (_u *UserAllowedGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userallowedgroup.Table, userallowedgroup.Columns, sqlgraph.NewFieldSpec(userallowedgroup.FieldUserID, field.TypeInt64), sqlgraph.NewFieldSpec(userallowedgroup.FieldGroupID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, 
err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userallowedgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAllowedGroupUpdateOne is the builder for updating a single UserAllowedGroup entity. +type UserAllowedGroupUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAllowedGroupMutation +} + +// SetUserID sets the "user_id" field. +func (_u *UserAllowedGroupUpdateOne) SetUserID(v int64) *UserAllowedGroupUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdateOne) SetNillableUserID(v *int64) *UserAllowedGroupUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserAllowedGroupUpdateOne) SetGroupID(v int64) *UserAllowedGroupUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserAllowedGroupUpdateOne) SetNillableGroupID(v *int64) *UserAllowedGroupUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdateOne) SetUser(v *User) *UserAllowedGroupUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdateOne) SetGroup(v *Group) *UserAllowedGroupUpdateOne { + return _u.SetGroupID(v.ID) +} + +// Mutation returns the UserAllowedGroupMutation object of the builder. +func (_u *UserAllowedGroupUpdateOne) Mutation() *UserAllowedGroupMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAllowedGroupUpdateOne) ClearUser() *UserAllowedGroupUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserAllowedGroupUpdateOne) ClearGroup() *UserAllowedGroupUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// Where appends a list predicates to the UserAllowedGroupUpdate builder. +func (_u *UserAllowedGroupUpdateOne) Where(ps ...predicate.UserAllowedGroup) *UserAllowedGroupUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserAllowedGroupUpdateOne) Select(field string, fields ...string) *UserAllowedGroupUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserAllowedGroup entity. +func (_u *UserAllowedGroupUpdateOne) Save(ctx context.Context) (*UserAllowedGroup, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAllowedGroupUpdateOne) SaveX(ctx context.Context) *UserAllowedGroup { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAllowedGroupUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *UserAllowedGroupUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAllowedGroupUpdateOne) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAllowedGroup.group"`) + } + return nil +} + +func (_u *UserAllowedGroupUpdateOne) sqlSave(ctx context.Context) (_node *UserAllowedGroup, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userallowedgroup.Table, userallowedgroup.Columns, sqlgraph.NewFieldSpec(userallowedgroup.FieldUserID, field.TypeInt64), sqlgraph.NewFieldSpec(userallowedgroup.FieldGroupID, field.TypeInt64)) + if id, ok := _u.mutation.UserID(); !ok { + return nil, &ValidationError{Name: "user_id", err: errors.New(`ent: missing "UserAllowedGroup.user_id" for update`)} + } else { + _spec.Node.CompositeID[0].Value = id + } + if id, ok := _u.mutation.GroupID(); !ok { + return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "UserAllowedGroup.group_id" for update`)} + } else { + _spec.Node.CompositeID[1].Value = id + } + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, len(fields)) + for i, f := range fields { + if !userallowedgroup.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + _spec.Node.Columns[i] = f + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.UserTable, + Columns: []string{userallowedgroup.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: userallowedgroup.GroupTable, + Columns: []string{userallowedgroup.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = 
&UserAllowedGroup{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userallowedgroup.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/userattributedefinition.go b/backend/ent/userattributedefinition.go new file mode 100644 index 00000000..2ed86e4e --- /dev/null +++ b/backend/ent/userattributedefinition.go @@ -0,0 +1,276 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" +) + +// UserAttributeDefinition is the model entity for the UserAttributeDefinition schema. +type UserAttributeDefinition struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // Key holds the value of the "key" field. + Key string `json:"key,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // Type holds the value of the "type" field. + Type string `json:"type,omitempty"` + // Options holds the value of the "options" field. + Options []map[string]interface{} `json:"options,omitempty"` + // Required holds the value of the "required" field. + Required bool `json:"required,omitempty"` + // Validation holds the value of the "validation" field. + Validation map[string]interface{} `json:"validation,omitempty"` + // Placeholder holds the value of the "placeholder" field. + Placeholder string `json:"placeholder,omitempty"` + // DisplayOrder holds the value of the "display_order" field. + DisplayOrder int `json:"display_order,omitempty"` + // Enabled holds the value of the "enabled" field. + Enabled bool `json:"enabled,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAttributeDefinitionQuery when eager-loading is set. + Edges UserAttributeDefinitionEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAttributeDefinitionEdges holds the relations/edges for other nodes in the graph. +type UserAttributeDefinitionEdges struct { + // Values holds the value of the values edge. + Values []*UserAttributeValue `json:"values,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// ValuesOrErr returns the Values value or an error if the edge +// was not loaded in eager-loading. +func (e UserAttributeDefinitionEdges) ValuesOrErr() ([]*UserAttributeValue, error) { + if e.loadedTypes[0] { + return e.Values, nil + } + return nil, &NotLoadedError{edge: "values"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*UserAttributeDefinition) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userattributedefinition.FieldOptions, userattributedefinition.FieldValidation: + values[i] = new([]byte) + case userattributedefinition.FieldRequired, userattributedefinition.FieldEnabled: + values[i] = new(sql.NullBool) + case userattributedefinition.FieldID, userattributedefinition.FieldDisplayOrder: + values[i] = new(sql.NullInt64) + case userattributedefinition.FieldKey, userattributedefinition.FieldName, userattributedefinition.FieldDescription, userattributedefinition.FieldType, userattributedefinition.FieldPlaceholder: + values[i] = new(sql.NullString) + case userattributedefinition.FieldCreatedAt, userattributedefinition.FieldUpdatedAt, userattributedefinition.FieldDeletedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAttributeDefinition fields. +func (_m *UserAttributeDefinition) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userattributedefinition.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case userattributedefinition.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case userattributedefinition.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case userattributedefinition.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case userattributedefinition.FieldKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field key", values[i]) + } else if value.Valid { + _m.Key = value.String + } + case userattributedefinition.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case userattributedefinition.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + _m.Description = value.String + } + case userattributedefinition.FieldType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field type", values[i]) + } else if value.Valid { + _m.Type = value.String + } + case userattributedefinition.FieldOptions: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field options", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Options); err != nil { + return fmt.Errorf("unmarshal field options: %w", err) + } + } + case userattributedefinition.FieldRequired: + if 
value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field required", values[i]) + } else if value.Valid { + _m.Required = value.Bool + } + case userattributedefinition.FieldValidation: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field validation", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.Validation); err != nil { + return fmt.Errorf("unmarshal field validation: %w", err) + } + } + case userattributedefinition.FieldPlaceholder: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field placeholder", values[i]) + } else if value.Valid { + _m.Placeholder = value.String + } + case userattributedefinition.FieldDisplayOrder: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field display_order", values[i]) + } else if value.Valid { + _m.DisplayOrder = int(value.Int64) + } + case userattributedefinition.FieldEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field enabled", values[i]) + } else if value.Valid { + _m.Enabled = value.Bool + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserAttributeDefinition. +// This includes values selected through modifiers, order, etc. +func (_m *UserAttributeDefinition) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryValues queries the "values" edge of the UserAttributeDefinition entity. +func (_m *UserAttributeDefinition) QueryValues() *UserAttributeValueQuery { + return NewUserAttributeDefinitionClient(_m.config).QueryValues(_m) +} + +// Update returns a builder for updating this UserAttributeDefinition. +// Note that you need to call UserAttributeDefinition.Unwrap() before calling this method if this UserAttributeDefinition +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserAttributeDefinition) Update() *UserAttributeDefinitionUpdateOne { + return NewUserAttributeDefinitionClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAttributeDefinition entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserAttributeDefinition) Unwrap() *UserAttributeDefinition { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAttributeDefinition is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *UserAttributeDefinition) String() string { + var builder strings.Builder + builder.WriteString("UserAttributeDefinition(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("key=") + builder.WriteString(_m.Key) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(_m.Description) + builder.WriteString(", ") + builder.WriteString("type=") + builder.WriteString(_m.Type) + builder.WriteString(", ") + builder.WriteString("options=") + builder.WriteString(fmt.Sprintf("%v", _m.Options)) + builder.WriteString(", ") + builder.WriteString("required=") + builder.WriteString(fmt.Sprintf("%v", _m.Required)) + builder.WriteString(", ") + builder.WriteString("validation=") + builder.WriteString(fmt.Sprintf("%v", _m.Validation)) + builder.WriteString(", ") + builder.WriteString("placeholder=") + builder.WriteString(_m.Placeholder) + builder.WriteString(", ") + builder.WriteString("display_order=") + builder.WriteString(fmt.Sprintf("%v", _m.DisplayOrder)) + builder.WriteString(", ") + builder.WriteString("enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) + builder.WriteByte(')') + return builder.String() +} + +// UserAttributeDefinitions is a parsable slice of UserAttributeDefinition. +type UserAttributeDefinitions []*UserAttributeDefinition diff --git a/backend/ent/userattributedefinition/userattributedefinition.go b/backend/ent/userattributedefinition/userattributedefinition.go new file mode 100644 index 00000000..ce398c03 --- /dev/null +++ b/backend/ent/userattributedefinition/userattributedefinition.go @@ -0,0 +1,205 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributedefinition + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userattributedefinition type in the database. + Label = "user_attribute_definition" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldKey holds the string denoting the key field in the database. + FieldKey = "key" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldType holds the string denoting the type field in the database. + FieldType = "type" + // FieldOptions holds the string denoting the options field in the database. + FieldOptions = "options" + // FieldRequired holds the string denoting the required field in the database. 
+ FieldRequired = "required" + // FieldValidation holds the string denoting the validation field in the database. + FieldValidation = "validation" + // FieldPlaceholder holds the string denoting the placeholder field in the database. + FieldPlaceholder = "placeholder" + // FieldDisplayOrder holds the string denoting the display_order field in the database. + FieldDisplayOrder = "display_order" + // FieldEnabled holds the string denoting the enabled field in the database. + FieldEnabled = "enabled" + // EdgeValues holds the string denoting the values edge name in mutations. + EdgeValues = "values" + // Table holds the table name of the userattributedefinition in the database. + Table = "user_attribute_definitions" + // ValuesTable is the table that holds the values relation/edge. + ValuesTable = "user_attribute_values" + // ValuesInverseTable is the table name for the UserAttributeValue entity. + // It exists in this package in order to avoid circular dependency with the "userattributevalue" package. + ValuesInverseTable = "user_attribute_values" + // ValuesColumn is the table column denoting the values relation/edge. + ValuesColumn = "attribute_id" +) + +// Columns holds all SQL columns for userattributedefinition fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldKey, + FieldName, + FieldDescription, + FieldType, + FieldOptions, + FieldRequired, + FieldValidation, + FieldPlaceholder, + FieldDisplayOrder, + FieldEnabled, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // KeyValidator is a validator for the "key" field. It is called by the builders before save. + KeyValidator func(string) error + // NameValidator is a validator for the "name" field. It is called by the builders before save. + NameValidator func(string) error + // DefaultDescription holds the default value on creation for the "description" field. + DefaultDescription string + // TypeValidator is a validator for the "type" field. It is called by the builders before save. + TypeValidator func(string) error + // DefaultOptions holds the default value on creation for the "options" field. + DefaultOptions []map[string]interface{} + // DefaultRequired holds the default value on creation for the "required" field. + DefaultRequired bool + // DefaultValidation holds the default value on creation for the "validation" field. + DefaultValidation map[string]interface{} + // DefaultPlaceholder holds the default value on creation for the "placeholder" field. + DefaultPlaceholder string + // PlaceholderValidator is a validator for the "placeholder" field. It is called by the builders before save. 
+ PlaceholderValidator func(string) error + // DefaultDisplayOrder holds the default value on creation for the "display_order" field. + DefaultDisplayOrder int + // DefaultEnabled holds the default value on creation for the "enabled" field. + DefaultEnabled bool +) + +// OrderOption defines the ordering options for the UserAttributeDefinition queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByKey orders the results by the key field. +func ByKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKey, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// ByType orders the results by the type field. +func ByType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldType, opts...).ToFunc() +} + +// ByRequired orders the results by the required field. +func ByRequired(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRequired, opts...).ToFunc() +} + +// ByPlaceholder orders the results by the placeholder field. +func ByPlaceholder(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPlaceholder, opts...).ToFunc() +} + +// ByDisplayOrder orders the results by the display_order field. +func ByDisplayOrder(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisplayOrder, opts...).ToFunc() +} + +// ByEnabled orders the results by the enabled field. +func ByEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnabled, opts...).ToFunc() +} + +// ByValuesCount orders the results by values count. +func ByValuesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newValuesStep(), opts...) + } +} + +// ByValues orders the results by values terms. +func ByValues(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newValuesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newValuesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(ValuesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, ValuesTable, ValuesColumn), + ) +} diff --git a/backend/ent/userattributedefinition/where.go b/backend/ent/userattributedefinition/where.go new file mode 100644 index 00000000..7f4d06cb --- /dev/null +++ b/backend/ent/userattributedefinition/where.go @@ -0,0 +1,664 @@ +// Code generated by ent, DO NOT EDIT. 
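+//
+// A minimal sketch of how the predicates below compose in a query (assuming
+// a generated Client named `client` and a caller-provided `ctx`):
+//
+//	defs, err := client.UserAttributeDefinition.Query().
+//	    Where(
+//	        userattributedefinition.Enabled(true),
+//	        userattributedefinition.DeletedAtIsNil(),
+//	    ).
+//	    Order(userattributedefinition.ByDisplayOrder()).
+//	    All(ctx)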
+ +package userattributedefinition + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDeletedAt, v)) +} + +// Key applies equality check predicate on the "key" field. It's identical to KeyEQ. +func Key(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldKey, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldName, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDescription, v)) +} + +// Type applies equality check predicate on the "type" field. It's identical to TypeEQ. 
+func Type(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldType, v)) +} + +// Required applies equality check predicate on the "required" field. It's identical to RequiredEQ. +func Required(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldRequired, v)) +} + +// Placeholder applies equality check predicate on the "placeholder" field. It's identical to PlaceholderEQ. +func Placeholder(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldPlaceholder, v)) +} + +// DisplayOrder applies equality check predicate on the "display_order" field. It's identical to DisplayOrderEQ. +func DisplayOrder(v int) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDisplayOrder, v)) +} + +// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. +func Enabled(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldEnabled, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. 
+func UpdatedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotNull(FieldDeletedAt)) +} + +// KeyEQ applies the EQ predicate on the "key" field. 
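
// Example (editorial sketch): the comparison predicates compose into range
// queries, and DeletedAtIsNil is the usual guard against soft-deleted rows
// (depending on how the soft-delete mixin is wired, an interceptor may add
// that filter automatically). With `since` a caller-chosen time.Time:
//
//	recent, err := client.UserAttributeDefinition.Query().
//		Where(
//			userattributedefinition.CreatedAtGTE(since),
//			userattributedefinition.DeletedAtIsNil(),
//		).
//		All(ctx)
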
+func KeyEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldKey, v)) +} + +// KeyNEQ applies the NEQ predicate on the "key" field. +func KeyNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldKey, v)) +} + +// KeyIn applies the In predicate on the "key" field. +func KeyIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldKey, vs...)) +} + +// KeyNotIn applies the NotIn predicate on the "key" field. +func KeyNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldKey, vs...)) +} + +// KeyGT applies the GT predicate on the "key" field. +func KeyGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldKey, v)) +} + +// KeyGTE applies the GTE predicate on the "key" field. +func KeyGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldKey, v)) +} + +// KeyLT applies the LT predicate on the "key" field. +func KeyLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldKey, v)) +} + +// KeyLTE applies the LTE predicate on the "key" field. +func KeyLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldKey, v)) +} + +// KeyContains applies the Contains predicate on the "key" field. +func KeyContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldKey, v)) +} + +// KeyHasPrefix applies the HasPrefix predicate on the "key" field. +func KeyHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldKey, v)) +} + +// KeyHasSuffix applies the HasSuffix predicate on the "key" field. +func KeyHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldKey, v)) +} + +// KeyEqualFold applies the EqualFold predicate on the "key" field. +func KeyEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldKey, v)) +} + +// KeyContainsFold applies the ContainsFold predicate on the "key" field. +func KeyContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldKey, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. 
+func NameGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. 
+func DescriptionLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldDescription, v)) +} + +// TypeEQ applies the EQ predicate on the "type" field. +func TypeEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldType, v)) +} + +// TypeNEQ applies the NEQ predicate on the "type" field. +func TypeNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldType, v)) +} + +// TypeIn applies the In predicate on the "type" field. +func TypeIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldType, vs...)) +} + +// TypeNotIn applies the NotIn predicate on the "type" field. +func TypeNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldType, vs...)) +} + +// TypeGT applies the GT predicate on the "type" field. +func TypeGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldType, v)) +} + +// TypeGTE applies the GTE predicate on the "type" field. +func TypeGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldType, v)) +} + +// TypeLT applies the LT predicate on the "type" field. +func TypeLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldType, v)) +} + +// TypeLTE applies the LTE predicate on the "type" field. +func TypeLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldType, v)) +} + +// TypeContains applies the Contains predicate on the "type" field. +func TypeContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldType, v)) +} + +// TypeHasPrefix applies the HasPrefix predicate on the "type" field. +func TypeHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldType, v)) +} + +// TypeHasSuffix applies the HasSuffix predicate on the "type" field. 
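
// Example (editorial sketch): Contains/HasPrefix/HasSuffix follow the
// database collation's case behavior, while the *Fold variants force a
// case-insensitive comparison — typically what an admin search box wants:
//
//	matches, err := client.UserAttributeDefinition.Query().
//		Where(userattributedefinition.NameContainsFold("email")).
//		All(ctx)
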
+func TypeHasSuffix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldType, v)) +} + +// TypeEqualFold applies the EqualFold predicate on the "type" field. +func TypeEqualFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldType, v)) +} + +// TypeContainsFold applies the ContainsFold predicate on the "type" field. +func TypeContainsFold(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldType, v)) +} + +// RequiredEQ applies the EQ predicate on the "required" field. +func RequiredEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldRequired, v)) +} + +// RequiredNEQ applies the NEQ predicate on the "required" field. +func RequiredNEQ(v bool) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldRequired, v)) +} + +// PlaceholderEQ applies the EQ predicate on the "placeholder" field. +func PlaceholderEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldEQ(FieldPlaceholder, v)) +} + +// PlaceholderNEQ applies the NEQ predicate on the "placeholder" field. +func PlaceholderNEQ(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldPlaceholder, v)) +} + +// PlaceholderIn applies the In predicate on the "placeholder" field. +func PlaceholderIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldIn(FieldPlaceholder, vs...)) +} + +// PlaceholderNotIn applies the NotIn predicate on the "placeholder" field. +func PlaceholderNotIn(vs ...string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldPlaceholder, vs...)) +} + +// PlaceholderGT applies the GT predicate on the "placeholder" field. +func PlaceholderGT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGT(FieldPlaceholder, v)) +} + +// PlaceholderGTE applies the GTE predicate on the "placeholder" field. +func PlaceholderGTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldGTE(FieldPlaceholder, v)) +} + +// PlaceholderLT applies the LT predicate on the "placeholder" field. +func PlaceholderLT(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLT(FieldPlaceholder, v)) +} + +// PlaceholderLTE applies the LTE predicate on the "placeholder" field. +func PlaceholderLTE(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldLTE(FieldPlaceholder, v)) +} + +// PlaceholderContains applies the Contains predicate on the "placeholder" field. +func PlaceholderContains(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldContains(FieldPlaceholder, v)) +} + +// PlaceholderHasPrefix applies the HasPrefix predicate on the "placeholder" field. +func PlaceholderHasPrefix(v string) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.FieldHasPrefix(FieldPlaceholder, v)) +} + +// PlaceholderHasSuffix applies the HasSuffix predicate on the "placeholder" field. 
+func PlaceholderHasSuffix(v string) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldHasSuffix(FieldPlaceholder, v))
+}
+
+// PlaceholderEqualFold applies the EqualFold predicate on the "placeholder" field.
+func PlaceholderEqualFold(v string) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldEqualFold(FieldPlaceholder, v))
+}
+
+// PlaceholderContainsFold applies the ContainsFold predicate on the "placeholder" field.
+func PlaceholderContainsFold(v string) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldContainsFold(FieldPlaceholder, v))
+}
+
+// DisplayOrderEQ applies the EQ predicate on the "display_order" field.
+func DisplayOrderEQ(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldEQ(FieldDisplayOrder, v))
+}
+
+// DisplayOrderNEQ applies the NEQ predicate on the "display_order" field.
+func DisplayOrderNEQ(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldDisplayOrder, v))
+}
+
+// DisplayOrderIn applies the In predicate on the "display_order" field.
+func DisplayOrderIn(vs ...int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldIn(FieldDisplayOrder, vs...))
+}
+
+// DisplayOrderNotIn applies the NotIn predicate on the "display_order" field.
+func DisplayOrderNotIn(vs ...int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldNotIn(FieldDisplayOrder, vs...))
+}
+
+// DisplayOrderGT applies the GT predicate on the "display_order" field.
+func DisplayOrderGT(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldGT(FieldDisplayOrder, v))
+}
+
+// DisplayOrderGTE applies the GTE predicate on the "display_order" field.
+func DisplayOrderGTE(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldGTE(FieldDisplayOrder, v))
+}
+
+// DisplayOrderLT applies the LT predicate on the "display_order" field.
+func DisplayOrderLT(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldLT(FieldDisplayOrder, v))
+}
+
+// DisplayOrderLTE applies the LTE predicate on the "display_order" field.
+func DisplayOrderLTE(v int) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldLTE(FieldDisplayOrder, v))
+}
+
+// EnabledEQ applies the EQ predicate on the "enabled" field.
+func EnabledEQ(v bool) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldEQ(FieldEnabled, v))
+}
+
+// EnabledNEQ applies the NEQ predicate on the "enabled" field.
+func EnabledNEQ(v bool) predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(sql.FieldNEQ(FieldEnabled, v))
+}
+
+// HasValues applies the HasEdge predicate on the "values" edge.
+func HasValues() predicate.UserAttributeDefinition {
+	return predicate.UserAttributeDefinition(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.O2M, false, ValuesTable, ValuesColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasValuesWith applies the HasEdge predicate on the "values" edge with the given conditions (other predicates).
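
// Example (editorial sketch): HasValues and HasValuesWith filter on the O2M
// "values" edge, i.e. on rows in the user-attribute-value table that point
// back at a definition. Finding definitions no user has filled in yet:
//
//	unused, err := client.UserAttributeDefinition.Query().
//		Where(userattributedefinition.Not(userattributedefinition.HasValues())).
//		All(ctx)
//
// HasValuesWith (below) narrows the edge with predicates from the generated
// userattributevalue package, e.g. HasValuesWith(userattributevalue.IDGT(0)).
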
+func HasValuesWith(preds ...predicate.UserAttributeValue) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(func(s *sql.Selector) { + step := newValuesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserAttributeDefinition) predicate.UserAttributeDefinition { + return predicate.UserAttributeDefinition(sql.NotPredicates(p)) +} diff --git a/backend/ent/userattributedefinition_create.go b/backend/ent/userattributedefinition_create.go new file mode 100644 index 00000000..a018c060 --- /dev/null +++ b/backend/ent/userattributedefinition_create.go @@ -0,0 +1,1267 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeDefinitionCreate is the builder for creating a UserAttributeDefinition entity. +type UserAttributeDefinitionCreate struct { + config + mutation *UserAttributeDefinitionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserAttributeDefinitionCreate) SetCreatedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableCreatedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *UserAttributeDefinitionCreate) SetUpdatedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableUpdatedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *UserAttributeDefinitionCreate) SetDeletedAt(v time.Time) *UserAttributeDefinitionCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetKey sets the "key" field. +func (_c *UserAttributeDefinitionCreate) SetKey(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetKey(v) + return _c +} + +// SetName sets the "name" field. +func (_c *UserAttributeDefinitionCreate) SetName(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetName(v) + return _c +} + +// SetDescription sets the "description" field. 
+func (_c *UserAttributeDefinitionCreate) SetDescription(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetDescription(v) + return _c +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDescription(v *string) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDescription(*v) + } + return _c +} + +// SetType sets the "type" field. +func (_c *UserAttributeDefinitionCreate) SetType(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetType(v) + return _c +} + +// SetOptions sets the "options" field. +func (_c *UserAttributeDefinitionCreate) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionCreate { + _c.mutation.SetOptions(v) + return _c +} + +// SetRequired sets the "required" field. +func (_c *UserAttributeDefinitionCreate) SetRequired(v bool) *UserAttributeDefinitionCreate { + _c.mutation.SetRequired(v) + return _c +} + +// SetNillableRequired sets the "required" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableRequired(v *bool) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetRequired(*v) + } + return _c +} + +// SetValidation sets the "validation" field. +func (_c *UserAttributeDefinitionCreate) SetValidation(v map[string]interface{}) *UserAttributeDefinitionCreate { + _c.mutation.SetValidation(v) + return _c +} + +// SetPlaceholder sets the "placeholder" field. +func (_c *UserAttributeDefinitionCreate) SetPlaceholder(v string) *UserAttributeDefinitionCreate { + _c.mutation.SetPlaceholder(v) + return _c +} + +// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillablePlaceholder(v *string) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetPlaceholder(*v) + } + return _c +} + +// SetDisplayOrder sets the "display_order" field. +func (_c *UserAttributeDefinitionCreate) SetDisplayOrder(v int) *UserAttributeDefinitionCreate { + _c.mutation.SetDisplayOrder(v) + return _c +} + +// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetDisplayOrder(*v) + } + return _c +} + +// SetEnabled sets the "enabled" field. +func (_c *UserAttributeDefinitionCreate) SetEnabled(v bool) *UserAttributeDefinitionCreate { + _c.mutation.SetEnabled(v) + return _c +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_c *UserAttributeDefinitionCreate) SetNillableEnabled(v *bool) *UserAttributeDefinitionCreate { + if v != nil { + _c.SetEnabled(*v) + } + return _c +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs. +func (_c *UserAttributeDefinitionCreate) AddValueIDs(ids ...int64) *UserAttributeDefinitionCreate { + _c.mutation.AddValueIDs(ids...) + return _c +} + +// AddValues adds the "values" edges to the UserAttributeValue entity. +func (_c *UserAttributeDefinitionCreate) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddValueIDs(ids...) +} + +// Mutation returns the UserAttributeDefinitionMutation object of the builder. +func (_c *UserAttributeDefinitionCreate) Mutation() *UserAttributeDefinitionMutation { + return _c.mutation +} + +// Save creates the UserAttributeDefinition in the database. 
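
// Example (editorial sketch): the SetNillable* variants let optional API
// fields be forwarded as pointers — a nil pointer leaves the schema default
// untouched. Of the fields above, only key, name and type have no default
// (see check() below), so a minimal create looks like:
//
//	def, err := client.UserAttributeDefinition.Create().
//		SetKey("department").              // made-up key
//		SetName("Department").
//		SetType("text").                   // must satisfy the schema's TypeValidator
//		SetNillableDescription(maybeDesc). // *string, may be nil
//		Save(ctx)
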
+func (_c *UserAttributeDefinitionCreate) Save(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAttributeDefinitionCreate) SaveX(ctx context.Context) *UserAttributeDefinition { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeDefinitionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeDefinitionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAttributeDefinitionCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if userattributedefinition.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if userattributedefinition.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Description(); !ok { + v := userattributedefinition.DefaultDescription + _c.mutation.SetDescription(v) + } + if _, ok := _c.mutation.Options(); !ok { + v := userattributedefinition.DefaultOptions + _c.mutation.SetOptions(v) + } + if _, ok := _c.mutation.Required(); !ok { + v := userattributedefinition.DefaultRequired + _c.mutation.SetRequired(v) + } + if _, ok := _c.mutation.Validation(); !ok { + v := userattributedefinition.DefaultValidation + _c.mutation.SetValidation(v) + } + if _, ok := _c.mutation.Placeholder(); !ok { + v := userattributedefinition.DefaultPlaceholder + _c.mutation.SetPlaceholder(v) + } + if _, ok := _c.mutation.DisplayOrder(); !ok { + v := userattributedefinition.DefaultDisplayOrder + _c.mutation.SetDisplayOrder(v) + } + if _, ok := _c.mutation.Enabled(); !ok { + v := userattributedefinition.DefaultEnabled + _c.mutation.SetEnabled(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
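
// Note (editorial): DefaultCreatedAt and DefaultUpdatedAt are function
// variables that the generated runtime package assigns from the schema
// mixins at init time. The "forgotten import ent/runtime?" error above is
// ent's standard hint for when that initialization never ran; the usual fix
// is a one-time blank import in the binary's main package:
//
//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
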
+func (_c *UserAttributeDefinitionCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAttributeDefinition.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserAttributeDefinition.updated_at"`)} + } + if _, ok := _c.mutation.Key(); !ok { + return &ValidationError{Name: "key", err: errors.New(`ent: missing required field "UserAttributeDefinition.key"`)} + } + if v, ok := _c.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "UserAttributeDefinition.name"`)} + } + if v, ok := _c.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if _, ok := _c.mutation.Description(); !ok { + return &ValidationError{Name: "description", err: errors.New(`ent: missing required field "UserAttributeDefinition.description"`)} + } + if _, ok := _c.mutation.GetType(); !ok { + return &ValidationError{Name: "type", err: errors.New(`ent: missing required field "UserAttributeDefinition.type"`)} + } + if v, ok := _c.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if _, ok := _c.mutation.Options(); !ok { + return &ValidationError{Name: "options", err: errors.New(`ent: missing required field "UserAttributeDefinition.options"`)} + } + if _, ok := _c.mutation.Required(); !ok { + return &ValidationError{Name: "required", err: errors.New(`ent: missing required field "UserAttributeDefinition.required"`)} + } + if _, ok := _c.mutation.Validation(); !ok { + return &ValidationError{Name: "validation", err: errors.New(`ent: missing required field "UserAttributeDefinition.validation"`)} + } + if _, ok := _c.mutation.Placeholder(); !ok { + return &ValidationError{Name: "placeholder", err: errors.New(`ent: missing required field "UserAttributeDefinition.placeholder"`)} + } + if v, ok := _c.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + if _, ok := _c.mutation.DisplayOrder(); !ok { + return &ValidationError{Name: "display_order", err: errors.New(`ent: missing required field "UserAttributeDefinition.display_order"`)} + } + if _, ok := _c.mutation.Enabled(); !ok { + return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "UserAttributeDefinition.enabled"`)} + } + return nil +} + +func (_c *UserAttributeDefinitionCreate) sqlSave(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} 
+		}
+		return nil, err
+	}
+	id := _spec.ID.Value.(int64)
+	_node.ID = int64(id)
+	_c.mutation.id = &_node.ID
+	_c.mutation.done = true
+	return _node, nil
+}
+
+func (_c *UserAttributeDefinitionCreate) createSpec() (*UserAttributeDefinition, *sqlgraph.CreateSpec) {
+	var (
+		_node = &UserAttributeDefinition{config: _c.config}
+		_spec = sqlgraph.NewCreateSpec(userattributedefinition.Table, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64))
+	)
+	_spec.OnConflict = _c.conflict
+	if value, ok := _c.mutation.CreatedAt(); ok {
+		_spec.SetField(userattributedefinition.FieldCreatedAt, field.TypeTime, value)
+		_node.CreatedAt = value
+	}
+	if value, ok := _c.mutation.UpdatedAt(); ok {
+		_spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value)
+		_node.UpdatedAt = value
+	}
+	if value, ok := _c.mutation.DeletedAt(); ok {
+		_spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value)
+		_node.DeletedAt = &value
+	}
+	if value, ok := _c.mutation.Key(); ok {
+		_spec.SetField(userattributedefinition.FieldKey, field.TypeString, value)
+		_node.Key = value
+	}
+	if value, ok := _c.mutation.Name(); ok {
+		_spec.SetField(userattributedefinition.FieldName, field.TypeString, value)
+		_node.Name = value
+	}
+	if value, ok := _c.mutation.Description(); ok {
+		_spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value)
+		_node.Description = value
+	}
+	if value, ok := _c.mutation.GetType(); ok {
+		_spec.SetField(userattributedefinition.FieldType, field.TypeString, value)
+		_node.Type = value
+	}
+	if value, ok := _c.mutation.Options(); ok {
+		_spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value)
+		_node.Options = value
+	}
+	if value, ok := _c.mutation.Required(); ok {
+		_spec.SetField(userattributedefinition.FieldRequired, field.TypeBool, value)
+		_node.Required = value
+	}
+	if value, ok := _c.mutation.Validation(); ok {
+		_spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value)
+		_node.Validation = value
+	}
+	if value, ok := _c.mutation.Placeholder(); ok {
+		_spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value)
+		_node.Placeholder = value
+	}
+	if value, ok := _c.mutation.DisplayOrder(); ok {
+		_spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value)
+		_node.DisplayOrder = value
+	}
+	if value, ok := _c.mutation.Enabled(); ok {
+		_spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value)
+		_node.Enabled = value
+	}
+	if nodes := _c.mutation.ValuesIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   userattributedefinition.ValuesTable,
+			Columns: []string{userattributedefinition.ValuesColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserAttributeDefinition.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAttributeDefinitionUpsert) {
+//			u.SetCreatedAt(v + v)
+//		}).
+// Exec(ctx) +func (_c *UserAttributeDefinitionCreate) OnConflict(opts ...sql.ConflictOption) *UserAttributeDefinitionUpsertOne { + _c.conflict = opts + return &UserAttributeDefinitionUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *UserAttributeDefinitionCreate) OnConflictColumns(columns ...string) *UserAttributeDefinitionUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeDefinitionUpsertOne{ + create: _c, + } +} + +type ( + // UserAttributeDefinitionUpsertOne is the builder for "upsert"-ing + // one UserAttributeDefinition node. + UserAttributeDefinitionUpsertOne struct { + create *UserAttributeDefinitionCreate + } + + // UserAttributeDefinitionUpsert is the "OnConflict" setter. + UserAttributeDefinitionUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsert) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateUpdatedAt() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldUpdatedAt) + return u +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsert) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDeletedAt, v) + return u +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateDeletedAt() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDeletedAt) + return u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserAttributeDefinitionUpsert) ClearDeletedAt() *UserAttributeDefinitionUpsert { + u.SetNull(userattributedefinition.FieldDeletedAt) + return u +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsert) SetKey(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldKey, v) + return u +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateKey() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldKey) + return u +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsert) SetName(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateName() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsert) SetDescription(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. 
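
// Example (editorial sketch): inside Update(func(u *...Upsert)), the Set*
// methods pin an explicit value while the Update* methods call SetExcluded,
// i.e. they adopt the value the conflicting INSERT proposed (SQL
// "excluded.<column>" on PostgreSQL/SQLite, VALUES(<column>) on MySQL):
//
//	err := client.UserAttributeDefinition.Create().
//		SetKey("department").SetName("Department").SetType("text"). // made-up values
//		OnConflictColumns(userattributedefinition.FieldKey).        // assumes a unique index on "key"
//		Update(func(u *ent.UserAttributeDefinitionUpsert) {
//			u.UpdateName()     // keep the newly proposed name
//			u.SetEnabled(true) // force a fixed value
//		}).
//		Exec(ctx)
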
+func (u *UserAttributeDefinitionUpsert) UpdateDescription() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDescription) + return u +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsert) SetType(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldType, v) + return u +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateType() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldType) + return u +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsert) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldOptions, v) + return u +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateOptions() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldOptions) + return u +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsert) SetRequired(v bool) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldRequired, v) + return u +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateRequired() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldRequired) + return u +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsert) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldValidation, v) + return u +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateValidation() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldValidation) + return u +} + +// SetPlaceholder sets the "placeholder" field. +func (u *UserAttributeDefinitionUpsert) SetPlaceholder(v string) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldPlaceholder, v) + return u +} + +// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdatePlaceholder() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldPlaceholder) + return u +} + +// SetDisplayOrder sets the "display_order" field. +func (u *UserAttributeDefinitionUpsert) SetDisplayOrder(v int) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldDisplayOrder, v) + return u +} + +// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsert) UpdateDisplayOrder() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldDisplayOrder) + return u +} + +// AddDisplayOrder adds v to the "display_order" field. +func (u *UserAttributeDefinitionUpsert) AddDisplayOrder(v int) *UserAttributeDefinitionUpsert { + u.Add(userattributedefinition.FieldDisplayOrder, v) + return u +} + +// SetEnabled sets the "enabled" field. +func (u *UserAttributeDefinitionUpsert) SetEnabled(v bool) *UserAttributeDefinitionUpsert { + u.Set(userattributedefinition.FieldEnabled, v) + return u +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. 
+func (u *UserAttributeDefinitionUpsert) UpdateEnabled() *UserAttributeDefinitionUpsert { + u.SetExcluded(userattributedefinition.FieldEnabled) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertOne) UpdateNewValues() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(userattributedefinition.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertOne) Ignore() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeDefinitionUpsertOne) DoNothing() *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeDefinitionCreate.OnConflict +// documentation for more info. +func (u *UserAttributeDefinitionUpsertOne) Update(set func(*UserAttributeDefinitionUpsert)) *UserAttributeDefinitionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeDefinitionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsertOne) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateUpdatedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertOne) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateDeletedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertOne) ClearDeletedAt() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsertOne) SetKey(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. 
+func (u *UserAttributeDefinitionUpsertOne) UpdateKey() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsertOne) SetName(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateName() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsertOne) SetDescription(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateDescription() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDescription() + }) +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsertOne) SetType(v string) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateType() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateType() + }) +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsertOne) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetOptions(v) + }) +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateOptions() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateOptions() + }) +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsertOne) SetRequired(v bool) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetRequired(v) + }) +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateRequired() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateRequired() + }) +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsertOne) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetValidation(v) + }) +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertOne) UpdateValidation() *UserAttributeDefinitionUpsertOne { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateValidation() + }) +} + +// SetPlaceholder sets the "placeholder" field. 
+func (u *UserAttributeDefinitionUpsertOne) SetPlaceholder(v string) *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.SetPlaceholder(v)
+	})
+}
+
+// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create.
+func (u *UserAttributeDefinitionUpsertOne) UpdatePlaceholder() *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.UpdatePlaceholder()
+	})
+}
+
+// SetDisplayOrder sets the "display_order" field.
+func (u *UserAttributeDefinitionUpsertOne) SetDisplayOrder(v int) *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.SetDisplayOrder(v)
+	})
+}
+
+// AddDisplayOrder adds v to the "display_order" field.
+func (u *UserAttributeDefinitionUpsertOne) AddDisplayOrder(v int) *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.AddDisplayOrder(v)
+	})
+}
+
+// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create.
+func (u *UserAttributeDefinitionUpsertOne) UpdateDisplayOrder() *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.UpdateDisplayOrder()
+	})
+}
+
+// SetEnabled sets the "enabled" field.
+func (u *UserAttributeDefinitionUpsertOne) SetEnabled(v bool) *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.SetEnabled(v)
+	})
+}
+
+// UpdateEnabled sets the "enabled" field to the value that was provided on create.
+func (u *UserAttributeDefinitionUpsertOne) UpdateEnabled() *UserAttributeDefinitionUpsertOne {
+	return u.Update(func(s *UserAttributeDefinitionUpsert) {
+		s.UpdateEnabled()
+	})
+}
+
+// Exec executes the query.
+func (u *UserAttributeDefinitionUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserAttributeDefinitionCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserAttributeDefinitionUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *UserAttributeDefinitionUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *UserAttributeDefinitionUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// UserAttributeDefinitionCreateBulk is the builder for creating many UserAttributeDefinition entities in bulk.
+type UserAttributeDefinitionCreateBulk struct {
+	config
+	err      error
+	builders []*UserAttributeDefinitionCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the UserAttributeDefinition entities in the database.
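
// Example (editorial sketch): a complete idempotent "ensure this definition
// exists" call. UpdateNewValues refreshes mutable columns from the proposed
// row while keeping created_at intact (via SetIgnore), and ID returns the
// row's ID whether it was inserted or updated. Note that Exec above
// deliberately fails if no conflict option was configured:
//
//	id, err := client.UserAttributeDefinition.Create().
//		SetKey("department").SetName("Department").SetType("text"). // made-up values
//		OnConflictColumns(userattributedefinition.FieldKey).        // assumes a unique index on "key"
//		UpdateNewValues().
//		ID(ctx)
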
+func (_c *UserAttributeDefinitionCreateBulk) Save(ctx context.Context) ([]*UserAttributeDefinition, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*UserAttributeDefinition, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*UserAttributeDefinitionMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UserAttributeDefinitionCreateBulk) SaveX(ctx context.Context) []*UserAttributeDefinition {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UserAttributeDefinitionCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *UserAttributeDefinitionCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserAttributeDefinition.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAttributeDefinitionUpsert) {
+//			u.SetCreatedAt(v + v)
+//		}).
+//		Exec(ctx)
+func (_c *UserAttributeDefinitionCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAttributeDefinitionUpsertBulk {
+	_c.conflict = opts
+	return &UserAttributeDefinitionUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserAttributeDefinition.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+// Exec(ctx) +func (_c *UserAttributeDefinitionCreateBulk) OnConflictColumns(columns ...string) *UserAttributeDefinitionUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &UserAttributeDefinitionUpsertBulk{ + create: _c, + } +} + +// UserAttributeDefinitionUpsertBulk is the builder for "upsert"-ing +// a bulk of UserAttributeDefinition nodes. +type UserAttributeDefinitionUpsertBulk struct { + create *UserAttributeDefinitionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertBulk) UpdateNewValues() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(userattributedefinition.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeDefinition.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeDefinitionUpsertBulk) Ignore() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeDefinitionUpsertBulk) DoNothing() *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeDefinitionCreateBulk.OnConflict +// documentation for more info. +func (u *UserAttributeDefinitionUpsertBulk) Update(set func(*UserAttributeDefinitionUpsert)) *UserAttributeDefinitionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeDefinitionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeDefinitionUpsertBulk) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateUpdatedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDeletedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. 
+func (u *UserAttributeDefinitionUpsertBulk) ClearDeletedAt() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetKey sets the "key" field. +func (u *UserAttributeDefinitionUpsertBulk) SetKey(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetKey(v) + }) +} + +// UpdateKey sets the "key" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateKey() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateKey() + }) +} + +// SetName sets the "name" field. +func (u *UserAttributeDefinitionUpsertBulk) SetName(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateName() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDescription(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDescription() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDescription() + }) +} + +// SetType sets the "type" field. +func (u *UserAttributeDefinitionUpsertBulk) SetType(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetType(v) + }) +} + +// UpdateType sets the "type" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateType() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateType() + }) +} + +// SetOptions sets the "options" field. +func (u *UserAttributeDefinitionUpsertBulk) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetOptions(v) + }) +} + +// UpdateOptions sets the "options" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateOptions() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateOptions() + }) +} + +// SetRequired sets the "required" field. +func (u *UserAttributeDefinitionUpsertBulk) SetRequired(v bool) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetRequired(v) + }) +} + +// UpdateRequired sets the "required" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateRequired() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateRequired() + }) +} + +// SetValidation sets the "validation" field. +func (u *UserAttributeDefinitionUpsertBulk) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetValidation(v) + }) +} + +// UpdateValidation sets the "validation" field to the value that was provided on create. 
+func (u *UserAttributeDefinitionUpsertBulk) UpdateValidation() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateValidation() + }) +} + +// SetPlaceholder sets the "placeholder" field. +func (u *UserAttributeDefinitionUpsertBulk) SetPlaceholder(v string) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetPlaceholder(v) + }) +} + +// UpdatePlaceholder sets the "placeholder" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdatePlaceholder() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdatePlaceholder() + }) +} + +// SetDisplayOrder sets the "display_order" field. +func (u *UserAttributeDefinitionUpsertBulk) SetDisplayOrder(v int) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetDisplayOrder(v) + }) +} + +// AddDisplayOrder adds v to the "display_order" field. +func (u *UserAttributeDefinitionUpsertBulk) AddDisplayOrder(v int) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.AddDisplayOrder(v) + }) +} + +// UpdateDisplayOrder sets the "display_order" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateDisplayOrder() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateDisplayOrder() + }) +} + +// SetEnabled sets the "enabled" field. +func (u *UserAttributeDefinitionUpsertBulk) SetEnabled(v bool) *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.SetEnabled(v) + }) +} + +// UpdateEnabled sets the "enabled" field to the value that was provided on create. +func (u *UserAttributeDefinitionUpsertBulk) UpdateEnabled() *UserAttributeDefinitionUpsertBulk { + return u.Update(func(s *UserAttributeDefinitionUpsert) { + s.UpdateEnabled() + }) +} + +// Exec executes the query. +func (u *UserAttributeDefinitionUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAttributeDefinitionCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserAttributeDefinitionCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserAttributeDefinitionUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/userattributedefinition_delete.go b/backend/ent/userattributedefinition_delete.go new file mode 100644 index 00000000..8d879eb5 --- /dev/null +++ b/backend/ent/userattributedefinition_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" +) + +// UserAttributeDefinitionDelete is the builder for deleting a UserAttributeDefinition entity. 
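+//
+// A minimal usage sketch (a "client" and "ctx" are assumed; the predicate
+// is illustrative). Exec reports how many rows were deleted:
+//
+//	n, err := client.UserAttributeDefinition.Delete().
+//		Where(userattributedefinition.Enabled(false)).
+//		Exec(ctx)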
+type UserAttributeDefinitionDelete struct {
+	config
+	hooks    []Hook
+	mutation *UserAttributeDefinitionMutation
+}
+
+// Where appends a list of predicates to the UserAttributeDefinitionDelete builder.
+func (_d *UserAttributeDefinitionDelete) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *UserAttributeDefinitionDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserAttributeDefinitionDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *UserAttributeDefinitionDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(userattributedefinition.Table, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// UserAttributeDefinitionDeleteOne is the builder for deleting a single UserAttributeDefinition entity.
+type UserAttributeDefinitionDeleteOne struct {
+	_d *UserAttributeDefinitionDelete
+}
+
+// Where appends a list of predicates to the UserAttributeDefinitionDelete builder.
+func (_d *UserAttributeDefinitionDeleteOne) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UserAttributeDefinitionDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{userattributedefinition.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserAttributeDefinitionDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/userattributedefinition_query.go b/backend/ent/userattributedefinition_query.go
new file mode 100644
index 00000000..0727b47c
--- /dev/null
+++ b/backend/ent/userattributedefinition_query.go
@@ -0,0 +1,643 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
+	"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
+)
+
+// UserAttributeDefinitionQuery is the builder for querying UserAttributeDefinition entities.
+type UserAttributeDefinitionQuery struct {
+	config
+	ctx        *QueryContext
+	order      []userattributedefinition.OrderOption
+	inters     []Interceptor
+	predicates []predicate.UserAttributeDefinition
+	withValues *UserAttributeValueQuery
+	modifiers  []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the UserAttributeDefinitionQuery builder.
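+//
+// A minimal usage sketch (a "client" and "ctx" are assumed; the predicate
+// and ordering are illustrative):
+//
+//	defs, err := client.UserAttributeDefinition.Query().
+//		Where(userattributedefinition.Enabled(true)).
+//		Order(userattributedefinition.ByDisplayOrder()).
+//		All(ctx)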
+func (_q *UserAttributeDefinitionQuery) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *UserAttributeDefinitionQuery) Limit(limit int) *UserAttributeDefinitionQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *UserAttributeDefinitionQuery) Offset(offset int) *UserAttributeDefinitionQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *UserAttributeDefinitionQuery) Unique(unique bool) *UserAttributeDefinitionQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserAttributeDefinitionQuery) Order(o ...userattributedefinition.OrderOption) *UserAttributeDefinitionQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryValues chains the current query on the "values" edge. +func (_q *UserAttributeDefinitionQuery) QueryValues() *UserAttributeValueQuery { + query := (&UserAttributeValueClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributedefinition.Table, userattributedefinition.FieldID, selector), + sqlgraph.To(userattributevalue.Table, userattributevalue.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, userattributedefinition.ValuesTable, userattributedefinition.ValuesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAttributeDefinition entity from the query. +// Returns a *NotFoundError when no UserAttributeDefinition was found. +func (_q *UserAttributeDefinitionQuery) First(ctx context.Context) (*UserAttributeDefinition, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userattributedefinition.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) FirstX(ctx context.Context) *UserAttributeDefinition { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserAttributeDefinition ID from the query. +// Returns a *NotFoundError when no UserAttributeDefinition ID was found. +func (_q *UserAttributeDefinitionQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{userattributedefinition.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserAttributeDefinition entity found by the query, ensuring it only returns one. 
+// Returns a *NotSingularError when more than one UserAttributeDefinition entity is found. +// Returns a *NotFoundError when no UserAttributeDefinition entities are found. +func (_q *UserAttributeDefinitionQuery) Only(ctx context.Context) (*UserAttributeDefinition, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userattributedefinition.Label} + default: + return nil, &NotSingularError{userattributedefinition.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) OnlyX(ctx context.Context) *UserAttributeDefinition { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserAttributeDefinition ID in the query. +// Returns a *NotSingularError when more than one UserAttributeDefinition ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserAttributeDefinitionQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{userattributedefinition.Label} + default: + err = &NotSingularError{userattributedefinition.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserAttributeDefinitions. +func (_q *UserAttributeDefinitionQuery) All(ctx context.Context) ([]*UserAttributeDefinition, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAttributeDefinition, *UserAttributeDefinitionQuery]() + return withInterceptors[[]*UserAttributeDefinition](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) AllX(ctx context.Context) []*UserAttributeDefinition { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserAttributeDefinition IDs. +func (_q *UserAttributeDefinitionQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(userattributedefinition.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserAttributeDefinitionQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserAttributeDefinitionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAttributeDefinitionQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. 
+func (_q *UserAttributeDefinitionQuery) CountX(ctx context.Context) int {
+	count, err := _q.Count(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return count
+}
+
+// Exist returns true if the query has elements in the graph.
+func (_q *UserAttributeDefinitionQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
+	switch _, err := _q.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (_q *UserAttributeDefinitionQuery) ExistX(ctx context.Context) bool {
+	exist, err := _q.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the UserAttributeDefinitionQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (_q *UserAttributeDefinitionQuery) Clone() *UserAttributeDefinitionQuery {
+	if _q == nil {
+		return nil
+	}
+	return &UserAttributeDefinitionQuery{
+		config:     _q.config,
+		ctx:        _q.ctx.Clone(),
+		order:      append([]userattributedefinition.OrderOption{}, _q.order...),
+		inters:     append([]Interceptor{}, _q.inters...),
+		predicates: append([]predicate.UserAttributeDefinition{}, _q.predicates...),
+		withValues: _q.withValues.Clone(),
+		// clone intermediate query.
+		sql:  _q.sql.Clone(),
+		path: _q.path,
+	}
+}
+
+// WithValues tells the query-builder to eager-load the nodes that are connected to
+// the "values" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserAttributeDefinitionQuery) WithValues(opts ...func(*UserAttributeValueQuery)) *UserAttributeDefinitionQuery {
+	query := (&UserAttributeValueClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withValues = query
+	return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.UserAttributeDefinition.Query().
+//		GroupBy(userattributedefinition.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *UserAttributeDefinitionQuery) GroupBy(field string, fields ...string) *UserAttributeDefinitionGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &UserAttributeDefinitionGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = userattributedefinition.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.UserAttributeDefinition.Query().
+//		Select(userattributedefinition.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (_q *UserAttributeDefinitionQuery) Select(fields ...string) *UserAttributeDefinitionSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+	sbuild := &UserAttributeDefinitionSelect{UserAttributeDefinitionQuery: _q}
+	sbuild.label = userattributedefinition.Label
+	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
+	return sbuild
+}
+
+// Aggregate returns a UserAttributeDefinitionSelect configured with the given aggregations.
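+//
+// For example, counting matching rows without loading them (a sketch that
+// assumes a "client" and "ctx"):
+//
+//	n, err := client.UserAttributeDefinition.Query().
+//		Aggregate(ent.Count()).
+//		Int(ctx)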
+func (_q *UserAttributeDefinitionQuery) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAttributeDefinitionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userattributedefinition.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAttributeDefinitionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAttributeDefinition, error) { + var ( + nodes = []*UserAttributeDefinition{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withValues != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAttributeDefinition).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAttributeDefinition{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withValues; query != nil { + if err := _q.loadValues(ctx, query, nodes, + func(n *UserAttributeDefinition) { n.Edges.Values = []*UserAttributeValue{} }, + func(n *UserAttributeDefinition, e *UserAttributeValue) { n.Edges.Values = append(n.Edges.Values, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAttributeDefinitionQuery) loadValues(ctx context.Context, query *UserAttributeValueQuery, nodes []*UserAttributeDefinition, init func(*UserAttributeDefinition), assign func(*UserAttributeDefinition, *UserAttributeValue)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*UserAttributeDefinition) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(userattributevalue.FieldAttributeID) + } + query.Where(predicate.UserAttributeValue(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(userattributedefinition.ValuesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.AttributeID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "attribute_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *UserAttributeDefinitionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserAttributeDefinitionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(userattributedefinition.Table, 
userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64))
+	_spec.From = _q.sql
+	if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, userattributedefinition.FieldID)
+		for i := range fields {
+			if fields[i] != userattributedefinition.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(userattributedefinition.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = userattributedefinition.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for the offset clause. We start
+		// with a default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
+func (_q *UserAttributeDefinitionQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeDefinitionQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForUpdate(opts...)
+	})
+	return _q
+}
+
+// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
+// on any rows that are read. Other sessions can read the rows, but cannot modify them
+// until your transaction commits.
+func (_q *UserAttributeDefinitionQuery) ForShare(opts ...sql.LockOption) *UserAttributeDefinitionQuery {
+	if _q.driver.Dialect() == dialect.Postgres {
+		_q.Unique(false)
+	}
+	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
+		s.ForShare(opts...)
+	})
+	return _q
+}
+
+// UserAttributeDefinitionGroupBy is the group-by builder for UserAttributeDefinition entities.
+type UserAttributeDefinitionGroupBy struct {
+	selector
+	build *UserAttributeDefinitionQuery
+}
+
+// Aggregate adds the given aggregation functions to the group-by query.
+func (_g *UserAttributeDefinitionGroupBy) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionGroupBy {
+	_g.fns = append(_g.fns, fns...)
+	return _g
+}
+
+// Scan applies the selector query and scans the result into the given value.
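+//
+// For example, counting definitions per type (a sketch that assumes a
+// "client" and "ctx"):
+//
+//	var v []struct {
+//		Type  string `json:"type"`
+//		Count int    `json:"count"`
+//	}
+//	err := client.UserAttributeDefinition.Query().
+//		GroupBy(userattributedefinition.FieldType).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)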
+func (_g *UserAttributeDefinitionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeDefinitionQuery, *UserAttributeDefinitionGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAttributeDefinitionGroupBy) sqlScan(ctx context.Context, root *UserAttributeDefinitionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAttributeDefinitionSelect is the builder for selecting fields of UserAttributeDefinition entities. +type UserAttributeDefinitionSelect struct { + *UserAttributeDefinitionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAttributeDefinitionSelect) Aggregate(fns ...AggregateFunc) *UserAttributeDefinitionSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserAttributeDefinitionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeDefinitionQuery, *UserAttributeDefinitionSelect](ctx, _s.UserAttributeDefinitionQuery, _s, _s.inters, v) +} + +func (_s *UserAttributeDefinitionSelect) sqlScan(ctx context.Context, root *UserAttributeDefinitionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/userattributedefinition_update.go b/backend/ent/userattributedefinition_update.go new file mode 100644 index 00000000..6b9eb7d0 --- /dev/null +++ b/backend/ent/userattributedefinition_update.go @@ -0,0 +1,846 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeDefinitionUpdate is the builder for updating UserAttributeDefinition entities. 
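+//
+// A minimal usage sketch (a "client" and "ctx" are assumed; the key and
+// flag are illustrative). Save reports the number of rows updated:
+//
+//	n, err := client.UserAttributeDefinition.Update().
+//		Where(userattributedefinition.Key("github")).
+//		SetEnabled(false).
+//		Save(ctx)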
+type UserAttributeDefinitionUpdate struct {
+	config
+	hooks    []Hook
+	mutation *UserAttributeDefinitionMutation
+}
+
+// Where appends a list of predicates to the UserAttributeDefinitionUpdate builder.
+func (_u *UserAttributeDefinitionUpdate) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionUpdate {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_u *UserAttributeDefinitionUpdate) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetUpdatedAt(v)
+	return _u
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (_u *UserAttributeDefinitionUpdate) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetDeletedAt(v)
+	return _u
+}
+
+// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdate) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionUpdate {
+	if v != nil {
+		_u.SetDeletedAt(*v)
+	}
+	return _u
+}
+
+// ClearDeletedAt clears the value of the "deleted_at" field.
+func (_u *UserAttributeDefinitionUpdate) ClearDeletedAt() *UserAttributeDefinitionUpdate {
+	_u.mutation.ClearDeletedAt()
+	return _u
+}
+
+// SetKey sets the "key" field.
+func (_u *UserAttributeDefinitionUpdate) SetKey(v string) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetKey(v)
+	return _u
+}
+
+// SetNillableKey sets the "key" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdate) SetNillableKey(v *string) *UserAttributeDefinitionUpdate {
+	if v != nil {
+		_u.SetKey(*v)
+	}
+	return _u
+}
+
+// SetName sets the "name" field.
+func (_u *UserAttributeDefinitionUpdate) SetName(v string) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetName(v)
+	return _u
+}
+
+// SetNillableName sets the "name" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdate) SetNillableName(v *string) *UserAttributeDefinitionUpdate {
+	if v != nil {
+		_u.SetName(*v)
+	}
+	return _u
+}
+
+// SetDescription sets the "description" field.
+func (_u *UserAttributeDefinitionUpdate) SetDescription(v string) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetDescription(v)
+	return _u
+}
+
+// SetNillableDescription sets the "description" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdate) SetNillableDescription(v *string) *UserAttributeDefinitionUpdate {
+	if v != nil {
+		_u.SetDescription(*v)
+	}
+	return _u
+}
+
+// SetType sets the "type" field.
+func (_u *UserAttributeDefinitionUpdate) SetType(v string) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetType(v)
+	return _u
+}
+
+// SetNillableType sets the "type" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdate) SetNillableType(v *string) *UserAttributeDefinitionUpdate {
+	if v != nil {
+		_u.SetType(*v)
+	}
+	return _u
+}
+
+// SetOptions sets the "options" field.
+func (_u *UserAttributeDefinitionUpdate) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetOptions(v)
+	return _u
+}
+
+// AppendOptions appends value to the "options" field.
+func (_u *UserAttributeDefinitionUpdate) AppendOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdate {
+	_u.mutation.AppendOptions(v)
+	return _u
+}
+
+// SetRequired sets the "required" field.
+func (_u *UserAttributeDefinitionUpdate) SetRequired(v bool) *UserAttributeDefinitionUpdate {
+	_u.mutation.SetRequired(v)
+	return _u
+}
+
+// SetNillableRequired sets the "required" field if the given value is not nil.
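+//
+// The SetNillable* setters suit PATCH-style handlers where optional JSON
+// fields decode into pointers (a sketch; "req" is a hypothetical request
+// struct with *bool and *string fields):
+//
+//	_u := client.UserAttributeDefinition.Update().
+//		SetNillableRequired(req.Required).
+//		SetNillablePlaceholder(req.Placeholder)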
+func (_u *UserAttributeDefinitionUpdate) SetNillableRequired(v *bool) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetRequired(*v) + } + return _u +} + +// SetValidation sets the "validation" field. +func (_u *UserAttributeDefinitionUpdate) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpdate { + _u.mutation.SetValidation(v) + return _u +} + +// SetPlaceholder sets the "placeholder" field. +func (_u *UserAttributeDefinitionUpdate) SetPlaceholder(v string) *UserAttributeDefinitionUpdate { + _u.mutation.SetPlaceholder(v) + return _u +} + +// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillablePlaceholder(v *string) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetPlaceholder(*v) + } + return _u +} + +// SetDisplayOrder sets the "display_order" field. +func (_u *UserAttributeDefinitionUpdate) SetDisplayOrder(v int) *UserAttributeDefinitionUpdate { + _u.mutation.ResetDisplayOrder() + _u.mutation.SetDisplayOrder(v) + return _u +} + +// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetDisplayOrder(*v) + } + return _u +} + +// AddDisplayOrder adds value to the "display_order" field. +func (_u *UserAttributeDefinitionUpdate) AddDisplayOrder(v int) *UserAttributeDefinitionUpdate { + _u.mutation.AddDisplayOrder(v) + return _u +} + +// SetEnabled sets the "enabled" field. +func (_u *UserAttributeDefinitionUpdate) SetEnabled(v bool) *UserAttributeDefinitionUpdate { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdate) SetNillableEnabled(v *bool) *UserAttributeDefinitionUpdate { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs. +func (_u *UserAttributeDefinitionUpdate) AddValueIDs(ids ...int64) *UserAttributeDefinitionUpdate { + _u.mutation.AddValueIDs(ids...) + return _u +} + +// AddValues adds the "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdate) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddValueIDs(ids...) +} + +// Mutation returns the UserAttributeDefinitionMutation object of the builder. +func (_u *UserAttributeDefinitionUpdate) Mutation() *UserAttributeDefinitionMutation { + return _u.mutation +} + +// ClearValues clears all "values" edges to the UserAttributeValue entity. +func (_u *UserAttributeDefinitionUpdate) ClearValues() *UserAttributeDefinitionUpdate { + _u.mutation.ClearValues() + return _u +} + +// RemoveValueIDs removes the "values" edge to UserAttributeValue entities by IDs. +func (_u *UserAttributeDefinitionUpdate) RemoveValueIDs(ids ...int64) *UserAttributeDefinitionUpdate { + _u.mutation.RemoveValueIDs(ids...) + return _u +} + +// RemoveValues removes "values" edges to UserAttributeValue entities. +func (_u *UserAttributeDefinitionUpdate) RemoveValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveValueIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
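+//
+// For example, detaching previously loaded values from matching rows (a
+// sketch; "def" and "vals" are hypothetical, previously fetched entities):
+//
+//	affected, err := client.UserAttributeDefinition.Update().
+//		Where(userattributedefinition.IDEQ(def.ID)).
+//		RemoveValues(vals...).
+//		Save(ctx)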
+func (_u *UserAttributeDefinitionUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAttributeDefinitionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeDefinitionUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if userattributedefinition.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeDefinitionUpdate) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if v, ok := _u.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + return nil +} + +func (_u *UserAttributeDefinitionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributedefinition.Table, userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(userattributedefinition.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(userattributedefinition.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(userattributedefinition.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + 
_spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(userattributedefinition.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Options(); ok { + _spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedOptions(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, userattributedefinition.FieldOptions, value) + }) + } + if value, ok := _u.mutation.Required(); ok { + _spec.SetField(userattributedefinition.FieldRequired, field.TypeBool, value) + } + if value, ok := _u.mutation.Validation(); ok { + _spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value) + } + if value, ok := _u.mutation.Placeholder(); ok { + _spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value) + } + if value, ok := _u.mutation.DisplayOrder(); ok { + _spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDisplayOrder(); ok { + _spec.AddField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value) + } + if _u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedValuesIDs(); len(nodes) > 0 && !_u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributedefinition.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAttributeDefinitionUpdateOne is the builder for updating a single UserAttributeDefinition entity. +type UserAttributeDefinitionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAttributeDefinitionMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *UserAttributeDefinitionUpdateOne) SetUpdatedAt(v time.Time) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdateOne) SetDeletedAt(v time.Time) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableDeletedAt(v *time.Time) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserAttributeDefinitionUpdateOne) ClearDeletedAt() *UserAttributeDefinitionUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetKey sets the "key" field. +func (_u *UserAttributeDefinitionUpdateOne) SetKey(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetKey(v) + return _u +} + +// SetNillableKey sets the "key" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableKey(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetKey(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *UserAttributeDefinitionUpdateOne) SetName(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableName(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetDescription sets the "description" field. +func (_u *UserAttributeDefinitionUpdateOne) SetDescription(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetDescription(v) + return _u +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableDescription(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetDescription(*v) + } + return _u +} + +// SetType sets the "type" field. +func (_u *UserAttributeDefinitionUpdateOne) SetType(v string) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetType(v) + return _u +} + +// SetNillableType sets the "type" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableType(v *string) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetType(*v) + } + return _u +} + +// SetOptions sets the "options" field. +func (_u *UserAttributeDefinitionUpdateOne) SetOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetOptions(v) + return _u +} + +// AppendOptions appends value to the "options" field. +func (_u *UserAttributeDefinitionUpdateOne) AppendOptions(v []map[string]interface{}) *UserAttributeDefinitionUpdateOne { + _u.mutation.AppendOptions(v) + return _u +} + +// SetRequired sets the "required" field. +func (_u *UserAttributeDefinitionUpdateOne) SetRequired(v bool) *UserAttributeDefinitionUpdateOne { + _u.mutation.SetRequired(v) + return _u +} + +// SetNillableRequired sets the "required" field if the given value is not nil. +func (_u *UserAttributeDefinitionUpdateOne) SetNillableRequired(v *bool) *UserAttributeDefinitionUpdateOne { + if v != nil { + _u.SetRequired(*v) + } + return _u +} + +// SetValidation sets the "validation" field. 
+func (_u *UserAttributeDefinitionUpdateOne) SetValidation(v map[string]interface{}) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.SetValidation(v)
+	return _u
+}
+
+// SetPlaceholder sets the "placeholder" field.
+func (_u *UserAttributeDefinitionUpdateOne) SetPlaceholder(v string) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.SetPlaceholder(v)
+	return _u
+}
+
+// SetNillablePlaceholder sets the "placeholder" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdateOne) SetNillablePlaceholder(v *string) *UserAttributeDefinitionUpdateOne {
+	if v != nil {
+		_u.SetPlaceholder(*v)
+	}
+	return _u
+}
+
+// SetDisplayOrder sets the "display_order" field.
+func (_u *UserAttributeDefinitionUpdateOne) SetDisplayOrder(v int) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.ResetDisplayOrder()
+	_u.mutation.SetDisplayOrder(v)
+	return _u
+}
+
+// SetNillableDisplayOrder sets the "display_order" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdateOne) SetNillableDisplayOrder(v *int) *UserAttributeDefinitionUpdateOne {
+	if v != nil {
+		_u.SetDisplayOrder(*v)
+	}
+	return _u
+}
+
+// AddDisplayOrder adds value to the "display_order" field.
+func (_u *UserAttributeDefinitionUpdateOne) AddDisplayOrder(v int) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.AddDisplayOrder(v)
+	return _u
+}
+
+// SetEnabled sets the "enabled" field.
+func (_u *UserAttributeDefinitionUpdateOne) SetEnabled(v bool) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.SetEnabled(v)
+	return _u
+}
+
+// SetNillableEnabled sets the "enabled" field if the given value is not nil.
+func (_u *UserAttributeDefinitionUpdateOne) SetNillableEnabled(v *bool) *UserAttributeDefinitionUpdateOne {
+	if v != nil {
+		_u.SetEnabled(*v)
+	}
+	return _u
+}
+
+// AddValueIDs adds the "values" edge to the UserAttributeValue entity by IDs.
+func (_u *UserAttributeDefinitionUpdateOne) AddValueIDs(ids ...int64) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.AddValueIDs(ids...)
+	return _u
+}
+
+// AddValues adds the "values" edges to the UserAttributeValue entity.
+func (_u *UserAttributeDefinitionUpdateOne) AddValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.AddValueIDs(ids...)
+}
+
+// Mutation returns the UserAttributeDefinitionMutation object of the builder.
+func (_u *UserAttributeDefinitionUpdateOne) Mutation() *UserAttributeDefinitionMutation {
+	return _u.mutation
+}
+
+// ClearValues clears all "values" edges to the UserAttributeValue entity.
+func (_u *UserAttributeDefinitionUpdateOne) ClearValues() *UserAttributeDefinitionUpdateOne {
+	_u.mutation.ClearValues()
+	return _u
+}
+
+// RemoveValueIDs removes the "values" edge to UserAttributeValue entities by IDs.
+func (_u *UserAttributeDefinitionUpdateOne) RemoveValueIDs(ids ...int64) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.RemoveValueIDs(ids...)
+	return _u
+}
+
+// RemoveValues removes "values" edges to UserAttributeValue entities.
+func (_u *UserAttributeDefinitionUpdateOne) RemoveValues(v ...*UserAttributeValue) *UserAttributeDefinitionUpdateOne {
+	ids := make([]int64, len(v))
+	for i := range v {
+		ids[i] = v[i].ID
+	}
+	return _u.RemoveValueIDs(ids...)
+}
+
+// Where appends a list of predicates to the UserAttributeDefinitionUpdateOne builder.
+func (_u *UserAttributeDefinitionUpdateOne) Where(ps ...predicate.UserAttributeDefinition) *UserAttributeDefinitionUpdateOne {
+	_u.mutation.Where(ps...)
+ return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserAttributeDefinitionUpdateOne) Select(field string, fields ...string) *UserAttributeDefinitionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserAttributeDefinition entity. +func (_u *UserAttributeDefinitionUpdateOne) Save(ctx context.Context) (*UserAttributeDefinition, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdateOne) SaveX(ctx context.Context) *UserAttributeDefinition { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAttributeDefinitionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeDefinitionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeDefinitionUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if userattributedefinition.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized userattributedefinition.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := userattributedefinition.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserAttributeDefinitionUpdateOne) check() error { + if v, ok := _u.mutation.Key(); ok { + if err := userattributedefinition.KeyValidator(v); err != nil { + return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.key": %w`, err)} + } + } + if v, ok := _u.mutation.Name(); ok { + if err := userattributedefinition.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.name": %w`, err)} + } + } + if v, ok := _u.mutation.GetType(); ok { + if err := userattributedefinition.TypeValidator(v); err != nil { + return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.type": %w`, err)} + } + } + if v, ok := _u.mutation.Placeholder(); ok { + if err := userattributedefinition.PlaceholderValidator(v); err != nil { + return &ValidationError{Name: "placeholder", err: fmt.Errorf(`ent: validator failed for field "UserAttributeDefinition.placeholder": %w`, err)} + } + } + return nil +} + +func (_u *UserAttributeDefinitionUpdateOne) sqlSave(ctx context.Context) (_node *UserAttributeDefinition, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributedefinition.Table, userattributedefinition.Columns, sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserAttributeDefinition.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributedefinition.FieldID) + for _, f := range fields { + if !userattributedefinition.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != userattributedefinition.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributedefinition.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(userattributedefinition.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(userattributedefinition.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.Key(); ok { + _spec.SetField(userattributedefinition.FieldKey, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(userattributedefinition.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.Description(); ok { + _spec.SetField(userattributedefinition.FieldDescription, field.TypeString, value) + } + if value, ok := _u.mutation.GetType(); ok { + _spec.SetField(userattributedefinition.FieldType, field.TypeString, value) + } + if value, ok := _u.mutation.Options(); ok { + _spec.SetField(userattributedefinition.FieldOptions, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedOptions(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, userattributedefinition.FieldOptions, value) + }) + } + if value, ok := _u.mutation.Required(); ok { + _spec.SetField(userattributedefinition.FieldRequired, 
field.TypeBool, value) + } + if value, ok := _u.mutation.Validation(); ok { + _spec.SetField(userattributedefinition.FieldValidation, field.TypeJSON, value) + } + if value, ok := _u.mutation.Placeholder(); ok { + _spec.SetField(userattributedefinition.FieldPlaceholder, field.TypeString, value) + } + if value, ok := _u.mutation.DisplayOrder(); ok { + _spec.SetField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedDisplayOrder(); ok { + _spec.AddField(userattributedefinition.FieldDisplayOrder, field.TypeInt, value) + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(userattributedefinition.FieldEnabled, field.TypeBool, value) + } + if _u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedValuesIDs(); len(nodes) > 0 && !_u.mutation.ValuesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.ValuesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: userattributedefinition.ValuesTable, + Columns: []string{userattributedefinition.ValuesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserAttributeDefinition{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributedefinition.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/userattributevalue.go b/backend/ent/userattributevalue.go new file mode 100644 index 00000000..8dced925 --- /dev/null +++ b/backend/ent/userattributevalue.go @@ -0,0 +1,198 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" +) + +// UserAttributeValue is the model entity for the UserAttributeValue schema. +type UserAttributeValue struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. 
+ UpdatedAt time.Time `json:"updated_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // AttributeID holds the value of the "attribute_id" field. + AttributeID int64 `json:"attribute_id,omitempty"` + // Value holds the value of the "value" field. + Value string `json:"value,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserAttributeValueQuery when eager-loading is set. + Edges UserAttributeValueEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserAttributeValueEdges holds the relations/edges for other nodes in the graph. +type UserAttributeValueEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Definition holds the value of the definition edge. + Definition *UserAttributeDefinition `json:"definition,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAttributeValueEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// DefinitionOrErr returns the Definition value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserAttributeValueEdges) DefinitionOrErr() (*UserAttributeDefinition, error) { + if e.Definition != nil { + return e.Definition, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: userattributedefinition.Label} + } + return nil, &NotLoadedError{edge: "definition"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*UserAttributeValue) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case userattributevalue.FieldID, userattributevalue.FieldUserID, userattributevalue.FieldAttributeID: + values[i] = new(sql.NullInt64) + case userattributevalue.FieldValue: + values[i] = new(sql.NullString) + case userattributevalue.FieldCreatedAt, userattributevalue.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserAttributeValue fields. 
+func (_m *UserAttributeValue) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case userattributevalue.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case userattributevalue.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case userattributevalue.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case userattributevalue.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case userattributevalue.FieldAttributeID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field attribute_id", values[i]) + } else if value.Valid { + _m.AttributeID = value.Int64 + } + case userattributevalue.FieldValue: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field value", values[i]) + } else if value.Valid { + _m.Value = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// GetValue returns the ent.Value that was dynamically selected and assigned to the UserAttributeValue. +// This includes values selected through modifiers, order, etc. +func (_m *UserAttributeValue) GetValue(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserAttributeValue entity. +func (_m *UserAttributeValue) QueryUser() *UserQuery { + return NewUserAttributeValueClient(_m.config).QueryUser(_m) +} + +// QueryDefinition queries the "definition" edge of the UserAttributeValue entity. +func (_m *UserAttributeValue) QueryDefinition() *UserAttributeDefinitionQuery { + return NewUserAttributeValueClient(_m.config).QueryDefinition(_m) +} + +// Update returns a builder for updating this UserAttributeValue. +// Note that you need to call UserAttributeValue.Unwrap() before calling this method if this UserAttributeValue +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserAttributeValue) Update() *UserAttributeValueUpdateOne { + return NewUserAttributeValueClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserAttributeValue entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserAttributeValue) Unwrap() *UserAttributeValue { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserAttributeValue is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *UserAttributeValue) String() string { + var builder strings.Builder + builder.WriteString("UserAttributeValue(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("attribute_id=") + builder.WriteString(fmt.Sprintf("%v", _m.AttributeID)) + builder.WriteString(", ") + builder.WriteString("value=") + builder.WriteString(_m.Value) + builder.WriteByte(')') + return builder.String() +} + +// UserAttributeValues is a parsable slice of UserAttributeValue. +type UserAttributeValues []*UserAttributeValue diff --git a/backend/ent/userattributevalue/userattributevalue.go b/backend/ent/userattributevalue/userattributevalue.go new file mode 100644 index 00000000..b8bb5842 --- /dev/null +++ b/backend/ent/userattributevalue/userattributevalue.go @@ -0,0 +1,139 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributevalue + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the userattributevalue type in the database. + Label = "user_attribute_value" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldAttributeID holds the string denoting the attribute_id field in the database. + FieldAttributeID = "attribute_id" + // FieldValue holds the string denoting the value field in the database. + FieldValue = "value" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeDefinition holds the string denoting the definition edge name in mutations. + EdgeDefinition = "definition" + // Table holds the table name of the userattributevalue in the database. + Table = "user_attribute_values" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_attribute_values" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // DefinitionTable is the table that holds the definition relation/edge. + DefinitionTable = "user_attribute_values" + // DefinitionInverseTable is the table name for the UserAttributeDefinition entity. + // It exists in this package in order to avoid circular dependency with the "userattributedefinition" package. + DefinitionInverseTable = "user_attribute_definitions" + // DefinitionColumn is the table column denoting the definition relation/edge. + DefinitionColumn = "attribute_id" +) + +// Columns holds all SQL columns for userattributevalue fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldUserID, + FieldAttributeID, + FieldValue, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultValue holds the default value on creation for the "value" field. + DefaultValue string +) + +// OrderOption defines the ordering options for the UserAttributeValue queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByAttributeID orders the results by the attribute_id field. +func ByAttributeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAttributeID, opts...).ToFunc() +} + +// ByValue orders the results by the value field. +func ByValue(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldValue, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByDefinitionField orders the results by definition field. +func ByDefinitionField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newDefinitionStep(), sql.OrderByField(field, opts...)) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newDefinitionStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(DefinitionInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, DefinitionTable, DefinitionColumn), + ) +} diff --git a/backend/ent/userattributevalue/where.go b/backend/ent/userattributevalue/where.go new file mode 100644 index 00000000..43c3213e --- /dev/null +++ b/backend/ent/userattributevalue/where.go @@ -0,0 +1,327 @@ +// Code generated by ent, DO NOT EDIT. + +package userattributevalue + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUserID, v)) +} + +// AttributeID applies equality check predicate on the "attribute_id" field. It's identical to AttributeIDEQ. +func AttributeID(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldAttributeID, v)) +} + +// Value applies equality check predicate on the "value" field. It's identical to ValueEQ. +func Value(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldValue, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldUserID, vs...)) +} + +// AttributeIDEQ applies the EQ predicate on the "attribute_id" field. +func AttributeIDEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldAttributeID, v)) +} + +// AttributeIDNEQ applies the NEQ predicate on the "attribute_id" field. 
+func AttributeIDNEQ(v int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldAttributeID, v)) +} + +// AttributeIDIn applies the In predicate on the "attribute_id" field. +func AttributeIDIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldAttributeID, vs...)) +} + +// AttributeIDNotIn applies the NotIn predicate on the "attribute_id" field. +func AttributeIDNotIn(vs ...int64) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldAttributeID, vs...)) +} + +// ValueEQ applies the EQ predicate on the "value" field. +func ValueEQ(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEQ(FieldValue, v)) +} + +// ValueNEQ applies the NEQ predicate on the "value" field. +func ValueNEQ(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNEQ(FieldValue, v)) +} + +// ValueIn applies the In predicate on the "value" field. +func ValueIn(vs ...string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldIn(FieldValue, vs...)) +} + +// ValueNotIn applies the NotIn predicate on the "value" field. +func ValueNotIn(vs ...string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldNotIn(FieldValue, vs...)) +} + +// ValueGT applies the GT predicate on the "value" field. +func ValueGT(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGT(FieldValue, v)) +} + +// ValueGTE applies the GTE predicate on the "value" field. +func ValueGTE(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldGTE(FieldValue, v)) +} + +// ValueLT applies the LT predicate on the "value" field. +func ValueLT(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLT(FieldValue, v)) +} + +// ValueLTE applies the LTE predicate on the "value" field. +func ValueLTE(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldLTE(FieldValue, v)) +} + +// ValueContains applies the Contains predicate on the "value" field. +func ValueContains(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldContains(FieldValue, v)) +} + +// ValueHasPrefix applies the HasPrefix predicate on the "value" field. +func ValueHasPrefix(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldHasPrefix(FieldValue, v)) +} + +// ValueHasSuffix applies the HasSuffix predicate on the "value" field. +func ValueHasSuffix(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldHasSuffix(FieldValue, v)) +} + +// ValueEqualFold applies the EqualFold predicate on the "value" field. +func ValueEqualFold(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldEqualFold(FieldValue, v)) +} + +// ValueContainsFold applies the ContainsFold predicate on the "value" field. +func ValueContainsFold(v string) predicate.UserAttributeValue { + return predicate.UserAttributeValue(sql.FieldContainsFold(FieldValue, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. 
+func HasUser() predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasUserWith applies the HasEdge predicate on the "user" edge with given conditions (other predicates).
+func HasUserWith(preds ...predicate.User) predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(func(s *sql.Selector) {
+		step := newUserStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// HasDefinition applies the HasEdge predicate on the "definition" edge.
+func HasDefinition() predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(func(s *sql.Selector) {
+		step := sqlgraph.NewStep(
+			sqlgraph.From(Table, FieldID),
+			sqlgraph.Edge(sqlgraph.M2O, true, DefinitionTable, DefinitionColumn),
+		)
+		sqlgraph.HasNeighbors(s, step)
+	})
+}
+
+// HasDefinitionWith applies the HasEdge predicate on the "definition" edge with given conditions (other predicates).
+func HasDefinitionWith(preds ...predicate.UserAttributeDefinition) predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(func(s *sql.Selector) {
+		step := newDefinitionStep()
+		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
+			for _, p := range preds {
+				p(s)
+			}
+		})
+	})
+}
+
+// And groups predicates with the AND operator between them.
+func And(predicates ...predicate.UserAttributeValue) predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(sql.AndPredicates(predicates...))
+}
+
+// Or groups predicates with the OR operator between them.
+func Or(predicates ...predicate.UserAttributeValue) predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(sql.OrPredicates(predicates...))
+}
+
+// Not applies the not operator on the given predicate.
+func Not(p predicate.UserAttributeValue) predicate.UserAttributeValue {
+	return predicate.UserAttributeValue(sql.NotPredicates(p))
+}
diff --git a/backend/ent/userattributevalue_create.go b/backend/ent/userattributevalue_create.go
new file mode 100644
index 00000000..c52481dc
--- /dev/null
+++ b/backend/ent/userattributevalue_create.go
@@ -0,0 +1,731 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
+	"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
+)
+
+// UserAttributeValueCreate is the builder for creating a UserAttributeValue entity.
+type UserAttributeValueCreate struct {
+	config
+	mutation *UserAttributeValueMutation
+	hooks    []Hook
+	conflict []sql.ConflictOption
+}
+
+// SetCreatedAt sets the "created_at" field.
+func (_c *UserAttributeValueCreate) SetCreatedAt(v time.Time) *UserAttributeValueCreate {
+	_c.mutation.SetCreatedAt(v)
+	return _c
+}
+
+// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
+func (_c *UserAttributeValueCreate) SetNillableCreatedAt(v *time.Time) *UserAttributeValueCreate {
+	if v != nil {
+		_c.SetCreatedAt(*v)
+	}
+	return _c
+}
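+
+// Illustrative usage (editorial sketch, not generated by ent): a minimal
+// create flow for this builder, assuming an initialized *Client, a
+// context.Context, and hypothetical user/definition IDs:
+//
+//	uav, err := client.UserAttributeValue.Create().
+//		SetUserID(42).
+//		SetAttributeID(7).
+//		SetValue("golang").
+//		Save(ctx)
+
+// SetUpdatedAt sets the "updated_at" field.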
+func (_c *UserAttributeValueCreate) SetUpdatedAt(v time.Time) *UserAttributeValueCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserAttributeValueCreate) SetNillableUpdatedAt(v *time.Time) *UserAttributeValueCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *UserAttributeValueCreate) SetUserID(v int64) *UserAttributeValueCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetAttributeID sets the "attribute_id" field. +func (_c *UserAttributeValueCreate) SetAttributeID(v int64) *UserAttributeValueCreate { + _c.mutation.SetAttributeID(v) + return _c +} + +// SetValue sets the "value" field. +func (_c *UserAttributeValueCreate) SetValue(v string) *UserAttributeValueCreate { + _c.mutation.SetValue(v) + return _c +} + +// SetNillableValue sets the "value" field if the given value is not nil. +func (_c *UserAttributeValueCreate) SetNillableValue(v *string) *UserAttributeValueCreate { + if v != nil { + _c.SetValue(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UserAttributeValueCreate) SetUser(v *User) *UserAttributeValueCreate { + return _c.SetUserID(v.ID) +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID. +func (_c *UserAttributeValueCreate) SetDefinitionID(id int64) *UserAttributeValueCreate { + _c.mutation.SetDefinitionID(id) + return _c +} + +// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity. +func (_c *UserAttributeValueCreate) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueCreate { + return _c.SetDefinitionID(v.ID) +} + +// Mutation returns the UserAttributeValueMutation object of the builder. +func (_c *UserAttributeValueCreate) Mutation() *UserAttributeValueMutation { + return _c.mutation +} + +// Save creates the UserAttributeValue in the database. +func (_c *UserAttributeValueCreate) Save(ctx context.Context) (*UserAttributeValue, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserAttributeValueCreate) SaveX(ctx context.Context) *UserAttributeValue { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserAttributeValueCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserAttributeValueCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserAttributeValueCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := userattributevalue.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := userattributevalue.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Value(); !ok { + v := userattributevalue.DefaultValue + _c.mutation.SetValue(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UserAttributeValueCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserAttributeValue.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserAttributeValue.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserAttributeValue.user_id"`)} + } + if _, ok := _c.mutation.AttributeID(); !ok { + return &ValidationError{Name: "attribute_id", err: errors.New(`ent: missing required field "UserAttributeValue.attribute_id"`)} + } + if _, ok := _c.mutation.Value(); !ok { + return &ValidationError{Name: "value", err: errors.New(`ent: missing required field "UserAttributeValue.value"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserAttributeValue.user"`)} + } + if len(_c.mutation.DefinitionIDs()) == 0 { + return &ValidationError{Name: "definition", err: errors.New(`ent: missing required edge "UserAttributeValue.definition"`)} + } + return nil +} + +func (_c *UserAttributeValueCreate) sqlSave(ctx context.Context) (*UserAttributeValue, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserAttributeValueCreate) createSpec() (*UserAttributeValue, *sqlgraph.CreateSpec) { + var ( + _node = &UserAttributeValue{config: _c.config} + _spec = sqlgraph.NewCreateSpec(userattributevalue.Table, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(userattributevalue.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + _node.Value = value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AttributeID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } 
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserAttributeValue.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAttributeValueUpsert) {
+//			u.SetCreatedAt(v)
+//		}).
+//		Exec(ctx)
+func (_c *UserAttributeValueCreate) OnConflict(opts ...sql.ConflictOption) *UserAttributeValueUpsertOne {
+	_c.conflict = opts
+	return &UserAttributeValueUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserAttributeValue.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserAttributeValueCreate) OnConflictColumns(columns ...string) *UserAttributeValueUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserAttributeValueUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// UserAttributeValueUpsertOne is the builder for "upsert"-ing
+	//  one UserAttributeValue node.
+	UserAttributeValueUpsertOne struct {
+		create *UserAttributeValueCreate
+	}
+
+	// UserAttributeValueUpsert is the "OnConflict" setter.
+	UserAttributeValueUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *UserAttributeValueUpsert) SetUpdatedAt(v time.Time) *UserAttributeValueUpsert {
+	u.Set(userattributevalue.FieldUpdatedAt, v)
+	return u
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *UserAttributeValueUpsert) UpdateUpdatedAt() *UserAttributeValueUpsert {
+	u.SetExcluded(userattributevalue.FieldUpdatedAt)
+	return u
+}
+
+// SetUserID sets the "user_id" field.
+func (u *UserAttributeValueUpsert) SetUserID(v int64) *UserAttributeValueUpsert {
+	u.Set(userattributevalue.FieldUserID, v)
+	return u
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *UserAttributeValueUpsert) UpdateUserID() *UserAttributeValueUpsert {
+	u.SetExcluded(userattributevalue.FieldUserID)
+	return u
+}
+
+// SetAttributeID sets the "attribute_id" field.
+func (u *UserAttributeValueUpsert) SetAttributeID(v int64) *UserAttributeValueUpsert {
+	u.Set(userattributevalue.FieldAttributeID, v)
+	return u
+}
+
+// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create.
+func (u *UserAttributeValueUpsert) UpdateAttributeID() *UserAttributeValueUpsert {
+	u.SetExcluded(userattributevalue.FieldAttributeID)
+	return u
+}
+
+// SetValue sets the "value" field.
+func (u *UserAttributeValueUpsert) SetValue(v string) *UserAttributeValueUpsert {
+	u.Set(userattributevalue.FieldValue, v)
+	return u
+}
+
+// UpdateValue sets the "value" field to the value that was provided on create.
+func (u *UserAttributeValueUpsert) UpdateValue() *UserAttributeValueUpsert {
+	u.SetExcluded(userattributevalue.FieldValue)
+	return u
+}
+
+// UpdateNewValues updates the mutable fields using the new values that were set on create.
+// Using this option is equivalent to using:
+//
+//	client.UserAttributeValue.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+// Exec(ctx) +func (u *UserAttributeValueUpsertOne) UpdateNewValues() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(userattributevalue.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserAttributeValue.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserAttributeValueUpsertOne) Ignore() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserAttributeValueUpsertOne) DoNothing() *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserAttributeValueCreate.OnConflict +// documentation for more info. +func (u *UserAttributeValueUpsertOne) Update(set func(*UserAttributeValueUpsert)) *UserAttributeValueUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserAttributeValueUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserAttributeValueUpsertOne) SetUpdatedAt(v time.Time) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateUpdatedAt() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *UserAttributeValueUpsertOne) SetUserID(v int64) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateUserID() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateUserID() + }) +} + +// SetAttributeID sets the "attribute_id" field. +func (u *UserAttributeValueUpsertOne) SetAttributeID(v int64) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetAttributeID(v) + }) +} + +// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateAttributeID() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateAttributeID() + }) +} + +// SetValue sets the "value" field. +func (u *UserAttributeValueUpsertOne) SetValue(v string) *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.SetValue(v) + }) +} + +// UpdateValue sets the "value" field to the value that was provided on create. +func (u *UserAttributeValueUpsertOne) UpdateValue() *UserAttributeValueUpsertOne { + return u.Update(func(s *UserAttributeValueUpsert) { + s.UpdateValue() + }) +} + +// Exec executes the query. 
+func (u *UserAttributeValueUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserAttributeValueCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserAttributeValueUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *UserAttributeValueUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *UserAttributeValueUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// UserAttributeValueCreateBulk is the builder for creating many UserAttributeValue entities in bulk.
+type UserAttributeValueCreateBulk struct {
+	config
+	err      error
+	builders []*UserAttributeValueCreate
+	conflict []sql.ConflictOption
+}
+
+// Save creates the UserAttributeValue entities in the database.
+func (_c *UserAttributeValueCreateBulk) Save(ctx context.Context) ([]*UserAttributeValue, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*UserAttributeValue, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*UserAttributeValueMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UserAttributeValueCreateBulk) SaveX(ctx context.Context) []*UserAttributeValue {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UserAttributeValueCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
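+
+// Illustrative usage (editorial sketch, not generated by ent): batching
+// several creates into a single INSERT; the user IDs 1 and 2 and the
+// definition ID 7 are hypothetical:
+//
+//	values, err := client.UserAttributeValue.CreateBulk(
+//		client.UserAttributeValue.Create().SetUserID(1).SetAttributeID(7).SetValue("a"),
+//		client.UserAttributeValue.Create().SetUserID(2).SetAttributeID(7).SetValue("b"),
+//	).Save(ctx)
+
+// ExecX is like Exec, but panics if an error occurs.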
+func (_c *UserAttributeValueCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserAttributeValue.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserAttributeValueUpsert) {
+//			u.SetCreatedAt(v)
+//		}).
+//		Exec(ctx)
+func (_c *UserAttributeValueCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserAttributeValueUpsertBulk {
+	_c.conflict = opts
+	return &UserAttributeValueUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserAttributeValue.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserAttributeValueCreateBulk) OnConflictColumns(columns ...string) *UserAttributeValueUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserAttributeValueUpsertBulk{
+		create: _c,
+	}
+}
+
+// UserAttributeValueUpsertBulk is the builder for "upsert"-ing
+// a bulk of UserAttributeValue nodes.
+type UserAttributeValueUpsertBulk struct {
+	create *UserAttributeValueCreateBulk
+}
+
+// UpdateNewValues updates the mutable fields using the new values that
+// were set on create. Using this option is equivalent to using:
+//
+//	client.UserAttributeValue.Create().
+//		OnConflict(
+//			sql.ResolveWithNewValues(),
+//		).
+//		Exec(ctx)
+func (u *UserAttributeValueUpsertBulk) UpdateNewValues() *UserAttributeValueUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
+		for _, b := range u.create.builders {
+			if _, exists := b.mutation.CreatedAt(); exists {
+				s.SetIgnore(userattributevalue.FieldCreatedAt)
+			}
+		}
+	}))
+	return u
+}
+
+// Ignore sets each column to itself in case of conflict.
+// Using this option is equivalent to using:
+//
+//	client.UserAttributeValue.Create().
+//		OnConflict(sql.ResolveWithIgnore()).
+//		Exec(ctx)
+func (u *UserAttributeValueUpsertBulk) Ignore() *UserAttributeValueUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
+	return u
+}
+
+// DoNothing configures the conflict_action to `DO NOTHING`.
+// Supported only by SQLite and PostgreSQL.
+func (u *UserAttributeValueUpsertBulk) DoNothing() *UserAttributeValueUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.DoNothing())
+	return u
+}
+
+// Update allows overriding fields `UPDATE` values. See the UserAttributeValueCreateBulk.OnConflict
+// documentation for more info.
+func (u *UserAttributeValueUpsertBulk) Update(set func(*UserAttributeValueUpsert)) *UserAttributeValueUpsertBulk {
+	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
+		set(&UserAttributeValueUpsert{UpdateSet: update})
+	}))
+	return u
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *UserAttributeValueUpsertBulk) SetUpdatedAt(v time.Time) *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.SetUpdatedAt(v)
+	})
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *UserAttributeValueUpsertBulk) UpdateUpdatedAt() *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.UpdateUpdatedAt()
+	})
+}
+
+// SetUserID sets the "user_id" field.
+func (u *UserAttributeValueUpsertBulk) SetUserID(v int64) *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.SetUserID(v)
+	})
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *UserAttributeValueUpsertBulk) UpdateUserID() *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.UpdateUserID()
+	})
+}
+
+// SetAttributeID sets the "attribute_id" field.
+func (u *UserAttributeValueUpsertBulk) SetAttributeID(v int64) *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.SetAttributeID(v)
+	})
+}
+
+// UpdateAttributeID sets the "attribute_id" field to the value that was provided on create.
+func (u *UserAttributeValueUpsertBulk) UpdateAttributeID() *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.UpdateAttributeID()
+	})
+}
+
+// SetValue sets the "value" field.
+func (u *UserAttributeValueUpsertBulk) SetValue(v string) *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.SetValue(v)
+	})
+}
+
+// UpdateValue sets the "value" field to the value that was provided on create.
+func (u *UserAttributeValueUpsertBulk) UpdateValue() *UserAttributeValueUpsertBulk {
+	return u.Update(func(s *UserAttributeValueUpsert) {
+		s.UpdateValue()
+	})
+}
+
+// Exec executes the query.
+func (u *UserAttributeValueUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserAttributeValueCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserAttributeValueCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserAttributeValueUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/userattributevalue_delete.go b/backend/ent/userattributevalue_delete.go
new file mode 100644
index 00000000..2805e49f
--- /dev/null
+++ b/backend/ent/userattributevalue_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
+)
+
+// UserAttributeValueDelete is the builder for deleting a UserAttributeValue entity.
+type UserAttributeValueDelete struct {
+	config
+	hooks    []Hook
+	mutation *UserAttributeValueMutation
+}
+
+// Where appends a list of predicates to the UserAttributeValueDelete builder.
+func (_d *UserAttributeValueDelete) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *UserAttributeValueDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
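+
+// Illustrative usage (editorial sketch, not generated by ent): deleting all
+// values that belong to a hypothetical user ID, using the predicate package:
+//
+//	n, err := client.UserAttributeValue.Delete().
+//		Where(userattributevalue.UserID(42)).
+//		Exec(ctx)
+
+// ExecX is like Exec, but panics if an error occurs.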
+func (_d *UserAttributeValueDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *UserAttributeValueDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(userattributevalue.Table, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// UserAttributeValueDeleteOne is the builder for deleting a single UserAttributeValue entity.
+type UserAttributeValueDeleteOne struct {
+	_d *UserAttributeValueDelete
+}
+
+// Where appends a list of predicates to the UserAttributeValueDeleteOne builder.
+func (_d *UserAttributeValueDeleteOne) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UserAttributeValueDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{userattributevalue.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserAttributeValueDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/userattributevalue_query.go b/backend/ent/userattributevalue_query.go
new file mode 100644
index 00000000..a7c6b74a
--- /dev/null
+++ b/backend/ent/userattributevalue_query.go
@@ -0,0 +1,718 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
+	"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
+)
+
+// UserAttributeValueQuery is the builder for querying UserAttributeValue entities.
+type UserAttributeValueQuery struct {
+	config
+	ctx            *QueryContext
+	order          []userattributevalue.OrderOption
+	inters         []Interceptor
+	predicates     []predicate.UserAttributeValue
+	withUser       *UserQuery
+	withDefinition *UserAttributeDefinitionQuery
+	modifiers      []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the UserAttributeValueQuery builder.
+func (_q *UserAttributeValueQuery) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *UserAttributeValueQuery) Limit(limit int) *UserAttributeValueQuery {
+	_q.ctx.Limit = &limit
+	return _q
+}
+
+// Offset to start from.
+func (_q *UserAttributeValueQuery) Offset(offset int) *UserAttributeValueQuery {
+	_q.ctx.Offset = &offset
+	return _q
+}
+
+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.
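+// For example, keeping duplicate rows when only a projection is needed (an
+// illustrative sketch; ScanX scans the selected column into the given slice):
+//
+//	var userIDs []int64
+//	client.UserAttributeValue.Query().
+//		Unique(false).
+//		Select(userattributevalue.FieldUserID).
+//		ScanX(ctx, &userIDs)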
+func (_q *UserAttributeValueQuery) Unique(unique bool) *UserAttributeValueQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserAttributeValueQuery) Order(o ...userattributevalue.OrderOption) *UserAttributeValueQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UserAttributeValueQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.UserTable, userattributevalue.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryDefinition chains the current query on the "definition" edge. +func (_q *UserAttributeValueQuery) QueryDefinition() *UserAttributeDefinitionQuery { + query := (&UserAttributeDefinitionClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(userattributevalue.Table, userattributevalue.FieldID, selector), + sqlgraph.To(userattributedefinition.Table, userattributedefinition.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, userattributevalue.DefinitionTable, userattributevalue.DefinitionColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserAttributeValue entity from the query. +// Returns a *NotFoundError when no UserAttributeValue was found. +func (_q *UserAttributeValueQuery) First(ctx context.Context) (*UserAttributeValue, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{userattributevalue.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserAttributeValueQuery) FirstX(ctx context.Context) *UserAttributeValue { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserAttributeValue ID from the query. +// Returns a *NotFoundError when no UserAttributeValue ID was found. +func (_q *UserAttributeValueQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{userattributevalue.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserAttributeValueQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserAttributeValue entity found by the query, ensuring it only returns one. 
+// Returns a *NotSingularError when more than one UserAttributeValue entity is found. +// Returns a *NotFoundError when no UserAttributeValue entities are found. +func (_q *UserAttributeValueQuery) Only(ctx context.Context) (*UserAttributeValue, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{userattributevalue.Label} + default: + return nil, &NotSingularError{userattributevalue.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserAttributeValueQuery) OnlyX(ctx context.Context) *UserAttributeValue { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserAttributeValue ID in the query. +// Returns a *NotSingularError when more than one UserAttributeValue ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserAttributeValueQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{userattributevalue.Label} + default: + err = &NotSingularError{userattributevalue.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserAttributeValueQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserAttributeValues. +func (_q *UserAttributeValueQuery) All(ctx context.Context) ([]*UserAttributeValue, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserAttributeValue, *UserAttributeValueQuery]() + return withInterceptors[[]*UserAttributeValue](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *UserAttributeValueQuery) AllX(ctx context.Context) []*UserAttributeValue { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserAttributeValue IDs. +func (_q *UserAttributeValueQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(userattributevalue.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserAttributeValueQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserAttributeValueQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserAttributeValueQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserAttributeValueQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
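+// For example (an illustrative sketch; defID is a placeholder variable):
+//
+//	ok, err := client.UserAttributeValue.Query().
+//		Where(userattributevalue.AttributeID(defID)).
+//		Exist(ctx)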
+func (_q *UserAttributeValueQuery) Exist(ctx context.Context) (bool, error) {
+	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
+	switch _, err := _q.FirstID(ctx); {
+	case IsNotFound(err):
+		return false, nil
+	case err != nil:
+		return false, fmt.Errorf("ent: check existence: %w", err)
+	default:
+		return true, nil
+	}
+}
+
+// ExistX is like Exist, but panics if an error occurs.
+func (_q *UserAttributeValueQuery) ExistX(ctx context.Context) bool {
+	exist, err := _q.Exist(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return exist
+}
+
+// Clone returns a duplicate of the UserAttributeValueQuery builder, including all associated steps. It can be
+// used to prepare common query builders and use them differently after the clone is made.
+func (_q *UserAttributeValueQuery) Clone() *UserAttributeValueQuery {
+	if _q == nil {
+		return nil
+	}
+	return &UserAttributeValueQuery{
+		config:         _q.config,
+		ctx:            _q.ctx.Clone(),
+		order:          append([]userattributevalue.OrderOption{}, _q.order...),
+		inters:         append([]Interceptor{}, _q.inters...),
+		predicates:     append([]predicate.UserAttributeValue{}, _q.predicates...),
+		withUser:       _q.withUser.Clone(),
+		withDefinition: _q.withDefinition.Clone(),
+		// clone intermediate query.
+		sql:  _q.sql.Clone(),
+		path: _q.path,
+	}
+}
+
+// WithUser tells the query-builder to eager-load the nodes that are connected to
+// the "user" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserAttributeValueQuery) WithUser(opts ...func(*UserQuery)) *UserAttributeValueQuery {
+	query := (&UserClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withUser = query
+	return _q
+}
+
+// WithDefinition tells the query-builder to eager-load the nodes that are connected to
+// the "definition" edge. The optional arguments are used to configure the query builder of the edge.
+func (_q *UserAttributeValueQuery) WithDefinition(opts ...func(*UserAttributeDefinitionQuery)) *UserAttributeValueQuery {
+	query := (&UserAttributeDefinitionClient{config: _q.config}).Query()
+	for _, opt := range opts {
+		opt(query)
+	}
+	_q.withDefinition = query
+	return _q
+}
+
+// GroupBy is used to group vertices by one or more fields/columns.
+// It is often used with aggregate functions, like: count, max, mean, min, sum.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//		Count int `json:"count,omitempty"`
+//	}
+//
+//	client.UserAttributeValue.Query().
+//		GroupBy(userattributevalue.FieldCreatedAt).
+//		Aggregate(ent.Count()).
+//		Scan(ctx, &v)
+func (_q *UserAttributeValueQuery) GroupBy(field string, fields ...string) *UserAttributeValueGroupBy {
+	_q.ctx.Fields = append([]string{field}, fields...)
+	grbuild := &UserAttributeValueGroupBy{build: _q}
+	grbuild.flds = &_q.ctx.Fields
+	grbuild.label = userattributevalue.Label
+	grbuild.scan = grbuild.Scan
+	return grbuild
+}
+
+// Select allows the selection of one or more fields/columns for the given query,
+// instead of selecting all fields in the entity.
+//
+// Example:
+//
+//	var v []struct {
+//		CreatedAt time.Time `json:"created_at,omitempty"`
+//	}
+//
+//	client.UserAttributeValue.Query().
+//		Select(userattributevalue.FieldCreatedAt).
+//		Scan(ctx, &v)
+func (_q *UserAttributeValueQuery) Select(fields ...string) *UserAttributeValueSelect {
+	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
+ sbuild := &UserAttributeValueSelect{UserAttributeValueQuery: _q} + sbuild.label = userattributevalue.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserAttributeValueSelect configured with the given aggregations. +func (_q *UserAttributeValueQuery) Aggregate(fns ...AggregateFunc) *UserAttributeValueSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *UserAttributeValueQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !userattributevalue.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserAttributeValueQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserAttributeValue, error) { + var ( + nodes = []*UserAttributeValue{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withUser != nil, + _q.withDefinition != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserAttributeValue).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserAttributeValue{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserAttributeValue, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withDefinition; query != nil { + if err := _q.loadDefinition(ctx, query, nodes, nil, + func(n *UserAttributeValue, e *UserAttributeDefinition) { n.Edges.Definition = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserAttributeValueQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserAttributeValue, init func(*UserAttributeValue), assign func(*UserAttributeValue, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAttributeValue) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserAttributeValueQuery) loadDefinition(ctx context.Context, query *UserAttributeDefinitionQuery, nodes []*UserAttributeValue, init func(*UserAttributeValue), assign func(*UserAttributeValue, *UserAttributeDefinition)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserAttributeValue) + for i := range nodes { + fk := nodes[i].AttributeID 
+		if _, ok := nodeids[fk]; !ok {
+			ids = append(ids, fk)
+		}
+		nodeids[fk] = append(nodeids[fk], nodes[i])
+	}
+	if len(ids) == 0 {
+		return nil
+	}
+	query.Where(userattributedefinition.IDIn(ids...))
+	neighbors, err := query.All(ctx)
+	if err != nil {
+		return err
+	}
+	for _, n := range neighbors {
+		nodes, ok := nodeids[n.ID]
+		if !ok {
+			return fmt.Errorf(`unexpected foreign-key "attribute_id" returned %v`, n.ID)
+		}
+		for i := range nodes {
+			assign(nodes[i], n)
+		}
+	}
+	return nil
+}
+
+func (_q *UserAttributeValueQuery) sqlCount(ctx context.Context) (int, error) {
+	_spec := _q.querySpec()
+	if len(_q.modifiers) > 0 {
+		_spec.Modifiers = _q.modifiers
+	}
+	_spec.Node.Columns = _q.ctx.Fields
+	if len(_q.ctx.Fields) > 0 {
+		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
+	}
+	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
+}
+
+func (_q *UserAttributeValueQuery) querySpec() *sqlgraph.QuerySpec {
+	_spec := sqlgraph.NewQuerySpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64))
+	_spec.From = _q.sql
+	if unique := _q.ctx.Unique; unique != nil {
+		_spec.Unique = *unique
+	} else if _q.path != nil {
+		_spec.Unique = true
+	}
+	if fields := _q.ctx.Fields; len(fields) > 0 {
+		_spec.Node.Columns = make([]string, 0, len(fields))
+		_spec.Node.Columns = append(_spec.Node.Columns, userattributevalue.FieldID)
+		for i := range fields {
+			if fields[i] != userattributevalue.FieldID {
+				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
+			}
+		}
+		if _q.withUser != nil {
+			_spec.Node.AddColumnOnce(userattributevalue.FieldUserID)
+		}
+		if _q.withDefinition != nil {
+			_spec.Node.AddColumnOnce(userattributevalue.FieldAttributeID)
+		}
+	}
+	if ps := _q.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		_spec.Limit = *limit
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		_spec.Offset = *offset
+	}
+	if ps := _q.order; len(ps) > 0 {
+		_spec.Order = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	return _spec
+}
+
+func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector {
+	builder := sql.Dialect(_q.driver.Dialect())
+	t1 := builder.Table(userattributevalue.Table)
+	columns := _q.ctx.Fields
+	if len(columns) == 0 {
+		columns = userattributevalue.Columns
+	}
+	selector := builder.Select(t1.Columns(columns...)...).From(t1)
+	if _q.sql != nil {
+		selector = _q.sql
+		selector.Select(selector.Columns(columns...)...)
+	}
+	if _q.ctx.Unique != nil && *_q.ctx.Unique {
+		selector.Distinct()
+	}
+	for _, m := range _q.modifiers {
+		m(selector)
+	}
+	for _, p := range _q.predicates {
+		p(selector)
+	}
+	for _, p := range _q.order {
+		p(selector)
+	}
+	if offset := _q.ctx.Offset; offset != nil {
+		// limit is mandatory for offset clause. We start
+		// with default value, and override it below if needed.
+		selector.Offset(*offset).Limit(math.MaxInt32)
+	}
+	if limit := _q.ctx.Limit; limit != nil {
+		selector.Limit(*limit)
+	}
+	return selector
+}
+
+// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
+// updated, deleted or "selected ... for update" by other sessions, until the transaction is
+// either committed or rolled-back.
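+// For example, locking a single row inside a transaction (an illustrative
+// sketch; tx is an *ent.Tx obtained from client.Tx, and id is a placeholder):
+//
+//	row, err := tx.UserAttributeValue.Query().
+//		Where(userattributevalue.ID(id)).
+//		ForUpdate().
+//		Only(ctx)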
+func (_q *UserAttributeValueQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeValueQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserAttributeValueQuery) ForShare(opts ...sql.LockOption) *UserAttributeValueQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UserAttributeValueGroupBy is the group-by builder for UserAttributeValue entities. +type UserAttributeValueGroupBy struct { + selector + build *UserAttributeValueQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserAttributeValueGroupBy) Aggregate(fns ...AggregateFunc) *UserAttributeValueGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *UserAttributeValueGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserAttributeValueQuery, *UserAttributeValueGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserAttributeValueGroupBy) sqlScan(ctx context.Context, root *UserAttributeValueQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserAttributeValueSelect is the builder for selecting fields of UserAttributeValue entities. +type UserAttributeValueSelect struct { + *UserAttributeValueQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserAttributeValueSelect) Aggregate(fns ...AggregateFunc) *UserAttributeValueSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
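+// For example (a sketch mirroring the Select example above, scanning two
+// columns into an anonymous struct slice):
+//
+//	var v []struct {
+//		UserID int64  `json:"user_id,omitempty"`
+//		Value  string `json:"value,omitempty"`
+//	}
+//	client.UserAttributeValue.Query().
+//		Select(userattributevalue.FieldUserID, userattributevalue.FieldValue).
+//		Scan(ctx, &v)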
+func (_s *UserAttributeValueSelect) Scan(ctx context.Context, v any) error {
+	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
+	if err := _s.prepareQuery(ctx); err != nil {
+		return err
+	}
+	return scanWithInterceptors[*UserAttributeValueQuery, *UserAttributeValueSelect](ctx, _s.UserAttributeValueQuery, _s, _s.inters, v)
+}
+
+func (_s *UserAttributeValueSelect) sqlScan(ctx context.Context, root *UserAttributeValueQuery, v any) error {
+	selector := root.sqlQuery(ctx)
+	aggregation := make([]string, 0, len(_s.fns))
+	for _, fn := range _s.fns {
+		aggregation = append(aggregation, fn(selector))
+	}
+	switch n := len(*_s.selector.flds); {
+	case n == 0 && len(aggregation) > 0:
+		selector.Select(aggregation...)
+	case n != 0 && len(aggregation) > 0:
+		selector.AppendSelect(aggregation...)
+	}
+	rows := &sql.Rows{}
+	query, args := selector.Query()
+	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
+		return err
+	}
+	defer rows.Close()
+	return sql.ScanSlice(rows, v)
+}
diff --git a/backend/ent/userattributevalue_update.go b/backend/ent/userattributevalue_update.go
new file mode 100644
index 00000000..7dfce024
--- /dev/null
+++ b/backend/ent/userattributevalue_update.go
@@ -0,0 +1,504 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
+	"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
+)
+
+// UserAttributeValueUpdate is the builder for updating UserAttributeValue entities.
+type UserAttributeValueUpdate struct {
+	config
+	hooks    []Hook
+	mutation *UserAttributeValueMutation
+}
+
+// Where appends a list of predicates to the UserAttributeValueUpdate builder.
+func (_u *UserAttributeValueUpdate) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueUpdate {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// SetUpdatedAt sets the "updated_at" field.
+func (_u *UserAttributeValueUpdate) SetUpdatedAt(v time.Time) *UserAttributeValueUpdate {
+	_u.mutation.SetUpdatedAt(v)
+	return _u
+}
+
+// SetUserID sets the "user_id" field.
+func (_u *UserAttributeValueUpdate) SetUserID(v int64) *UserAttributeValueUpdate {
+	_u.mutation.SetUserID(v)
+	return _u
+}
+
+// SetNillableUserID sets the "user_id" field if the given value is not nil.
+func (_u *UserAttributeValueUpdate) SetNillableUserID(v *int64) *UserAttributeValueUpdate {
+	if v != nil {
+		_u.SetUserID(*v)
+	}
+	return _u
+}
+
+// SetAttributeID sets the "attribute_id" field.
+func (_u *UserAttributeValueUpdate) SetAttributeID(v int64) *UserAttributeValueUpdate {
+	_u.mutation.SetAttributeID(v)
+	return _u
+}
+
+// SetNillableAttributeID sets the "attribute_id" field if the given value is not nil.
+func (_u *UserAttributeValueUpdate) SetNillableAttributeID(v *int64) *UserAttributeValueUpdate {
+	if v != nil {
+		_u.SetAttributeID(*v)
+	}
+	return _u
+}
+
+// SetValue sets the "value" field.
+func (_u *UserAttributeValueUpdate) SetValue(v string) *UserAttributeValueUpdate {
+	_u.mutation.SetValue(v)
+	return _u
+}
+
+// SetNillableValue sets the "value" field if the given value is not nil.
+func (_u *UserAttributeValueUpdate) SetNillableValue(v *string) *UserAttributeValueUpdate {
+	if v != nil {
+		_u.SetValue(*v)
+	}
+	return _u
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (_u *UserAttributeValueUpdate) SetUser(v *User) *UserAttributeValueUpdate { + return _u.SetUserID(v.ID) +} + +// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID. +func (_u *UserAttributeValueUpdate) SetDefinitionID(id int64) *UserAttributeValueUpdate { + _u.mutation.SetDefinitionID(id) + return _u +} + +// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdate) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueUpdate { + return _u.SetDefinitionID(v.ID) +} + +// Mutation returns the UserAttributeValueMutation object of the builder. +func (_u *UserAttributeValueUpdate) Mutation() *UserAttributeValueMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserAttributeValueUpdate) ClearUser() *UserAttributeValueUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity. +func (_u *UserAttributeValueUpdate) ClearDefinition() *UserAttributeValueUpdate { + _u.mutation.ClearDefinition() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserAttributeValueUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserAttributeValueUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserAttributeValueUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeValueUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeValueUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := userattributevalue.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserAttributeValueUpdate) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.user"`) + } + if _u.mutation.DefinitionCleared() && len(_u.mutation.DefinitionIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.definition"`) + } + return nil +} + +func (_u *UserAttributeValueUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.DefinitionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributevalue.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserAttributeValueUpdateOne is the builder for updating a single UserAttributeValue entity. +type UserAttributeValueUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserAttributeValueMutation +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *UserAttributeValueUpdateOne) SetUpdatedAt(v time.Time) *UserAttributeValueUpdateOne {
+	_u.mutation.SetUpdatedAt(v)
+	return _u
+}
+
+// SetUserID sets the "user_id" field.
+func (_u *UserAttributeValueUpdateOne) SetUserID(v int64) *UserAttributeValueUpdateOne {
+	_u.mutation.SetUserID(v)
+	return _u
+}
+
+// SetNillableUserID sets the "user_id" field if the given value is not nil.
+func (_u *UserAttributeValueUpdateOne) SetNillableUserID(v *int64) *UserAttributeValueUpdateOne {
+	if v != nil {
+		_u.SetUserID(*v)
+	}
+	return _u
+}
+
+// SetAttributeID sets the "attribute_id" field.
+func (_u *UserAttributeValueUpdateOne) SetAttributeID(v int64) *UserAttributeValueUpdateOne {
+	_u.mutation.SetAttributeID(v)
+	return _u
+}
+
+// SetNillableAttributeID sets the "attribute_id" field if the given value is not nil.
+func (_u *UserAttributeValueUpdateOne) SetNillableAttributeID(v *int64) *UserAttributeValueUpdateOne {
+	if v != nil {
+		_u.SetAttributeID(*v)
+	}
+	return _u
+}
+
+// SetValue sets the "value" field.
+func (_u *UserAttributeValueUpdateOne) SetValue(v string) *UserAttributeValueUpdateOne {
+	_u.mutation.SetValue(v)
+	return _u
+}
+
+// SetNillableValue sets the "value" field if the given value is not nil.
+func (_u *UserAttributeValueUpdateOne) SetNillableValue(v *string) *UserAttributeValueUpdateOne {
+	if v != nil {
+		_u.SetValue(*v)
+	}
+	return _u
+}
+
+// SetUser sets the "user" edge to the User entity.
+func (_u *UserAttributeValueUpdateOne) SetUser(v *User) *UserAttributeValueUpdateOne {
+	return _u.SetUserID(v.ID)
+}
+
+// SetDefinitionID sets the "definition" edge to the UserAttributeDefinition entity by ID.
+func (_u *UserAttributeValueUpdateOne) SetDefinitionID(id int64) *UserAttributeValueUpdateOne {
+	_u.mutation.SetDefinitionID(id)
+	return _u
+}
+
+// SetDefinition sets the "definition" edge to the UserAttributeDefinition entity.
+func (_u *UserAttributeValueUpdateOne) SetDefinition(v *UserAttributeDefinition) *UserAttributeValueUpdateOne {
+	return _u.SetDefinitionID(v.ID)
+}
+
+// Mutation returns the UserAttributeValueMutation object of the builder.
+func (_u *UserAttributeValueUpdateOne) Mutation() *UserAttributeValueMutation {
+	return _u.mutation
+}
+
+// ClearUser clears the "user" edge to the User entity.
+func (_u *UserAttributeValueUpdateOne) ClearUser() *UserAttributeValueUpdateOne {
+	_u.mutation.ClearUser()
+	return _u
+}
+
+// ClearDefinition clears the "definition" edge to the UserAttributeDefinition entity.
+func (_u *UserAttributeValueUpdateOne) ClearDefinition() *UserAttributeValueUpdateOne {
+	_u.mutation.ClearDefinition()
+	return _u
+}
+
+// Where appends a list of predicates to the UserAttributeValueUpdateOne builder.
+func (_u *UserAttributeValueUpdateOne) Where(ps ...predicate.UserAttributeValue) *UserAttributeValueUpdateOne {
+	_u.mutation.Where(ps...)
+	return _u
+}
+
+// Select allows selecting one or more fields (columns) of the returned entity.
+// The default is selecting all fields defined in the entity schema.
+func (_u *UserAttributeValueUpdateOne) Select(field string, fields ...string) *UserAttributeValueUpdateOne {
+	_u.fields = append([]string{field}, fields...)
+	return _u
+}
+
+// Save executes the query and returns the updated UserAttributeValue entity.
+func (_u *UserAttributeValueUpdateOne) Save(ctx context.Context) (*UserAttributeValue, error) {
+	_u.defaults()
+	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_u *UserAttributeValueUpdateOne) SaveX(ctx context.Context) *UserAttributeValue { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserAttributeValueUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserAttributeValueUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserAttributeValueUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := userattributevalue.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserAttributeValueUpdateOne) check() error { + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.user"`) + } + if _u.mutation.DefinitionCleared() && len(_u.mutation.DefinitionIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserAttributeValue.definition"`) + } + return nil +} + +func (_u *UserAttributeValueUpdateOne) sqlSave(ctx context.Context) (_node *UserAttributeValue, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(userattributevalue.Table, userattributevalue.Columns, sqlgraph.NewFieldSpec(userattributevalue.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserAttributeValue.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, userattributevalue.FieldID) + for _, f := range fields { + if !userattributevalue.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != userattributevalue.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(userattributevalue.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Value(); ok { + _spec.SetField(userattributevalue.FieldValue, field.TypeString, value) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.UserTable, + Columns: []string{userattributevalue.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.DefinitionCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + 
Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.DefinitionIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: userattributevalue.DefinitionTable, + Columns: []string{userattributevalue.DefinitionColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(userattributedefinition.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserAttributeValue{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{userattributevalue.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/usersubscription.go b/backend/ent/usersubscription.go new file mode 100644 index 00000000..01beb2fc --- /dev/null +++ b/backend/ent/usersubscription.go @@ -0,0 +1,384 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscription is the model entity for the UserSubscription schema. +type UserSubscription struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // DeletedAt holds the value of the "deleted_at" field. + DeletedAt *time.Time `json:"deleted_at,omitempty"` + // UserID holds the value of the "user_id" field. + UserID int64 `json:"user_id,omitempty"` + // GroupID holds the value of the "group_id" field. + GroupID int64 `json:"group_id,omitempty"` + // StartsAt holds the value of the "starts_at" field. + StartsAt time.Time `json:"starts_at,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // DailyWindowStart holds the value of the "daily_window_start" field. + DailyWindowStart *time.Time `json:"daily_window_start,omitempty"` + // WeeklyWindowStart holds the value of the "weekly_window_start" field. + WeeklyWindowStart *time.Time `json:"weekly_window_start,omitempty"` + // MonthlyWindowStart holds the value of the "monthly_window_start" field. + MonthlyWindowStart *time.Time `json:"monthly_window_start,omitempty"` + // DailyUsageUsd holds the value of the "daily_usage_usd" field. + DailyUsageUsd float64 `json:"daily_usage_usd,omitempty"` + // WeeklyUsageUsd holds the value of the "weekly_usage_usd" field. + WeeklyUsageUsd float64 `json:"weekly_usage_usd,omitempty"` + // MonthlyUsageUsd holds the value of the "monthly_usage_usd" field. 
+ MonthlyUsageUsd float64 `json:"monthly_usage_usd,omitempty"` + // AssignedBy holds the value of the "assigned_by" field. + AssignedBy *int64 `json:"assigned_by,omitempty"` + // AssignedAt holds the value of the "assigned_at" field. + AssignedAt time.Time `json:"assigned_at,omitempty"` + // Notes holds the value of the "notes" field. + Notes *string `json:"notes,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserSubscriptionQuery when eager-loading is set. + Edges UserSubscriptionEdges `json:"edges"` + selectValues sql.SelectValues +} + +// UserSubscriptionEdges holds the relations/edges for other nodes in the graph. +type UserSubscriptionEdges struct { + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // Group holds the value of the group edge. + Group *Group `json:"group,omitempty"` + // AssignedByUser holds the value of the assigned_by_user edge. + AssignedByUser *User `json:"assigned_by_user,omitempty"` + // UsageLogs holds the value of the usage_logs edge. + UsageLogs []*UsageLog `json:"usage_logs,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [4]bool +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserSubscriptionEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// GroupOrErr returns the Group value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserSubscriptionEdges) GroupOrErr() (*Group, error) { + if e.Group != nil { + return e.Group, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: group.Label} + } + return nil, &NotLoadedError{edge: "group"} +} + +// AssignedByUserOrErr returns the AssignedByUser value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserSubscriptionEdges) AssignedByUserOrErr() (*User, error) { + if e.AssignedByUser != nil { + return e.AssignedByUser, nil + } else if e.loadedTypes[2] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "assigned_by_user"} +} + +// UsageLogsOrErr returns the UsageLogs value or an error if the edge +// was not loaded in eager-loading. +func (e UserSubscriptionEdges) UsageLogsOrErr() ([]*UsageLog, error) { + if e.loadedTypes[3] { + return e.UsageLogs, nil + } + return nil, &NotLoadedError{edge: "usage_logs"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*UserSubscription) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case usersubscription.FieldDailyUsageUsd, usersubscription.FieldWeeklyUsageUsd, usersubscription.FieldMonthlyUsageUsd: + values[i] = new(sql.NullFloat64) + case usersubscription.FieldID, usersubscription.FieldUserID, usersubscription.FieldGroupID, usersubscription.FieldAssignedBy: + values[i] = new(sql.NullInt64) + case usersubscription.FieldStatus, usersubscription.FieldNotes: + values[i] = new(sql.NullString) + case usersubscription.FieldCreatedAt, usersubscription.FieldUpdatedAt, usersubscription.FieldDeletedAt, usersubscription.FieldStartsAt, usersubscription.FieldExpiresAt, usersubscription.FieldDailyWindowStart, usersubscription.FieldWeeklyWindowStart, usersubscription.FieldMonthlyWindowStart, usersubscription.FieldAssignedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the UserSubscription fields. +func (_m *UserSubscription) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case usersubscription.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case usersubscription.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case usersubscription.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case usersubscription.FieldDeletedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field deleted_at", values[i]) + } else if value.Valid { + _m.DeletedAt = new(time.Time) + *_m.DeletedAt = value.Time + } + case usersubscription.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case usersubscription.FieldGroupID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field group_id", values[i]) + } else if value.Valid { + _m.GroupID = value.Int64 + } + case usersubscription.FieldStartsAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field starts_at", values[i]) + } else if value.Valid { + _m.StartsAt = value.Time + } + case usersubscription.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + _m.ExpiresAt = value.Time + } + case usersubscription.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case usersubscription.FieldDailyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field daily_window_start", values[i]) + } 
else if value.Valid { + _m.DailyWindowStart = new(time.Time) + *_m.DailyWindowStart = value.Time + } + case usersubscription.FieldWeeklyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field weekly_window_start", values[i]) + } else if value.Valid { + _m.WeeklyWindowStart = new(time.Time) + *_m.WeeklyWindowStart = value.Time + } + case usersubscription.FieldMonthlyWindowStart: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field monthly_window_start", values[i]) + } else if value.Valid { + _m.MonthlyWindowStart = new(time.Time) + *_m.MonthlyWindowStart = value.Time + } + case usersubscription.FieldDailyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field daily_usage_usd", values[i]) + } else if value.Valid { + _m.DailyUsageUsd = value.Float64 + } + case usersubscription.FieldWeeklyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field weekly_usage_usd", values[i]) + } else if value.Valid { + _m.WeeklyUsageUsd = value.Float64 + } + case usersubscription.FieldMonthlyUsageUsd: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field monthly_usage_usd", values[i]) + } else if value.Valid { + _m.MonthlyUsageUsd = value.Float64 + } + case usersubscription.FieldAssignedBy: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field assigned_by", values[i]) + } else if value.Valid { + _m.AssignedBy = new(int64) + *_m.AssignedBy = value.Int64 + } + case usersubscription.FieldAssignedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field assigned_at", values[i]) + } else if value.Valid { + _m.AssignedAt = value.Time + } + case usersubscription.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = new(string) + *_m.Notes = value.String + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the UserSubscription. +// This includes values selected through modifiers, order, etc. +func (_m *UserSubscription) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUser queries the "user" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryUser() *UserQuery { + return NewUserSubscriptionClient(_m.config).QueryUser(_m) +} + +// QueryGroup queries the "group" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryGroup() *GroupQuery { + return NewUserSubscriptionClient(_m.config).QueryGroup(_m) +} + +// QueryAssignedByUser queries the "assigned_by_user" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryAssignedByUser() *UserQuery { + return NewUserSubscriptionClient(_m.config).QueryAssignedByUser(_m) +} + +// QueryUsageLogs queries the "usage_logs" edge of the UserSubscription entity. +func (_m *UserSubscription) QueryUsageLogs() *UsageLogQuery { + return NewUserSubscriptionClient(_m.config).QueryUsageLogs(_m) +} + +// Update returns a builder for updating this UserSubscription. 
+// Note that you need to call UserSubscription.Unwrap() before calling this method if this UserSubscription +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *UserSubscription) Update() *UserSubscriptionUpdateOne { + return NewUserSubscriptionClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the UserSubscription entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *UserSubscription) Unwrap() *UserSubscription { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: UserSubscription is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *UserSubscription) String() string { + var builder strings.Builder + builder.WriteString("UserSubscription(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.DeletedAt; v != nil { + builder.WriteString("deleted_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("group_id=") + builder.WriteString(fmt.Sprintf("%v", _m.GroupID)) + builder.WriteString(", ") + builder.WriteString("starts_at=") + builder.WriteString(_m.StartsAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(_m.ExpiresAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.DailyWindowStart; v != nil { + builder.WriteString("daily_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.WeeklyWindowStart; v != nil { + builder.WriteString("weekly_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.MonthlyWindowStart; v != nil { + builder.WriteString("monthly_window_start=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("daily_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.DailyUsageUsd)) + builder.WriteString(", ") + builder.WriteString("weekly_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.WeeklyUsageUsd)) + builder.WriteString(", ") + builder.WriteString("monthly_usage_usd=") + builder.WriteString(fmt.Sprintf("%v", _m.MonthlyUsageUsd)) + builder.WriteString(", ") + if v := _m.AssignedBy; v != nil { + builder.WriteString("assigned_by=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("assigned_at=") + builder.WriteString(_m.AssignedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := _m.Notes; v != nil { + builder.WriteString("notes=") + builder.WriteString(*v) + } + builder.WriteByte(')') + return builder.String() +} + +// UserSubscriptions is a parsable slice of UserSubscription. 
+type UserSubscriptions []*UserSubscription diff --git a/backend/ent/usersubscription/usersubscription.go b/backend/ent/usersubscription/usersubscription.go new file mode 100644 index 00000000..06441646 --- /dev/null +++ b/backend/ent/usersubscription/usersubscription.go @@ -0,0 +1,306 @@ +// Code generated by ent, DO NOT EDIT. + +package usersubscription + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the usersubscription type in the database. + Label = "user_subscription" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDeletedAt holds the string denoting the deleted_at field in the database. + FieldDeletedAt = "deleted_at" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldGroupID holds the string denoting the group_id field in the database. + FieldGroupID = "group_id" + // FieldStartsAt holds the string denoting the starts_at field in the database. + FieldStartsAt = "starts_at" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldDailyWindowStart holds the string denoting the daily_window_start field in the database. + FieldDailyWindowStart = "daily_window_start" + // FieldWeeklyWindowStart holds the string denoting the weekly_window_start field in the database. + FieldWeeklyWindowStart = "weekly_window_start" + // FieldMonthlyWindowStart holds the string denoting the monthly_window_start field in the database. + FieldMonthlyWindowStart = "monthly_window_start" + // FieldDailyUsageUsd holds the string denoting the daily_usage_usd field in the database. + FieldDailyUsageUsd = "daily_usage_usd" + // FieldWeeklyUsageUsd holds the string denoting the weekly_usage_usd field in the database. + FieldWeeklyUsageUsd = "weekly_usage_usd" + // FieldMonthlyUsageUsd holds the string denoting the monthly_usage_usd field in the database. + FieldMonthlyUsageUsd = "monthly_usage_usd" + // FieldAssignedBy holds the string denoting the assigned_by field in the database. + FieldAssignedBy = "assigned_by" + // FieldAssignedAt holds the string denoting the assigned_at field in the database. + FieldAssignedAt = "assigned_at" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // EdgeGroup holds the string denoting the group edge name in mutations. + EdgeGroup = "group" + // EdgeAssignedByUser holds the string denoting the assigned_by_user edge name in mutations. + EdgeAssignedByUser = "assigned_by_user" + // EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations. + EdgeUsageLogs = "usage_logs" + // Table holds the table name of the usersubscription in the database. + Table = "user_subscriptions" + // UserTable is the table that holds the user relation/edge. + UserTable = "user_subscriptions" + // UserInverseTable is the table name for the User entity. 
+ // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" + // GroupTable is the table that holds the group relation/edge. + GroupTable = "user_subscriptions" + // GroupInverseTable is the table name for the Group entity. + // It exists in this package in order to avoid circular dependency with the "group" package. + GroupInverseTable = "groups" + // GroupColumn is the table column denoting the group relation/edge. + GroupColumn = "group_id" + // AssignedByUserTable is the table that holds the assigned_by_user relation/edge. + AssignedByUserTable = "user_subscriptions" + // AssignedByUserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + AssignedByUserInverseTable = "users" + // AssignedByUserColumn is the table column denoting the assigned_by_user relation/edge. + AssignedByUserColumn = "assigned_by" + // UsageLogsTable is the table that holds the usage_logs relation/edge. + UsageLogsTable = "usage_logs" + // UsageLogsInverseTable is the table name for the UsageLog entity. + // It exists in this package in order to avoid circular dependency with the "usagelog" package. + UsageLogsInverseTable = "usage_logs" + // UsageLogsColumn is the table column denoting the usage_logs relation/edge. + UsageLogsColumn = "subscription_id" +) + +// Columns holds all SQL columns for usersubscription fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldDeletedAt, + FieldUserID, + FieldGroupID, + FieldStartsAt, + FieldExpiresAt, + FieldStatus, + FieldDailyWindowStart, + FieldWeeklyWindowStart, + FieldMonthlyWindowStart, + FieldDailyUsageUsd, + FieldWeeklyUsageUsd, + FieldMonthlyUsageUsd, + FieldAssignedBy, + FieldAssignedAt, + FieldNotes, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// Note that the variables below are initialized by the runtime +// package on the initialization of the application. Therefore, +// it should be imported in the main as follows: +// +// import _ "github.com/Wei-Shaw/sub2api/ent/runtime" +var ( + Hooks [1]ent.Hook + Interceptors [1]ent.Interceptor + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultDailyUsageUsd holds the default value on creation for the "daily_usage_usd" field. + DefaultDailyUsageUsd float64 + // DefaultWeeklyUsageUsd holds the default value on creation for the "weekly_usage_usd" field. + DefaultWeeklyUsageUsd float64 + // DefaultMonthlyUsageUsd holds the default value on creation for the "monthly_usage_usd" field. + DefaultMonthlyUsageUsd float64 + // DefaultAssignedAt holds the default value on creation for the "assigned_at" field. 
+ DefaultAssignedAt func() time.Time +) + +// OrderOption defines the ordering options for the UserSubscription queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDeletedAt orders the results by the deleted_at field. +func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDeletedAt, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByGroupID orders the results by the group_id field. +func ByGroupID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldGroupID, opts...).ToFunc() +} + +// ByStartsAt orders the results by the starts_at field. +func ByStartsAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartsAt, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByDailyWindowStart orders the results by the daily_window_start field. +func ByDailyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyWindowStart, opts...).ToFunc() +} + +// ByWeeklyWindowStart orders the results by the weekly_window_start field. +func ByWeeklyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyWindowStart, opts...).ToFunc() +} + +// ByMonthlyWindowStart orders the results by the monthly_window_start field. +func ByMonthlyWindowStart(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyWindowStart, opts...).ToFunc() +} + +// ByDailyUsageUsd orders the results by the daily_usage_usd field. +func ByDailyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDailyUsageUsd, opts...).ToFunc() +} + +// ByWeeklyUsageUsd orders the results by the weekly_usage_usd field. +func ByWeeklyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWeeklyUsageUsd, opts...).ToFunc() +} + +// ByMonthlyUsageUsd orders the results by the monthly_usage_usd field. +func ByMonthlyUsageUsd(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMonthlyUsageUsd, opts...).ToFunc() +} + +// ByAssignedBy orders the results by the assigned_by field. +func ByAssignedBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAssignedBy, opts...).ToFunc() +} + +// ByAssignedAt orders the results by the assigned_at field. +func ByAssignedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAssignedAt, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. 
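+//
+// Editor's note: a hedged ordering sketch (client/ctx assumed); the
+// OrderOption values in this file plug into Query().Order, and
+// sql.OrderDesc reverses a term:
+//
+//	subs, err := client.UserSubscription.Query().
+//		Order(usersubscription.ByExpiresAt(sql.OrderDesc())).
+//		All(ctx)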
+func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByGroupField orders the results by group field. +func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAssignedByUserField orders the results by assigned_by_user field. +func ByAssignedByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAssignedByUserStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUsageLogsCount orders the results by usage_logs count. +func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...) + } +} + +// ByUsageLogs orders the results by usage_logs terms. +func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} +func newGroupStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(GroupInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) +} +func newAssignedByUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AssignedByUserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AssignedByUserTable, AssignedByUserColumn), + ) +} +func newUsageLogsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageLogsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) +} diff --git a/backend/ent/usersubscription/where.go b/backend/ent/usersubscription/where.go new file mode 100644 index 00000000..250e5ed5 --- /dev/null +++ b/backend/ent/usersubscription/where.go @@ -0,0 +1,978 @@ +// Code generated by ent, DO NOT EDIT. + +package usersubscription + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
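+//
+// Editor's note: an illustrative sketch only (client/ctx and the IDs are
+// hypothetical); ID predicates compose with any builder that accepts Where:
+//
+//	n, err := client.UserSubscription.Delete().
+//		Where(usersubscription.IDIn(1, 2, 3)).
+//		Exec(ctx) // n is the number of deleted rows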
+func IDNotIn(ids ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ. +func DeletedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDeletedAt, v)) +} + +// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ. +func UserID(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUserID, v)) +} + +// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ. +func GroupID(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldGroupID, v)) +} + +// StartsAt applies equality check predicate on the "starts_at" field. It's identical to StartsAtEQ. +func StartsAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStartsAt, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldExpiresAt, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStatus, v)) +} + +// DailyWindowStart applies equality check predicate on the "daily_window_start" field. It's identical to DailyWindowStartEQ. +func DailyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyWindowStart, v)) +} + +// WeeklyWindowStart applies equality check predicate on the "weekly_window_start" field. It's identical to WeeklyWindowStartEQ. +func WeeklyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyWindowStart, v)) +} + +// MonthlyWindowStart applies equality check predicate on the "monthly_window_start" field. It's identical to MonthlyWindowStartEQ. 
+func MonthlyWindowStart(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyWindowStart, v)) +} + +// DailyUsageUsd applies equality check predicate on the "daily_usage_usd" field. It's identical to DailyUsageUsdEQ. +func DailyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyUsageUsd, v)) +} + +// WeeklyUsageUsd applies equality check predicate on the "weekly_usage_usd" field. It's identical to WeeklyUsageUsdEQ. +func WeeklyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyUsageUsd, v)) +} + +// MonthlyUsageUsd applies equality check predicate on the "monthly_usage_usd" field. It's identical to MonthlyUsageUsdEQ. +func MonthlyUsageUsd(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyUsageUsd, v)) +} + +// AssignedBy applies equality check predicate on the "assigned_by" field. It's identical to AssignedByEQ. +func AssignedBy(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedBy, v)) +} + +// AssignedAt applies equality check predicate on the "assigned_at" field. It's identical to AssignedAtEQ. +func AssignedAt(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedAt, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DeletedAtEQ applies the EQ predicate on the "deleted_at" field. +func DeletedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDeletedAt, v)) +} + +// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field. +func DeletedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldDeletedAt, v)) +} + +// DeletedAtIn applies the In predicate on the "deleted_at" field. +func DeletedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldDeletedAt, vs...)) +} + +// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field. +func DeletedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldDeletedAt, vs...)) +} + +// DeletedAtGT applies the GT predicate on the "deleted_at" field. +func DeletedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldDeletedAt, v)) +} + +// DeletedAtGTE applies the GTE predicate on the "deleted_at" field. +func DeletedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldDeletedAt, v)) +} + +// DeletedAtLT applies the LT predicate on the "deleted_at" field. +func DeletedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldDeletedAt, v)) +} + +// DeletedAtLTE applies the LTE predicate on the "deleted_at" field. +func DeletedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldDeletedAt, v)) +} + +// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field. +func DeletedAtIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldDeletedAt)) +} + +// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field. +func DeletedAtNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldDeletedAt)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. 
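+//
+// Editor's note: a sketch combining field predicates from this file
+// (client/ctx assumed; the ID is hypothetical):
+//
+//	exists, err := client.UserSubscription.Query().
+//		Where(
+//			usersubscription.UserIDEQ(42),
+//			usersubscription.DeletedAtIsNil(),
+//		).
+//		Exist(ctx)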
+func UserIDEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. +func UserIDIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldUserID, vs...)) +} + +// GroupIDEQ applies the EQ predicate on the "group_id" field. +func GroupIDEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldGroupID, v)) +} + +// GroupIDNEQ applies the NEQ predicate on the "group_id" field. +func GroupIDNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldGroupID, v)) +} + +// GroupIDIn applies the In predicate on the "group_id" field. +func GroupIDIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldGroupID, vs...)) +} + +// GroupIDNotIn applies the NotIn predicate on the "group_id" field. +func GroupIDNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldGroupID, vs...)) +} + +// StartsAtEQ applies the EQ predicate on the "starts_at" field. +func StartsAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStartsAt, v)) +} + +// StartsAtNEQ applies the NEQ predicate on the "starts_at" field. +func StartsAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldStartsAt, v)) +} + +// StartsAtIn applies the In predicate on the "starts_at" field. +func StartsAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldStartsAt, vs...)) +} + +// StartsAtNotIn applies the NotIn predicate on the "starts_at" field. +func StartsAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldStartsAt, vs...)) +} + +// StartsAtGT applies the GT predicate on the "starts_at" field. +func StartsAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldStartsAt, v)) +} + +// StartsAtGTE applies the GTE predicate on the "starts_at" field. +func StartsAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldStartsAt, v)) +} + +// StartsAtLT applies the LT predicate on the "starts_at" field. +func StartsAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldStartsAt, v)) +} + +// StartsAtLTE applies the LTE predicate on the "starts_at" field. +func StartsAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldStartsAt, v)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. 
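+//
+// Editor's note: for instance, the time predicates in this group can select
+// rows that have not yet expired (a sketch; client/ctx assumed):
+//
+//	active, err := client.UserSubscription.Query().
+//		Where(usersubscription.ExpiresAtGT(time.Now())).
+//		All(ctx)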
+func ExpiresAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldExpiresAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. 
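+//
+// Editor's note: string predicates follow the same pattern; a sketch with an
+// illustrative status value (client/ctx assumed):
+//
+//	n, err := client.UserSubscription.Query().
+//		Where(usersubscription.StatusEQ("active")).
+//		Count(ctx)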
+func StatusHasSuffix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContainsFold(FieldStatus, v)) +} + +// DailyWindowStartEQ applies the EQ predicate on the "daily_window_start" field. +func DailyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyWindowStart, v)) +} + +// DailyWindowStartNEQ applies the NEQ predicate on the "daily_window_start" field. +func DailyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldDailyWindowStart, v)) +} + +// DailyWindowStartIn applies the In predicate on the "daily_window_start" field. +func DailyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldDailyWindowStart, vs...)) +} + +// DailyWindowStartNotIn applies the NotIn predicate on the "daily_window_start" field. +func DailyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldDailyWindowStart, vs...)) +} + +// DailyWindowStartGT applies the GT predicate on the "daily_window_start" field. +func DailyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldDailyWindowStart, v)) +} + +// DailyWindowStartGTE applies the GTE predicate on the "daily_window_start" field. +func DailyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldDailyWindowStart, v)) +} + +// DailyWindowStartLT applies the LT predicate on the "daily_window_start" field. +func DailyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldDailyWindowStart, v)) +} + +// DailyWindowStartLTE applies the LTE predicate on the "daily_window_start" field. +func DailyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldDailyWindowStart, v)) +} + +// DailyWindowStartIsNil applies the IsNil predicate on the "daily_window_start" field. +func DailyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldDailyWindowStart)) +} + +// DailyWindowStartNotNil applies the NotNil predicate on the "daily_window_start" field. +func DailyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldDailyWindowStart)) +} + +// WeeklyWindowStartEQ applies the EQ predicate on the "weekly_window_start" field. +func WeeklyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartNEQ applies the NEQ predicate on the "weekly_window_start" field. +func WeeklyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartIn applies the In predicate on the "weekly_window_start" field. 
+func WeeklyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldWeeklyWindowStart, vs...)) +} + +// WeeklyWindowStartNotIn applies the NotIn predicate on the "weekly_window_start" field. +func WeeklyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldWeeklyWindowStart, vs...)) +} + +// WeeklyWindowStartGT applies the GT predicate on the "weekly_window_start" field. +func WeeklyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartGTE applies the GTE predicate on the "weekly_window_start" field. +func WeeklyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartLT applies the LT predicate on the "weekly_window_start" field. +func WeeklyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartLTE applies the LTE predicate on the "weekly_window_start" field. +func WeeklyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldWeeklyWindowStart, v)) +} + +// WeeklyWindowStartIsNil applies the IsNil predicate on the "weekly_window_start" field. +func WeeklyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldWeeklyWindowStart)) +} + +// WeeklyWindowStartNotNil applies the NotNil predicate on the "weekly_window_start" field. +func WeeklyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldWeeklyWindowStart)) +} + +// MonthlyWindowStartEQ applies the EQ predicate on the "monthly_window_start" field. +func MonthlyWindowStartEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartNEQ applies the NEQ predicate on the "monthly_window_start" field. +func MonthlyWindowStartNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartIn applies the In predicate on the "monthly_window_start" field. +func MonthlyWindowStartIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldMonthlyWindowStart, vs...)) +} + +// MonthlyWindowStartNotIn applies the NotIn predicate on the "monthly_window_start" field. +func MonthlyWindowStartNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldMonthlyWindowStart, vs...)) +} + +// MonthlyWindowStartGT applies the GT predicate on the "monthly_window_start" field. +func MonthlyWindowStartGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartGTE applies the GTE predicate on the "monthly_window_start" field. +func MonthlyWindowStartGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartLT applies the LT predicate on the "monthly_window_start" field. 
+func MonthlyWindowStartLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartLTE applies the LTE predicate on the "monthly_window_start" field. +func MonthlyWindowStartLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldMonthlyWindowStart, v)) +} + +// MonthlyWindowStartIsNil applies the IsNil predicate on the "monthly_window_start" field. +func MonthlyWindowStartIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldMonthlyWindowStart)) +} + +// MonthlyWindowStartNotNil applies the NotNil predicate on the "monthly_window_start" field. +func MonthlyWindowStartNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldMonthlyWindowStart)) +} + +// DailyUsageUsdEQ applies the EQ predicate on the "daily_usage_usd" field. +func DailyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdNEQ applies the NEQ predicate on the "daily_usage_usd" field. +func DailyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdIn applies the In predicate on the "daily_usage_usd" field. +func DailyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldDailyUsageUsd, vs...)) +} + +// DailyUsageUsdNotIn applies the NotIn predicate on the "daily_usage_usd" field. +func DailyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldDailyUsageUsd, vs...)) +} + +// DailyUsageUsdGT applies the GT predicate on the "daily_usage_usd" field. +func DailyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdGTE applies the GTE predicate on the "daily_usage_usd" field. +func DailyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdLT applies the LT predicate on the "daily_usage_usd" field. +func DailyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldDailyUsageUsd, v)) +} + +// DailyUsageUsdLTE applies the LTE predicate on the "daily_usage_usd" field. +func DailyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldDailyUsageUsd, v)) +} + +// WeeklyUsageUsdEQ applies the EQ predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdNEQ applies the NEQ predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdIn applies the In predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldWeeklyUsageUsd, vs...)) +} + +// WeeklyUsageUsdNotIn applies the NotIn predicate on the "weekly_usage_usd" field. 
+func WeeklyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldWeeklyUsageUsd, vs...)) +} + +// WeeklyUsageUsdGT applies the GT predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdGTE applies the GTE predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdLT applies the LT predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldWeeklyUsageUsd, v)) +} + +// WeeklyUsageUsdLTE applies the LTE predicate on the "weekly_usage_usd" field. +func WeeklyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldWeeklyUsageUsd, v)) +} + +// MonthlyUsageUsdEQ applies the EQ predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdNEQ applies the NEQ predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdNEQ(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdIn applies the In predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldMonthlyUsageUsd, vs...)) +} + +// MonthlyUsageUsdNotIn applies the NotIn predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdNotIn(vs ...float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldMonthlyUsageUsd, vs...)) +} + +// MonthlyUsageUsdGT applies the GT predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdGT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdGTE applies the GTE predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdGTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdLT applies the LT predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdLT(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldMonthlyUsageUsd, v)) +} + +// MonthlyUsageUsdLTE applies the LTE predicate on the "monthly_usage_usd" field. +func MonthlyUsageUsdLTE(v float64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldMonthlyUsageUsd, v)) +} + +// AssignedByEQ applies the EQ predicate on the "assigned_by" field. +func AssignedByEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedBy, v)) +} + +// AssignedByNEQ applies the NEQ predicate on the "assigned_by" field. +func AssignedByNEQ(v int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldAssignedBy, v)) +} + +// AssignedByIn applies the In predicate on the "assigned_by" field. 
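+//
+// Editor's note: a hypothetical sketch listing rows assigned by any of a set
+// of admin IDs (values illustrative; client/ctx assumed):
+//
+//	subs, err := client.UserSubscription.Query().
+//		Where(usersubscription.AssignedByIn(1, 2)).
+//		All(ctx)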
+func AssignedByIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldAssignedBy, vs...)) +} + +// AssignedByNotIn applies the NotIn predicate on the "assigned_by" field. +func AssignedByNotIn(vs ...int64) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldAssignedBy, vs...)) +} + +// AssignedByIsNil applies the IsNil predicate on the "assigned_by" field. +func AssignedByIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldAssignedBy)) +} + +// AssignedByNotNil applies the NotNil predicate on the "assigned_by" field. +func AssignedByNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldAssignedBy)) +} + +// AssignedAtEQ applies the EQ predicate on the "assigned_at" field. +func AssignedAtEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldAssignedAt, v)) +} + +// AssignedAtNEQ applies the NEQ predicate on the "assigned_at" field. +func AssignedAtNEQ(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldAssignedAt, v)) +} + +// AssignedAtIn applies the In predicate on the "assigned_at" field. +func AssignedAtIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldAssignedAt, vs...)) +} + +// AssignedAtNotIn applies the NotIn predicate on the "assigned_at" field. +func AssignedAtNotIn(vs ...time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldAssignedAt, vs...)) +} + +// AssignedAtGT applies the GT predicate on the "assigned_at" field. +func AssignedAtGT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldAssignedAt, v)) +} + +// AssignedAtGTE applies the GTE predicate on the "assigned_at" field. +func AssignedAtGTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldAssignedAt, v)) +} + +// AssignedAtLT applies the LT predicate on the "assigned_at" field. +func AssignedAtLT(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldAssignedAt, v)) +} + +// AssignedAtLTE applies the LTE predicate on the "assigned_at" field. +func AssignedAtLTE(v time.Time) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldAssignedAt, v)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. +func NotesNEQ(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. 
+func NotesGTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. +func NotesNotNil() predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.UserSubscription { + return predicate.UserSubscription(sql.FieldContainsFold(FieldNotes, v)) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasGroup applies the HasEdge predicate on the "group" edge. +func HasGroup() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates). +func HasGroupWith(preds ...predicate.Group) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newGroupStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAssignedByUser applies the HasEdge predicate on the "assigned_by_user" edge. 
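+//
+// Editor's note: edge predicates pair with the *With variants below to match
+// on the related entity; a sketch (user.IDEQ is the generated ID predicate of
+// the user package; adminID is hypothetical):
+//
+//	subs, err := client.UserSubscription.Query().
+//		Where(usersubscription.HasAssignedByUserWith(user.IDEQ(adminID))).
+//		All(ctx)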
+func HasAssignedByUser() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, AssignedByUserTable, AssignedByUserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAssignedByUserWith applies the HasEdge predicate on the "assigned_by_user" edge with a given conditions (other predicates). +func HasAssignedByUserWith(preds ...predicate.User) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newAssignedByUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge. +func HasUsageLogs() predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with a given conditions (other predicates). +func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.UserSubscription { + return predicate.UserSubscription(func(s *sql.Selector) { + step := newUsageLogsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.UserSubscription) predicate.UserSubscription { + return predicate.UserSubscription(sql.NotPredicates(p)) +} diff --git a/backend/ent/usersubscription_create.go b/backend/ent/usersubscription_create.go new file mode 100644 index 00000000..dd03115b --- /dev/null +++ b/backend/ent/usersubscription_create.go @@ -0,0 +1,1700 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionCreate is the builder for creating a UserSubscription entity. +type UserSubscriptionCreate struct { + config + mutation *UserSubscriptionMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *UserSubscriptionCreate) SetCreatedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableCreatedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. 
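+// If the field is left unset, Save falls back to
+// usersubscription.DefaultUpdatedAt in the builder's defaults step (see
+// defaults further below).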
+func (_c *UserSubscriptionCreate) SetUpdatedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableUpdatedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetDeletedAt sets the "deleted_at" field. +func (_c *UserSubscriptionCreate) SetDeletedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetDeletedAt(v) + return _c +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableDeletedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetDeletedAt(*v) + } + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *UserSubscriptionCreate) SetUserID(v int64) *UserSubscriptionCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetGroupID sets the "group_id" field. +func (_c *UserSubscriptionCreate) SetGroupID(v int64) *UserSubscriptionCreate { + _c.mutation.SetGroupID(v) + return _c +} + +// SetStartsAt sets the "starts_at" field. +func (_c *UserSubscriptionCreate) SetStartsAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetStartsAt(v) + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *UserSubscriptionCreate) SetExpiresAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *UserSubscriptionCreate) SetStatus(v string) *UserSubscriptionCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableStatus(v *string) *UserSubscriptionCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_c *UserSubscriptionCreate) SetDailyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetDailyWindowStart(v) + return _c +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetDailyWindowStart(*v) + } + return _c +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (_c *UserSubscriptionCreate) SetWeeklyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetWeeklyWindowStart(v) + return _c +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetWeeklyWindowStart(*v) + } + return _c +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (_c *UserSubscriptionCreate) SetMonthlyWindowStart(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetMonthlyWindowStart(v) + return _c +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetMonthlyWindowStart(*v) + } + return _c +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. 
+func (_c *UserSubscriptionCreate) SetDailyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetDailyUsageUsd(v) + return _c +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetDailyUsageUsd(*v) + } + return _c +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_c *UserSubscriptionCreate) SetWeeklyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetWeeklyUsageUsd(v) + return _c +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetWeeklyUsageUsd(*v) + } + return _c +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_c *UserSubscriptionCreate) SetMonthlyUsageUsd(v float64) *UserSubscriptionCreate { + _c.mutation.SetMonthlyUsageUsd(v) + return _c +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionCreate { + if v != nil { + _c.SetMonthlyUsageUsd(*v) + } + return _c +} + +// SetAssignedBy sets the "assigned_by" field. +func (_c *UserSubscriptionCreate) SetAssignedBy(v int64) *UserSubscriptionCreate { + _c.mutation.SetAssignedBy(v) + return _c +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedBy(v *int64) *UserSubscriptionCreate { + if v != nil { + _c.SetAssignedBy(*v) + } + return _c +} + +// SetAssignedAt sets the "assigned_at" field. +func (_c *UserSubscriptionCreate) SetAssignedAt(v time.Time) *UserSubscriptionCreate { + _c.mutation.SetAssignedAt(v) + return _c +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedAt(v *time.Time) *UserSubscriptionCreate { + if v != nil { + _c.SetAssignedAt(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *UserSubscriptionCreate) SetNotes(v string) *UserSubscriptionCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableNotes(v *string) *UserSubscriptionCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetUser sets the "user" edge to the User entity. +func (_c *UserSubscriptionCreate) SetUser(v *User) *UserSubscriptionCreate { + return _c.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_c *UserSubscriptionCreate) SetGroup(v *Group) *UserSubscriptionCreate { + return _c.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. +func (_c *UserSubscriptionCreate) SetAssignedByUserID(id int64) *UserSubscriptionCreate { + _c.mutation.SetAssignedByUserID(id) + return _c +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_c *UserSubscriptionCreate) SetNillableAssignedByUserID(id *int64) *UserSubscriptionCreate { + if id != nil { + _c = _c.SetAssignedByUserID(*id) + } + return _c +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. 
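+//
+// Editor's note: an end-to-end creation sketch with hypothetical values;
+// user_id, group_id, starts_at and expires_at are the required inputs
+// enforced by check below, and the remaining fields default on Save:
+//
+//	sub, err := client.UserSubscription.Create().
+//		SetUserID(42).
+//		SetGroupID(7).
+//		SetStartsAt(time.Now()).
+//		SetExpiresAt(time.Now().AddDate(0, 1, 0)).
+//		SetAssignedByUserID(adminID).
+//		Save(ctx)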
+func (_c *UserSubscriptionCreate) SetAssignedByUser(v *User) *UserSubscriptionCreate { + return _c.SetAssignedByUserID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_c *UserSubscriptionCreate) AddUsageLogIDs(ids ...int64) *UserSubscriptionCreate { + _c.mutation.AddUsageLogIDs(ids...) + return _c +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_c *UserSubscriptionCreate) AddUsageLogs(v ...*UsageLog) *UserSubscriptionCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageLogIDs(ids...) +} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_c *UserSubscriptionCreate) Mutation() *UserSubscriptionMutation { + return _c.mutation +} + +// Save creates the UserSubscription in the database. +func (_c *UserSubscriptionCreate) Save(ctx context.Context) (*UserSubscription, error) { + if err := _c.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *UserSubscriptionCreate) SaveX(ctx context.Context) *UserSubscription { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *UserSubscriptionCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *UserSubscriptionCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *UserSubscriptionCreate) defaults() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + if usersubscription.DefaultCreatedAt == nil { + return fmt.Errorf("ent: uninitialized usersubscription.DefaultCreatedAt (forgotten import ent/runtime?)") + } + v := usersubscription.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + if usersubscription.DefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized usersubscription.DefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := usersubscription.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := usersubscription.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.DailyUsageUsd(); !ok { + v := usersubscription.DefaultDailyUsageUsd + _c.mutation.SetDailyUsageUsd(v) + } + if _, ok := _c.mutation.WeeklyUsageUsd(); !ok { + v := usersubscription.DefaultWeeklyUsageUsd + _c.mutation.SetWeeklyUsageUsd(v) + } + if _, ok := _c.mutation.MonthlyUsageUsd(); !ok { + v := usersubscription.DefaultMonthlyUsageUsd + _c.mutation.SetMonthlyUsageUsd(v) + } + if _, ok := _c.mutation.AssignedAt(); !ok { + if usersubscription.DefaultAssignedAt == nil { + return fmt.Errorf("ent: uninitialized usersubscription.DefaultAssignedAt (forgotten import ent/runtime?)") + } + v := usersubscription.DefaultAssignedAt() + _c.mutation.SetAssignedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *UserSubscriptionCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "UserSubscription.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "UserSubscription.updated_at"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "UserSubscription.user_id"`)} + } + if _, ok := _c.mutation.GroupID(); !ok { + return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "UserSubscription.group_id"`)} + } + if _, ok := _c.mutation.StartsAt(); !ok { + return &ValidationError{Name: "starts_at", err: errors.New(`ent: missing required field "UserSubscription.starts_at"`)} + } + if _, ok := _c.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "UserSubscription.expires_at"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "UserSubscription.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _, ok := _c.mutation.DailyUsageUsd(); !ok { + return &ValidationError{Name: "daily_usage_usd", err: errors.New(`ent: missing required field "UserSubscription.daily_usage_usd"`)} + } + if _, ok := _c.mutation.WeeklyUsageUsd(); !ok { + return &ValidationError{Name: "weekly_usage_usd", err: errors.New(`ent: missing required field "UserSubscription.weekly_usage_usd"`)} + } + if _, ok := _c.mutation.MonthlyUsageUsd(); !ok { + return &ValidationError{Name: "monthly_usage_usd", err: errors.New(`ent: missing required field "UserSubscription.monthly_usage_usd"`)} + } + if _, ok := _c.mutation.AssignedAt(); !ok { + return &ValidationError{Name: "assigned_at", err: errors.New(`ent: missing required field "UserSubscription.assigned_at"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "UserSubscription.user"`)} + } + if len(_c.mutation.GroupIDs()) == 0 { + return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "UserSubscription.group"`)} + } + return nil +} + +func (_c *UserSubscriptionCreate) sqlSave(ctx context.Context) (*UserSubscription, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *UserSubscriptionCreate) createSpec() (*UserSubscription, *sqlgraph.CreateSpec) { + var ( + _node = &UserSubscription{config: _c.config} + _spec = sqlgraph.NewCreateSpec(usersubscription.Table, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(usersubscription.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := 
_c.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.DeletedAt(); ok { + _spec.SetField(usersubscription.FieldDeletedAt, field.TypeTime, value) + _node.DeletedAt = &value + } + if value, ok := _c.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + _node.StartsAt = value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + _node.DailyWindowStart = &value + } + if value, ok := _c.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + _node.WeeklyWindowStart = &value + } + if value, ok := _c.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + _node.MonthlyWindowStart = &value + } + if value, ok := _c.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + _node.DailyUsageUsd = value + } + if value, ok := _c.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + _node.WeeklyUsageUsd = value + } + if value, ok := _c.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + _node.MonthlyUsageUsd = value + } + if value, ok := _c.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, field.TypeTime, value) + _node.AssignedAt = value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.GroupID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.AssignedBy = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + 
if nodes := _c.mutation.UsageLogsIDs(); len(nodes) > 0 {
+		edge := &sqlgraph.EdgeSpec{
+			Rel:     sqlgraph.O2M,
+			Inverse: false,
+			Table:   usersubscription.UsageLogsTable,
+			Columns: []string{usersubscription.UsageLogsColumn},
+			Bidi:    false,
+			Target: &sqlgraph.EdgeTarget{
+				IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
+			},
+		}
+		for _, k := range nodes {
+			edge.Target.Nodes = append(edge.Target.Nodes, k)
+		}
+		_spec.Edges = append(_spec.Edges, edge)
+	}
+	return _node, _spec
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserSubscription.Create().
+//		SetCreatedAt(v).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserSubscriptionUpsert) {
+//			u.SetCreatedAt(v)
+//		}).
+//		Exec(ctx)
+func (_c *UserSubscriptionCreate) OnConflict(opts ...sql.ConflictOption) *UserSubscriptionUpsertOne {
+	_c.conflict = opts
+	return &UserSubscriptionUpsertOne{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserSubscription.Create().
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserSubscriptionCreate) OnConflictColumns(columns ...string) *UserSubscriptionUpsertOne {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserSubscriptionUpsertOne{
+		create: _c,
+	}
+}
+
+type (
+	// UserSubscriptionUpsertOne is the builder for "upsert"-ing
+	// one UserSubscription node.
+	UserSubscriptionUpsertOne struct {
+		create *UserSubscriptionCreate
+	}
+
+	// UserSubscriptionUpsert is the "OnConflict" setter.
+	UserSubscriptionUpsert struct {
+		*sql.UpdateSet
+	}
+)
+
+// SetUpdatedAt sets the "updated_at" field.
+func (u *UserSubscriptionUpsert) SetUpdatedAt(v time.Time) *UserSubscriptionUpsert {
+	u.Set(usersubscription.FieldUpdatedAt, v)
+	return u
+}
+
+// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
+func (u *UserSubscriptionUpsert) UpdateUpdatedAt() *UserSubscriptionUpsert {
+	u.SetExcluded(usersubscription.FieldUpdatedAt)
+	return u
+}
+
+// SetDeletedAt sets the "deleted_at" field.
+func (u *UserSubscriptionUpsert) SetDeletedAt(v time.Time) *UserSubscriptionUpsert {
+	u.Set(usersubscription.FieldDeletedAt, v)
+	return u
+}
+
+// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create.
+func (u *UserSubscriptionUpsert) UpdateDeletedAt() *UserSubscriptionUpsert {
+	u.SetExcluded(usersubscription.FieldDeletedAt)
+	return u
+}
+
+// ClearDeletedAt clears the value of the "deleted_at" field.
+func (u *UserSubscriptionUpsert) ClearDeletedAt() *UserSubscriptionUpsert {
+	u.SetNull(usersubscription.FieldDeletedAt)
+	return u
+}
+
+// SetUserID sets the "user_id" field.
+func (u *UserSubscriptionUpsert) SetUserID(v int64) *UserSubscriptionUpsert {
+	u.Set(usersubscription.FieldUserID, v)
+	return u
+}
+
+// UpdateUserID sets the "user_id" field to the value that was provided on create.
+func (u *UserSubscriptionUpsert) UpdateUserID() *UserSubscriptionUpsert {
+	u.SetExcluded(usersubscription.FieldUserID)
+	return u
+}
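For orientation, here is a minimal sketch of how the upsert builder above is typically driven from application code. It is not part of the generated file: the helper name, the initialized *ent.Client, and the unique (user_id, group_id) index it conflicts on are assumptions for illustration, and imports are elided.

	// upsertSubscription creates a one-month subscription, or refreshes the
	// existing row when the assumed (user_id, group_id) unique index fires.
	func upsertSubscription(ctx context.Context, client *ent.Client, userID, groupID int64) (int64, error) {
		now := time.Now()
		return client.UserSubscription.Create().
			SetUserID(userID).
			SetGroupID(groupID).
			SetStartsAt(now).
			SetExpiresAt(now.AddDate(0, 1, 0)).
			OnConflictColumns(usersubscription.FieldUserID, usersubscription.FieldGroupID).
			UpdateNewValues().
			ID(ctx)
	}

UpdateNewValues resolves the conflict in favor of the values set on create (the generated resolver later in this file additionally preserves the stored created_at), and ID returns the inserted or updated primary key.

+// SetGroupID sets the "group_id" field.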
+func (u *UserSubscriptionUpsert) SetGroupID(v int64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldGroupID, v) + return u +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateGroupID() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldGroupID) + return u +} + +// SetStartsAt sets the "starts_at" field. +func (u *UserSubscriptionUpsert) SetStartsAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldStartsAt, v) + return u +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateStartsAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldStartsAt) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsert) SetExpiresAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateExpiresAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldExpiresAt) + return u +} + +// SetStatus sets the "status" field. +func (u *UserSubscriptionUpsert) SetStatus(v string) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateStatus() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldStatus) + return u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsert) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldDailyWindowStart, v) + return u +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateDailyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldDailyWindowStart) + return u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (u *UserSubscriptionUpsert) ClearDailyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldDailyWindowStart) + return u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (u *UserSubscriptionUpsert) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldWeeklyWindowStart, v) + return u +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateWeeklyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldWeeklyWindowStart) + return u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (u *UserSubscriptionUpsert) ClearWeeklyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldWeeklyWindowStart) + return u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (u *UserSubscriptionUpsert) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldMonthlyWindowStart, v) + return u +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsert) UpdateMonthlyWindowStart() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldMonthlyWindowStart) + return u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsert) ClearMonthlyWindowStart() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldMonthlyWindowStart) + return u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (u *UserSubscriptionUpsert) SetDailyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldDailyUsageUsd, v) + return u +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateDailyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldDailyUsageUsd) + return u +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. +func (u *UserSubscriptionUpsert) AddDailyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldDailyUsageUsd, v) + return u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsert) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldWeeklyUsageUsd, v) + return u +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateWeeklyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldWeeklyUsageUsd) + return u +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsert) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldWeeklyUsageUsd, v) + return u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsert) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldMonthlyUsageUsd, v) + return u +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateMonthlyUsageUsd() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldMonthlyUsageUsd) + return u +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsert) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsert { + u.Add(usersubscription.FieldMonthlyUsageUsd, v) + return u +} + +// SetAssignedBy sets the "assigned_by" field. +func (u *UserSubscriptionUpsert) SetAssignedBy(v int64) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldAssignedBy, v) + return u +} + +// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateAssignedBy() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldAssignedBy) + return u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (u *UserSubscriptionUpsert) ClearAssignedBy() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldAssignedBy) + return u +} + +// SetAssignedAt sets the "assigned_at" field. +func (u *UserSubscriptionUpsert) SetAssignedAt(v time.Time) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldAssignedAt, v) + return u +} + +// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateAssignedAt() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldAssignedAt) + return u +} + +// SetNotes sets the "notes" field. 
+func (u *UserSubscriptionUpsert) SetNotes(v string) *UserSubscriptionUpsert { + u.Set(usersubscription.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *UserSubscriptionUpsert) UpdateNotes() *UserSubscriptionUpsert { + u.SetExcluded(usersubscription.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *UserSubscriptionUpsert) ClearNotes() *UserSubscriptionUpsert { + u.SetNull(usersubscription.FieldNotes) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserSubscriptionUpsertOne) UpdateNewValues() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(usersubscription.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserSubscriptionUpsertOne) Ignore() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserSubscriptionUpsertOne) DoNothing() *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserSubscriptionCreate.OnConflict +// documentation for more info. +func (u *UserSubscriptionUpsertOne) Update(set func(*UserSubscriptionUpsert)) *UserSubscriptionUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserSubscriptionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserSubscriptionUpsertOne) SetUpdatedAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateUpdatedAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserSubscriptionUpsertOne) SetDeletedAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateDeletedAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserSubscriptionUpsertOne) ClearDeletedAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. 
+func (u *UserSubscriptionUpsertOne) SetUserID(v int64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateUserID() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. +func (u *UserSubscriptionUpsertOne) SetGroupID(v int64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateGroupID() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateGroupID() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *UserSubscriptionUpsertOne) SetStartsAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateStartsAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStartsAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsertOne) SetExpiresAt(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateExpiresAt() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateExpiresAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UserSubscriptionUpsertOne) SetStatus(v string) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateStatus() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStatus() + }) +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsertOne) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyWindowStart(v) + }) +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateDailyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyWindowStart() + }) +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (u *UserSubscriptionUpsertOne) ClearDailyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDailyWindowStart() + }) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (u *UserSubscriptionUpsertOne) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyWindowStart(v) + }) +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsertOne) UpdateWeeklyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyWindowStart() + }) +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (u *UserSubscriptionUpsertOne) ClearWeeklyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearWeeklyWindowStart() + }) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (u *UserSubscriptionUpsertOne) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyWindowStart(v) + }) +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateMonthlyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyWindowStart() + }) +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsertOne) ClearMonthlyWindowStart() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearMonthlyWindowStart() + }) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertOne) SetDailyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyUsageUsd(v) + }) +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddDailyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddDailyUsageUsd(v) + }) +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateDailyUsageUsd() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyUsageUsd() + }) +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyUsageUsd(v) + }) +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddWeeklyUsageUsd(v) + }) +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertOne) UpdateWeeklyUsageUsd() *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyUsageUsd() + }) +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyUsageUsd(v) + }) +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertOne) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsertOne { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddMonthlyUsageUsd(v) + }) +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsertOne) UpdateMonthlyUsageUsd() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateMonthlyUsageUsd()
+	})
+}
+
+// SetAssignedBy sets the "assigned_by" field.
+func (u *UserSubscriptionUpsertOne) SetAssignedBy(v int64) *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.SetAssignedBy(v)
+	})
+}
+
+// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create.
+func (u *UserSubscriptionUpsertOne) UpdateAssignedBy() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateAssignedBy()
+	})
+}
+
+// ClearAssignedBy clears the value of the "assigned_by" field.
+func (u *UserSubscriptionUpsertOne) ClearAssignedBy() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.ClearAssignedBy()
+	})
+}
+
+// SetAssignedAt sets the "assigned_at" field.
+func (u *UserSubscriptionUpsertOne) SetAssignedAt(v time.Time) *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.SetAssignedAt(v)
+	})
+}
+
+// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create.
+func (u *UserSubscriptionUpsertOne) UpdateAssignedAt() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateAssignedAt()
+	})
+}
+
+// SetNotes sets the "notes" field.
+func (u *UserSubscriptionUpsertOne) SetNotes(v string) *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.SetNotes(v)
+	})
+}
+
+// UpdateNotes sets the "notes" field to the value that was provided on create.
+func (u *UserSubscriptionUpsertOne) UpdateNotes() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateNotes()
+	})
+}
+
+// ClearNotes clears the value of the "notes" field.
+func (u *UserSubscriptionUpsertOne) ClearNotes() *UserSubscriptionUpsertOne {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.ClearNotes()
+	})
+}
+
+// Exec executes the query.
+func (u *UserSubscriptionUpsertOne) Exec(ctx context.Context) error {
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserSubscriptionCreate.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserSubscriptionUpsertOne) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// ID executes the UPSERT query and returns the inserted/updated ID.
+func (u *UserSubscriptionUpsertOne) ID(ctx context.Context) (id int64, err error) {
+	node, err := u.create.Save(ctx)
+	if err != nil {
+		return id, err
+	}
+	return node.ID, nil
+}
+
+// IDX is like ID, but panics if an error occurs.
+func (u *UserSubscriptionUpsertOne) IDX(ctx context.Context) int64 {
+	id, err := u.ID(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
+
+// UserSubscriptionCreateBulk is the builder for creating many UserSubscription entities in bulk.
+type UserSubscriptionCreateBulk struct {
+	config
+	err      error
+	builders []*UserSubscriptionCreate
+	conflict []sql.ConflictOption
+}
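The bulk builder declared above issues a single INSERT for many rows while still running check() per builder. A hedged sketch of a typical call site (the helper name, client, and field values are hypothetical, not part of this file):

	// createSubscriptions prepares one UserSubscriptionCreate per user and
	// saves them all in one batched INSERT.
	func createSubscriptions(ctx context.Context, client *ent.Client, userIDs []int64, groupID int64, starts, expires time.Time) ([]*ent.UserSubscription, error) {
		builders := make([]*ent.UserSubscriptionCreate, len(userIDs))
		for i, uid := range userIDs {
			builders[i] = client.UserSubscription.Create().
				SetUserID(uid).
				SetGroupID(groupID).
				SetStartsAt(starts).
				SetExpiresAt(expires)
		}
		return client.UserSubscription.CreateBulk(builders...).Save(ctx)
	}

+// Save creates the UserSubscription entities in the database.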
+func (_c *UserSubscriptionCreateBulk) Save(ctx context.Context) ([]*UserSubscription, error) {
+	if _c.err != nil {
+		return nil, _c.err
+	}
+	specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
+	nodes := make([]*UserSubscription, len(_c.builders))
+	mutators := make([]Mutator, len(_c.builders))
+	for i := range _c.builders {
+		func(i int, root context.Context) {
+			builder := _c.builders[i]
+			builder.defaults()
+			var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
+				mutation, ok := m.(*UserSubscriptionMutation)
+				if !ok {
+					return nil, fmt.Errorf("unexpected mutation type %T", m)
+				}
+				if err := builder.check(); err != nil {
+					return nil, err
+				}
+				builder.mutation = mutation
+				var err error
+				nodes[i], specs[i] = builder.createSpec()
+				if i < len(mutators)-1 {
+					_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
+				} else {
+					spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
+					spec.OnConflict = _c.conflict
+					// Invoke the actual operation on the latest mutation in the chain.
+					if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
+						if sqlgraph.IsConstraintError(err) {
+							err = &ConstraintError{msg: err.Error(), wrap: err}
+						}
+					}
+				}
+				if err != nil {
+					return nil, err
+				}
+				mutation.id = &nodes[i].ID
+				if specs[i].ID.Value != nil {
+					id := specs[i].ID.Value.(int64)
+					nodes[i].ID = int64(id)
+				}
+				mutation.done = true
+				return nodes[i], nil
+			})
+			for i := len(builder.hooks) - 1; i >= 0; i-- {
+				mut = builder.hooks[i](mut)
+			}
+			mutators[i] = mut
+		}(i, ctx)
+	}
+	if len(mutators) > 0 {
+		if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
+			return nil, err
+		}
+	}
+	return nodes, nil
+}
+
+// SaveX is like Save, but panics if an error occurs.
+func (_c *UserSubscriptionCreateBulk) SaveX(ctx context.Context) []*UserSubscription {
+	v, err := _c.Save(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Exec executes the query.
+func (_c *UserSubscriptionCreateBulk) Exec(ctx context.Context) error {
+	_, err := _c.Save(ctx)
+	return err
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_c *UserSubscriptionCreateBulk) ExecX(ctx context.Context) {
+	if err := _c.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
+
+// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
+// of the `INSERT` statement. For example:
+//
+//	client.UserSubscription.CreateBulk(builders...).
+//		OnConflict(
+//			// Update the row with the new values
+//			// that were proposed for insertion.
+//			sql.ResolveWithNewValues(),
+//		).
+//		// Override some of the fields with custom
+//		// update values.
+//		Update(func(u *ent.UserSubscriptionUpsert) {
+//			u.SetCreatedAt(v)
+//		}).
+//		Exec(ctx)
+func (_c *UserSubscriptionCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserSubscriptionUpsertBulk {
+	_c.conflict = opts
+	return &UserSubscriptionUpsertBulk{
+		create: _c,
+	}
+}
+
+// OnConflictColumns calls `OnConflict` and configures the columns
+// as conflict target. Using this option is equivalent to using:
+//
+//	client.UserSubscription.CreateBulk(builders...).
+//		OnConflict(sql.ConflictColumns(columns...)).
+//		Exec(ctx)
+func (_c *UserSubscriptionCreateBulk) OnConflictColumns(columns ...string) *UserSubscriptionUpsertBulk {
+	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
+	return &UserSubscriptionUpsertBulk{
+		create: _c,
+	}
+}
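A hedged sketch of one way to drive this bulk upsert path: insert many rows at once and leave already-existing ones untouched. The helper name and the (user_id, group_id) conflict target are assumptions for illustration:

	// seedSubscriptions inserts the given builders in one statement; Ignore
	// resolves conflicts by setting each column to itself, so existing rows
	// keep their current values.
	func seedSubscriptions(ctx context.Context, client *ent.Client, builders []*ent.UserSubscriptionCreate) error {
		return client.UserSubscription.CreateBulk(builders...).
			OnConflictColumns(usersubscription.FieldUserID, usersubscription.FieldGroupID).
			Ignore().
			Exec(ctx)
	}

+// UserSubscriptionUpsertBulk is the builder for "upsert"-ing
+// a bulk of UserSubscription nodes.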
+type UserSubscriptionUpsertBulk struct { + create *UserSubscriptionCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *UserSubscriptionUpsertBulk) UpdateNewValues() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(usersubscription.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.UserSubscription.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserSubscriptionUpsertBulk) Ignore() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserSubscriptionUpsertBulk) DoNothing() *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserSubscriptionCreateBulk.OnConflict +// documentation for more info. +func (u *UserSubscriptionUpsertBulk) Update(set func(*UserSubscriptionUpsert)) *UserSubscriptionUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserSubscriptionUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserSubscriptionUpsertBulk) SetUpdatedAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateUpdatedAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDeletedAt sets the "deleted_at" field. +func (u *UserSubscriptionUpsertBulk) SetDeletedAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDeletedAt(v) + }) +} + +// UpdateDeletedAt sets the "deleted_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateDeletedAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDeletedAt() + }) +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (u *UserSubscriptionUpsertBulk) ClearDeletedAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDeletedAt() + }) +} + +// SetUserID sets the "user_id" field. +func (u *UserSubscriptionUpsertBulk) SetUserID(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateUserID() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateUserID() + }) +} + +// SetGroupID sets the "group_id" field. 
+func (u *UserSubscriptionUpsertBulk) SetGroupID(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetGroupID(v) + }) +} + +// UpdateGroupID sets the "group_id" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateGroupID() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateGroupID() + }) +} + +// SetStartsAt sets the "starts_at" field. +func (u *UserSubscriptionUpsertBulk) SetStartsAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStartsAt(v) + }) +} + +// UpdateStartsAt sets the "starts_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateStartsAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStartsAt() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *UserSubscriptionUpsertBulk) SetExpiresAt(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateExpiresAt() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateExpiresAt() + }) +} + +// SetStatus sets the "status" field. +func (u *UserSubscriptionUpsertBulk) SetStatus(v string) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateStatus() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateStatus() + }) +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (u *UserSubscriptionUpsertBulk) SetDailyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyWindowStart(v) + }) +} + +// UpdateDailyWindowStart sets the "daily_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateDailyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyWindowStart() + }) +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (u *UserSubscriptionUpsertBulk) ClearDailyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearDailyWindowStart() + }) +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (u *UserSubscriptionUpsertBulk) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyWindowStart(v) + }) +} + +// UpdateWeeklyWindowStart sets the "weekly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateWeeklyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyWindowStart() + }) +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (u *UserSubscriptionUpsertBulk) ClearWeeklyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearWeeklyWindowStart() + }) +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. 
+func (u *UserSubscriptionUpsertBulk) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyWindowStart(v) + }) +} + +// UpdateMonthlyWindowStart sets the "monthly_window_start" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateMonthlyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyWindowStart() + }) +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (u *UserSubscriptionUpsertBulk) ClearMonthlyWindowStart() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.ClearMonthlyWindowStart() + }) +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) SetDailyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetDailyUsageUsd(v) + }) +} + +// AddDailyUsageUsd adds v to the "daily_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) AddDailyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddDailyUsageUsd(v) + }) +} + +// UpdateDailyUsageUsd sets the "daily_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateDailyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateDailyUsageUsd() + }) +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetWeeklyUsageUsd(v) + }) +} + +// AddWeeklyUsageUsd adds v to the "weekly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddWeeklyUsageUsd(v) + }) +} + +// UpdateWeeklyUsageUsd sets the "weekly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateWeeklyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateWeeklyUsageUsd() + }) +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetMonthlyUsageUsd(v) + }) +} + +// AddMonthlyUsageUsd adds v to the "monthly_usage_usd" field. +func (u *UserSubscriptionUpsertBulk) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.AddMonthlyUsageUsd(v) + }) +} + +// UpdateMonthlyUsageUsd sets the "monthly_usage_usd" field to the value that was provided on create. +func (u *UserSubscriptionUpsertBulk) UpdateMonthlyUsageUsd() *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.UpdateMonthlyUsageUsd() + }) +} + +// SetAssignedBy sets the "assigned_by" field. +func (u *UserSubscriptionUpsertBulk) SetAssignedBy(v int64) *UserSubscriptionUpsertBulk { + return u.Update(func(s *UserSubscriptionUpsert) { + s.SetAssignedBy(v) + }) +} + +// UpdateAssignedBy sets the "assigned_by" field to the value that was provided on create. 
+func (u *UserSubscriptionUpsertBulk) UpdateAssignedBy() *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateAssignedBy()
+	})
+}
+
+// ClearAssignedBy clears the value of the "assigned_by" field.
+func (u *UserSubscriptionUpsertBulk) ClearAssignedBy() *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.ClearAssignedBy()
+	})
+}
+
+// SetAssignedAt sets the "assigned_at" field.
+func (u *UserSubscriptionUpsertBulk) SetAssignedAt(v time.Time) *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.SetAssignedAt(v)
+	})
+}
+
+// UpdateAssignedAt sets the "assigned_at" field to the value that was provided on create.
+func (u *UserSubscriptionUpsertBulk) UpdateAssignedAt() *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateAssignedAt()
+	})
+}
+
+// SetNotes sets the "notes" field.
+func (u *UserSubscriptionUpsertBulk) SetNotes(v string) *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.SetNotes(v)
+	})
+}
+
+// UpdateNotes sets the "notes" field to the value that was provided on create.
+func (u *UserSubscriptionUpsertBulk) UpdateNotes() *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.UpdateNotes()
+	})
+}
+
+// ClearNotes clears the value of the "notes" field.
+func (u *UserSubscriptionUpsertBulk) ClearNotes() *UserSubscriptionUpsertBulk {
+	return u.Update(func(s *UserSubscriptionUpsert) {
+		s.ClearNotes()
+	})
+}
+
+// Exec executes the query.
+func (u *UserSubscriptionUpsertBulk) Exec(ctx context.Context) error {
+	if u.create.err != nil {
+		return u.create.err
+	}
+	for i, b := range u.create.builders {
+		if len(b.conflict) != 0 {
+			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserSubscriptionCreateBulk instead", i)
+		}
+	}
+	if len(u.create.conflict) == 0 {
+		return errors.New("ent: missing options for UserSubscriptionCreateBulk.OnConflict")
+	}
+	return u.create.Exec(ctx)
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (u *UserSubscriptionUpsertBulk) ExecX(ctx context.Context) {
+	if err := u.create.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/usersubscription_delete.go b/backend/ent/usersubscription_delete.go
new file mode 100644
index 00000000..02096763
--- /dev/null
+++ b/backend/ent/usersubscription_delete.go
@@ -0,0 +1,88 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// UserSubscriptionDelete is the builder for deleting a UserSubscription entity.
+type UserSubscriptionDelete struct {
+	config
+	hooks    []Hook
+	mutation *UserSubscriptionMutation
+}
+
+// Where appends a list of predicates to the UserSubscriptionDelete builder.
+func (_d *UserSubscriptionDelete) Where(ps ...predicate.UserSubscription) *UserSubscriptionDelete {
+	_d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query and returns how many vertices were deleted.
+func (_d *UserSubscriptionDelete) Exec(ctx context.Context) (int, error) {
+	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
+}
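A hedged sketch of using this delete builder from application code; the helper name is hypothetical, and the ExpiresAtLT predicate follows ent's per-field naming convention in the generated usersubscription package:

	// purgeExpired deletes every subscription whose expires_at lies in the
	// past and reports how many rows were affected.
	func purgeExpired(ctx context.Context, client *ent.Client) (int, error) {
		return client.UserSubscription.Delete().
			Where(usersubscription.ExpiresAtLT(time.Now())).
			Exec(ctx)
	}

Since the schema defines a deleted_at field (see the soft-delete mixin), registered hooks may rewrite this deletion into an update; the returned count reflects whatever the hook chain actually executes.

+// ExecX is like Exec, but panics if an error occurs.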
+func (_d *UserSubscriptionDelete) ExecX(ctx context.Context) int {
+	n, err := _d.Exec(ctx)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+func (_d *UserSubscriptionDelete) sqlExec(ctx context.Context) (int, error) {
+	_spec := sqlgraph.NewDeleteSpec(usersubscription.Table, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64))
+	if ps := _d.mutation.predicates; len(ps) > 0 {
+		_spec.Predicate = func(selector *sql.Selector) {
+			for i := range ps {
+				ps[i](selector)
+			}
+		}
+	}
+	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
+	if err != nil && sqlgraph.IsConstraintError(err) {
+		err = &ConstraintError{msg: err.Error(), wrap: err}
+	}
+	_d.mutation.done = true
+	return affected, err
+}
+
+// UserSubscriptionDeleteOne is the builder for deleting a single UserSubscription entity.
+type UserSubscriptionDeleteOne struct {
+	_d *UserSubscriptionDelete
+}
+
+// Where appends a list of predicates to the UserSubscriptionDelete builder.
+func (_d *UserSubscriptionDeleteOne) Where(ps ...predicate.UserSubscription) *UserSubscriptionDeleteOne {
+	_d._d.mutation.Where(ps...)
+	return _d
+}
+
+// Exec executes the deletion query.
+func (_d *UserSubscriptionDeleteOne) Exec(ctx context.Context) error {
+	n, err := _d._d.Exec(ctx)
+	switch {
+	case err != nil:
+		return err
+	case n == 0:
+		return &NotFoundError{usersubscription.Label}
+	default:
+		return nil
+	}
+}
+
+// ExecX is like Exec, but panics if an error occurs.
+func (_d *UserSubscriptionDeleteOne) ExecX(ctx context.Context) {
+	if err := _d.Exec(ctx); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/ent/usersubscription_query.go b/backend/ent/usersubscription_query.go
new file mode 100644
index 00000000..288b7b1d
--- /dev/null
+++ b/backend/ent/usersubscription_query.go
@@ -0,0 +1,873 @@
+// Code generated by ent, DO NOT EDIT.
+
+package ent
+
+import (
+	"context"
+	"database/sql/driver"
+	"fmt"
+	"math"
+
+	"entgo.io/ent"
+	"entgo.io/ent/dialect"
+	"entgo.io/ent/dialect/sql"
+	"entgo.io/ent/dialect/sql/sqlgraph"
+	"entgo.io/ent/schema/field"
+	"github.com/Wei-Shaw/sub2api/ent/group"
+	"github.com/Wei-Shaw/sub2api/ent/predicate"
+	"github.com/Wei-Shaw/sub2api/ent/usagelog"
+	"github.com/Wei-Shaw/sub2api/ent/user"
+	"github.com/Wei-Shaw/sub2api/ent/usersubscription"
+)
+
+// UserSubscriptionQuery is the builder for querying UserSubscription entities.
+type UserSubscriptionQuery struct {
+	config
+	ctx                *QueryContext
+	order              []usersubscription.OrderOption
+	inters             []Interceptor
+	predicates         []predicate.UserSubscription
+	withUser           *UserQuery
+	withGroup          *GroupQuery
+	withAssignedByUser *UserQuery
+	withUsageLogs      *UsageLogQuery
+	modifiers          []func(*sql.Selector)
+	// intermediate query (i.e. traversal path).
+	sql  *sql.Selector
+	path func(context.Context) (*sql.Selector, error)
+}
+
+// Where adds a new predicate for the UserSubscriptionQuery builder.
+func (_q *UserSubscriptionQuery) Where(ps ...predicate.UserSubscription) *UserSubscriptionQuery {
+	_q.predicates = append(_q.predicates, ps...)
+	return _q
+}
+
+// Limit the number of records to be returned by this query.
+func (_q *UserSubscriptionQuery) Limit(limit int) *UserSubscriptionQuery {
+	_q.ctx.Limit = &limit
+	return _q
+}
+
+// Offset to start from.
+func (_q *UserSubscriptionQuery) Offset(offset int) *UserSubscriptionQuery {
+	_q.ctx.Offset = &offset
+	return _q
+}
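Here is a hedged sketch of a typical read path through this query builder. The helper name is hypothetical; the UserIDEQ/StatusEQ predicates and the "active" status literal follow ent's generated naming convention and this schema's status field, but are assumptions rather than something this file defines:

	// activeSubscription loads the single active subscription of a user,
	// eager-loading its group edge in the same query pass.
	func activeSubscription(ctx context.Context, client *ent.Client, userID int64) (*ent.UserSubscription, error) {
		return client.UserSubscription.Query().
			Where(
				usersubscription.UserIDEQ(userID),
				usersubscription.StatusEQ("active"),
			).
			WithGroup().
			Only(ctx)
	}

Only returns *NotSingularError when more than one row matches, which makes it a convenient invariant check for "exactly one active subscription per user" style rules.

+// Unique configures the query builder to filter duplicate records on query.
+// By default, unique is set to true, and can be disabled using this method.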
+func (_q *UserSubscriptionQuery) Unique(unique bool) *UserSubscriptionQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *UserSubscriptionQuery) Order(o ...usersubscription.OrderOption) *UserSubscriptionQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUser chains the current query on the "user" edge. +func (_q *UserSubscriptionQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.UserTable, usersubscription.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryGroup chains the current query on the "group" edge. +func (_q *UserSubscriptionQuery) QueryGroup() *GroupQuery { + query := (&GroupClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(group.Table, group.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.GroupTable, usersubscription.GroupColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAssignedByUser chains the current query on the "assigned_by_user" edge. +func (_q *UserSubscriptionQuery) QueryAssignedByUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, usersubscription.AssignedByUserTable, usersubscription.AssignedByUserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUsageLogs chains the current query on the "usage_logs" edge. +func (_q *UserSubscriptionQuery) QueryUsageLogs() *UsageLogQuery { + query := (&UsageLogClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(usersubscription.Table, usersubscription.FieldID, selector), + sqlgraph.To(usagelog.Table, usagelog.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, usersubscription.UsageLogsTable, usersubscription.UsageLogsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first UserSubscription entity from the query. 
+// Returns a *NotFoundError when no UserSubscription was found. +func (_q *UserSubscriptionQuery) First(ctx context.Context) (*UserSubscription, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{usersubscription.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *UserSubscriptionQuery) FirstX(ctx context.Context) *UserSubscription { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first UserSubscription ID from the query. +// Returns a *NotFoundError when no UserSubscription ID was found. +func (_q *UserSubscriptionQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{usersubscription.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *UserSubscriptionQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single UserSubscription entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one UserSubscription entity is found. +// Returns a *NotFoundError when no UserSubscription entities are found. +func (_q *UserSubscriptionQuery) Only(ctx context.Context) (*UserSubscription, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{usersubscription.Label} + default: + return nil, &NotSingularError{usersubscription.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *UserSubscriptionQuery) OnlyX(ctx context.Context) *UserSubscription { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only UserSubscription ID in the query. +// Returns a *NotSingularError when more than one UserSubscription ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *UserSubscriptionQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{usersubscription.Label} + default: + err = &NotSingularError{usersubscription.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *UserSubscriptionQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of UserSubscriptions. +func (_q *UserSubscriptionQuery) All(ctx context.Context) ([]*UserSubscription, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*UserSubscription, *UserSubscriptionQuery]() + return withInterceptors[[]*UserSubscription](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (_q *UserSubscriptionQuery) AllX(ctx context.Context) []*UserSubscription { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of UserSubscription IDs. +func (_q *UserSubscriptionQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(usersubscription.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *UserSubscriptionQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *UserSubscriptionQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*UserSubscriptionQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *UserSubscriptionQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *UserSubscriptionQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *UserSubscriptionQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserSubscriptionQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *UserSubscriptionQuery) Clone() *UserSubscriptionQuery { + if _q == nil { + return nil + } + return &UserSubscriptionQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]usersubscription.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.UserSubscription{}, _q.predicates...), + withUser: _q.withUser.Clone(), + withGroup: _q.withGroup.Clone(), + withAssignedByUser: _q.withAssignedByUser.Clone(), + withUsageLogs: _q.withUsageLogs.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserSubscriptionQuery) WithUser(opts ...func(*UserQuery)) *UserSubscriptionQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// WithGroup tells the query-builder to eager-load the nodes that are connected to +// the "group" edge. The optional arguments are used to configure the query builder of the edge. 
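+//
+// A minimal usage sketch (illustrative only, not generated output; it
+// assumes an *ent.Client named client and a context.Context named ctx):
+//
+//	subs, err := client.UserSubscription.Query().
+//		WithUser().
+//		WithGroup().
+//		All(ctx)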
+func (_q *UserSubscriptionQuery) WithGroup(opts ...func(*GroupQuery)) *UserSubscriptionQuery { + query := (&GroupClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withGroup = query + return _q +} + +// WithAssignedByUser tells the query-builder to eager-load the nodes that are connected to +// the "assigned_by_user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserSubscriptionQuery) WithAssignedByUser(opts ...func(*UserQuery)) *UserSubscriptionQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withAssignedByUser = query + return _q +} + +// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to +// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserSubscriptionQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *UserSubscriptionQuery { + query := (&UsageLogClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageLogs = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.UserSubscription.Query(). +// GroupBy(usersubscription.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *UserSubscriptionQuery) GroupBy(field string, fields ...string) *UserSubscriptionGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserSubscriptionGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = usersubscription.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.UserSubscription.Query(). +// Select(usersubscription.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *UserSubscriptionQuery) Select(fields ...string) *UserSubscriptionSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &UserSubscriptionSelect{UserSubscriptionQuery: _q} + sbuild.label = usersubscription.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSubscriptionSelect configured with the given aggregations. +func (_q *UserSubscriptionQuery) Aggregate(fns ...AggregateFunc) *UserSubscriptionSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *UserSubscriptionQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !usersubscription.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *UserSubscriptionQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*UserSubscription, error) { + var ( + nodes = []*UserSubscription{} + _spec = _q.querySpec() + loadedTypes = [4]bool{ + _q.withUser != nil, + _q.withGroup != nil, + _q.withAssignedByUser != nil, + _q.withUsageLogs != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*UserSubscription).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &UserSubscription{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *UserSubscription, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + if query := _q.withGroup; query != nil { + if err := _q.loadGroup(ctx, query, nodes, nil, + func(n *UserSubscription, e *Group) { n.Edges.Group = e }); err != nil { + return nil, err + } + } + if query := _q.withAssignedByUser; query != nil { + if err := _q.loadAssignedByUser(ctx, query, nodes, nil, + func(n *UserSubscription, e *User) { n.Edges.AssignedByUser = e }); err != nil { + return nil, err + } + } + if query := _q.withUsageLogs; query != nil { + if err := _q.loadUsageLogs(ctx, query, nodes, + func(n *UserSubscription) { n.Edges.UsageLogs = []*UsageLog{} }, + func(n *UserSubscription, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *UserSubscriptionQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserSubscriptionQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *Group)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i 
:= range nodes { + fk := nodes[i].GroupID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(group.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserSubscriptionQuery) loadAssignedByUser(ctx context.Context, query *UserQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*UserSubscription) + for i := range nodes { + if nodes[i].AssignedBy == nil { + continue + } + fk := *nodes[i].AssignedBy + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "assigned_by" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *UserSubscriptionQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*UserSubscription, init func(*UserSubscription), assign func(*UserSubscription, *UsageLog)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*UserSubscription) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(usagelog.FieldSubscriptionID) + } + query.Where(predicate.UsageLog(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(usersubscription.UsageLogsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.SubscriptionID + if fk == nil { + return fmt.Errorf(`foreign-key "subscription_id" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "subscription_id" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *UserSubscriptionQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *UserSubscriptionQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usersubscription.FieldID) + for i := range fields { + if fields[i] != usersubscription.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withUser != nil { + 
_spec.Node.AddColumnOnce(usersubscription.FieldUserID) + } + if _q.withGroup != nil { + _spec.Node.AddColumnOnce(usersubscription.FieldGroupID) + } + if _q.withAssignedByUser != nil { + _spec.Node.AddColumnOnce(usersubscription.FieldAssignedBy) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(usersubscription.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = usersubscription.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserSubscriptionQuery) ForUpdate(opts ...sql.LockOption) *UserSubscriptionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserSubscriptionQuery) ForShare(opts ...sql.LockOption) *UserSubscriptionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// UserSubscriptionGroupBy is the group-by builder for UserSubscription entities. +type UserSubscriptionGroupBy struct { + selector + build *UserSubscriptionQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *UserSubscriptionGroupBy) Aggregate(fns ...AggregateFunc) *UserSubscriptionGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *UserSubscriptionGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserSubscriptionQuery, *UserSubscriptionGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *UserSubscriptionGroupBy) sqlScan(ctx context.Context, root *UserSubscriptionQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSubscriptionSelect is the builder for selecting fields of UserSubscription entities. +type UserSubscriptionSelect struct { + *UserSubscriptionQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *UserSubscriptionSelect) Aggregate(fns ...AggregateFunc) *UserSubscriptionSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *UserSubscriptionSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserSubscriptionQuery, *UserSubscriptionSelect](ctx, _s.UserSubscriptionQuery, _s, _s.inters, v) +} + +func (_s *UserSubscriptionSelect) sqlScan(ctx context.Context, root *UserSubscriptionQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/usersubscription_update.go b/backend/ent/usersubscription_update.go new file mode 100644 index 00000000..811dae7e --- /dev/null +++ b/backend/ent/usersubscription_update.go @@ -0,0 +1,1349 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/usagelog" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" +) + +// UserSubscriptionUpdate is the builder for updating UserSubscription entities. 
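+//
+// A hedged usage sketch (client and ctx as above; the status values are
+// illustrative and not taken from the schema):
+//
+//	n, err := client.UserSubscription.Update().
+//		Where(usersubscription.StatusEQ("active")).
+//		SetStatus("expired").
+//		Save(ctx)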
+type UserSubscriptionUpdate struct { + config + hooks []Hook + mutation *UserSubscriptionMutation +} + +// Where appends a list predicates to the UserSubscriptionUpdate builder. +func (_u *UserSubscriptionUpdate) Where(ps ...predicate.UserSubscription) *UserSubscriptionUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserSubscriptionUpdate) SetUpdatedAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserSubscriptionUpdate) SetDeletedAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableDeletedAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserSubscriptionUpdate) ClearDeletedAt() *UserSubscriptionUpdate { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserSubscriptionUpdate) SetUserID(v int64) *UserSubscriptionUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableUserID(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserSubscriptionUpdate) SetGroupID(v int64) *UserSubscriptionUpdate { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableGroupID(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *UserSubscriptionUpdate) SetStartsAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableStartsAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *UserSubscriptionUpdate) SetExpiresAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableExpiresAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserSubscriptionUpdate) SetStatus(v string) *UserSubscriptionUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableStatus(v *string) *UserSubscriptionUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_u *UserSubscriptionUpdate) SetDailyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetDailyWindowStart(v) + return _u +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. 
+func (_u *UserSubscriptionUpdate) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetDailyWindowStart(*v) + } + return _u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (_u *UserSubscriptionUpdate) ClearDailyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearDailyWindowStart() + return _u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (_u *UserSubscriptionUpdate) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetWeeklyWindowStart(v) + return _u +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetWeeklyWindowStart(*v) + } + return _u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (_u *UserSubscriptionUpdate) ClearWeeklyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearWeeklyWindowStart() + return _u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (_u *UserSubscriptionUpdate) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetMonthlyWindowStart(v) + return _u +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetMonthlyWindowStart(*v) + } + return _u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (_u *UserSubscriptionUpdate) ClearMonthlyWindowStart() *UserSubscriptionUpdate { + _u.mutation.ClearMonthlyWindowStart() + return _u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetDailyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetDailyUsageUsd() + _u.mutation.SetDailyUsageUsd(v) + return _u +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetDailyUsageUsd(*v) + } + return _u +} + +// AddDailyUsageUsd adds value to the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdate) AddDailyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddDailyUsageUsd(v) + return _u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetWeeklyUsageUsd() + _u.mutation.SetWeeklyUsageUsd(v) + return _u +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetWeeklyUsageUsd(*v) + } + return _u +} + +// AddWeeklyUsageUsd adds value to the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdate) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddWeeklyUsageUsd(v) + return _u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdate) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.ResetMonthlyUsageUsd() + _u.mutation.SetMonthlyUsageUsd(v) + return _u +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. 
+func (_u *UserSubscriptionUpdate) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionUpdate { + if v != nil { + _u.SetMonthlyUsageUsd(*v) + } + return _u +} + +// AddMonthlyUsageUsd adds value to the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdate) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpdate { + _u.mutation.AddMonthlyUsageUsd(v) + return _u +} + +// SetAssignedBy sets the "assigned_by" field. +func (_u *UserSubscriptionUpdate) SetAssignedBy(v int64) *UserSubscriptionUpdate { + _u.mutation.SetAssignedBy(v) + return _u +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedBy(v *int64) *UserSubscriptionUpdate { + if v != nil { + _u.SetAssignedBy(*v) + } + return _u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (_u *UserSubscriptionUpdate) ClearAssignedBy() *UserSubscriptionUpdate { + _u.mutation.ClearAssignedBy() + return _u +} + +// SetAssignedAt sets the "assigned_at" field. +func (_u *UserSubscriptionUpdate) SetAssignedAt(v time.Time) *UserSubscriptionUpdate { + _u.mutation.SetAssignedAt(v) + return _u +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedAt(v *time.Time) *UserSubscriptionUpdate { + if v != nil { + _u.SetAssignedAt(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserSubscriptionUpdate) SetNotes(v string) *UserSubscriptionUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableNotes(v *string) *UserSubscriptionUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *UserSubscriptionUpdate) ClearNotes() *UserSubscriptionUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserSubscriptionUpdate) SetUser(v *User) *UserSubscriptionUpdate { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdate) SetGroup(v *Group) *UserSubscriptionUpdate { + return _u.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. +func (_u *UserSubscriptionUpdate) SetAssignedByUserID(id int64) *UserSubscriptionUpdate { + _u.mutation.SetAssignedByUserID(id) + return _u +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_u *UserSubscriptionUpdate) SetNillableAssignedByUserID(id *int64) *UserSubscriptionUpdate { + if id != nil { + _u = _u.SetAssignedByUserID(*id) + } + return _u +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdate) SetAssignedByUser(v *User) *UserSubscriptionUpdate { + return _u.SetAssignedByUserID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *UserSubscriptionUpdate) AddUsageLogIDs(ids ...int64) *UserSubscriptionUpdate { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *UserSubscriptionUpdate) AddUsageLogs(v ...*UsageLog) *UserSubscriptionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) 
+} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_u *UserSubscriptionUpdate) Mutation() *UserSubscriptionMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserSubscriptionUpdate) ClearUser() *UserSubscriptionUpdate { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdate) ClearGroup() *UserSubscriptionUpdate { + _u.mutation.ClearGroup() + return _u +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdate) ClearAssignedByUser() *UserSubscriptionUpdate { + _u.mutation.ClearAssignedByUser() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *UserSubscriptionUpdate) ClearUsageLogs() *UserSubscriptionUpdate { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *UserSubscriptionUpdate) RemoveUsageLogIDs(ids ...int64) *UserSubscriptionUpdate { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *UserSubscriptionUpdate) RemoveUsageLogs(v ...*UsageLog) *UserSubscriptionUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *UserSubscriptionUpdate) Save(ctx context.Context) (int, error) { + if err := _u.defaults(); err != nil { + return 0, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserSubscriptionUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *UserSubscriptionUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserSubscriptionUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserSubscriptionUpdate) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if usersubscription.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized usersubscription.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := usersubscription.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *UserSubscriptionUpdate) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.group"`) + } + return nil +} + +func (_u *UserSubscriptionUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(usersubscription.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(usersubscription.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + } + if _u.mutation.DailyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldDailyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + } + if _u.mutation.WeeklyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldWeeklyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + } + if _u.mutation.MonthlyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldMonthlyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, 
field.TypeTime, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(usersubscription.FieldNotes, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedByUserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usersubscription.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// UserSubscriptionUpdateOne is the builder for updating a single UserSubscription entity. +type UserSubscriptionUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserSubscriptionMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *UserSubscriptionUpdateOne) SetUpdatedAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetDeletedAt sets the "deleted_at" field. +func (_u *UserSubscriptionUpdateOne) SetDeletedAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetDeletedAt(v) + return _u +} + +// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableDeletedAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetDeletedAt(*v) + } + return _u +} + +// ClearDeletedAt clears the value of the "deleted_at" field. +func (_u *UserSubscriptionUpdateOne) ClearDeletedAt() *UserSubscriptionUpdateOne { + _u.mutation.ClearDeletedAt() + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *UserSubscriptionUpdateOne) SetUserID(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableUserID(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetGroupID sets the "group_id" field. +func (_u *UserSubscriptionUpdateOne) SetGroupID(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetGroupID(v) + return _u +} + +// SetNillableGroupID sets the "group_id" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableGroupID(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetGroupID(*v) + } + return _u +} + +// SetStartsAt sets the "starts_at" field. +func (_u *UserSubscriptionUpdateOne) SetStartsAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetStartsAt(v) + return _u +} + +// SetNillableStartsAt sets the "starts_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableStartsAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetStartsAt(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. 
+func (_u *UserSubscriptionUpdateOne) SetExpiresAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableExpiresAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *UserSubscriptionUpdateOne) SetStatus(v string) *UserSubscriptionUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableStatus(v *string) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetDailyWindowStart sets the "daily_window_start" field. +func (_u *UserSubscriptionUpdateOne) SetDailyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetDailyWindowStart(v) + return _u +} + +// SetNillableDailyWindowStart sets the "daily_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableDailyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetDailyWindowStart(*v) + } + return _u +} + +// ClearDailyWindowStart clears the value of the "daily_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearDailyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearDailyWindowStart() + return _u +} + +// SetWeeklyWindowStart sets the "weekly_window_start" field. +func (_u *UserSubscriptionUpdateOne) SetWeeklyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetWeeklyWindowStart(v) + return _u +} + +// SetNillableWeeklyWindowStart sets the "weekly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableWeeklyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetWeeklyWindowStart(*v) + } + return _u +} + +// ClearWeeklyWindowStart clears the value of the "weekly_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearWeeklyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearWeeklyWindowStart() + return _u +} + +// SetMonthlyWindowStart sets the "monthly_window_start" field. +func (_u *UserSubscriptionUpdateOne) SetMonthlyWindowStart(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetMonthlyWindowStart(v) + return _u +} + +// SetNillableMonthlyWindowStart sets the "monthly_window_start" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableMonthlyWindowStart(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetMonthlyWindowStart(*v) + } + return _u +} + +// ClearMonthlyWindowStart clears the value of the "monthly_window_start" field. +func (_u *UserSubscriptionUpdateOne) ClearMonthlyWindowStart() *UserSubscriptionUpdateOne { + _u.mutation.ClearMonthlyWindowStart() + return _u +} + +// SetDailyUsageUsd sets the "daily_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetDailyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetDailyUsageUsd() + _u.mutation.SetDailyUsageUsd(v) + return _u +} + +// SetNillableDailyUsageUsd sets the "daily_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableDailyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetDailyUsageUsd(*v) + } + return _u +} + +// AddDailyUsageUsd adds value to the "daily_usage_usd" field. 
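+// Unlike SetDailyUsageUsd, the generated SQL adds v to the current column
+// value instead of overwriting it, so concurrent increments do not lose
+// updates.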
+func (_u *UserSubscriptionUpdateOne) AddDailyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddDailyUsageUsd(v) + return _u +} + +// SetWeeklyUsageUsd sets the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetWeeklyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetWeeklyUsageUsd() + _u.mutation.SetWeeklyUsageUsd(v) + return _u +} + +// SetNillableWeeklyUsageUsd sets the "weekly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableWeeklyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetWeeklyUsageUsd(*v) + } + return _u +} + +// AddWeeklyUsageUsd adds value to the "weekly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) AddWeeklyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddWeeklyUsageUsd(v) + return _u +} + +// SetMonthlyUsageUsd sets the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) SetMonthlyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.ResetMonthlyUsageUsd() + _u.mutation.SetMonthlyUsageUsd(v) + return _u +} + +// SetNillableMonthlyUsageUsd sets the "monthly_usage_usd" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableMonthlyUsageUsd(v *float64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetMonthlyUsageUsd(*v) + } + return _u +} + +// AddMonthlyUsageUsd adds value to the "monthly_usage_usd" field. +func (_u *UserSubscriptionUpdateOne) AddMonthlyUsageUsd(v float64) *UserSubscriptionUpdateOne { + _u.mutation.AddMonthlyUsageUsd(v) + return _u +} + +// SetAssignedBy sets the "assigned_by" field. +func (_u *UserSubscriptionUpdateOne) SetAssignedBy(v int64) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedBy(v) + return _u +} + +// SetNillableAssignedBy sets the "assigned_by" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableAssignedBy(v *int64) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetAssignedBy(*v) + } + return _u +} + +// ClearAssignedBy clears the value of the "assigned_by" field. +func (_u *UserSubscriptionUpdateOne) ClearAssignedBy() *UserSubscriptionUpdateOne { + _u.mutation.ClearAssignedBy() + return _u +} + +// SetAssignedAt sets the "assigned_at" field. +func (_u *UserSubscriptionUpdateOne) SetAssignedAt(v time.Time) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedAt(v) + return _u +} + +// SetNillableAssignedAt sets the "assigned_at" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableAssignedAt(v *time.Time) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetAssignedAt(*v) + } + return _u +} + +// SetNotes sets the "notes" field. +func (_u *UserSubscriptionUpdateOne) SetNotes(v string) *UserSubscriptionUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableNotes(v *string) *UserSubscriptionUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *UserSubscriptionUpdateOne) ClearNotes() *UserSubscriptionUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetUser sets the "user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) SetUser(v *User) *UserSubscriptionUpdateOne { + return _u.SetUserID(v.ID) +} + +// SetGroup sets the "group" edge to the Group entity. 
+func (_u *UserSubscriptionUpdateOne) SetGroup(v *Group) *UserSubscriptionUpdateOne { + return _u.SetGroupID(v.ID) +} + +// SetAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID. +func (_u *UserSubscriptionUpdateOne) SetAssignedByUserID(id int64) *UserSubscriptionUpdateOne { + _u.mutation.SetAssignedByUserID(id) + return _u +} + +// SetNillableAssignedByUserID sets the "assigned_by_user" edge to the User entity by ID if the given value is not nil. +func (_u *UserSubscriptionUpdateOne) SetNillableAssignedByUserID(id *int64) *UserSubscriptionUpdateOne { + if id != nil { + _u = _u.SetAssignedByUserID(*id) + } + return _u +} + +// SetAssignedByUser sets the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) SetAssignedByUser(v *User) *UserSubscriptionUpdateOne { + return _u.SetAssignedByUserID(v.ID) +} + +// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs. +func (_u *UserSubscriptionUpdateOne) AddUsageLogIDs(ids ...int64) *UserSubscriptionUpdateOne { + _u.mutation.AddUsageLogIDs(ids...) + return _u +} + +// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity. +func (_u *UserSubscriptionUpdateOne) AddUsageLogs(v ...*UsageLog) *UserSubscriptionUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageLogIDs(ids...) +} + +// Mutation returns the UserSubscriptionMutation object of the builder. +func (_u *UserSubscriptionUpdateOne) Mutation() *UserSubscriptionMutation { + return _u.mutation +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) ClearUser() *UserSubscriptionUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// ClearGroup clears the "group" edge to the Group entity. +func (_u *UserSubscriptionUpdateOne) ClearGroup() *UserSubscriptionUpdateOne { + _u.mutation.ClearGroup() + return _u +} + +// ClearAssignedByUser clears the "assigned_by_user" edge to the User entity. +func (_u *UserSubscriptionUpdateOne) ClearAssignedByUser() *UserSubscriptionUpdateOne { + _u.mutation.ClearAssignedByUser() + return _u +} + +// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity. +func (_u *UserSubscriptionUpdateOne) ClearUsageLogs() *UserSubscriptionUpdateOne { + _u.mutation.ClearUsageLogs() + return _u +} + +// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs. +func (_u *UserSubscriptionUpdateOne) RemoveUsageLogIDs(ids ...int64) *UserSubscriptionUpdateOne { + _u.mutation.RemoveUsageLogIDs(ids...) + return _u +} + +// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities. +func (_u *UserSubscriptionUpdateOne) RemoveUsageLogs(v ...*UsageLog) *UserSubscriptionUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageLogIDs(ids...) +} + +// Where appends a list predicates to the UserSubscriptionUpdate builder. +func (_u *UserSubscriptionUpdateOne) Where(ps ...predicate.UserSubscription) *UserSubscriptionUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *UserSubscriptionUpdateOne) Select(field string, fields ...string) *UserSubscriptionUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated UserSubscription entity. 
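+//
+// For example (a sketch; id is assumed to hold an existing subscription ID):
+//
+//	sub, err := client.UserSubscription.UpdateOneID(id).
+//		AddDailyUsageUsd(0.25).
+//		Save(ctx)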
+func (_u *UserSubscriptionUpdateOne) Save(ctx context.Context) (*UserSubscription, error) { + if err := _u.defaults(); err != nil { + return nil, err + } + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *UserSubscriptionUpdateOne) SaveX(ctx context.Context) *UserSubscription { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *UserSubscriptionUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *UserSubscriptionUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *UserSubscriptionUpdateOne) defaults() error { + if _, ok := _u.mutation.UpdatedAt(); !ok { + if usersubscription.UpdateDefaultUpdatedAt == nil { + return fmt.Errorf("ent: uninitialized usersubscription.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)") + } + v := usersubscription.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } + return nil +} + +// check runs all checks and user-defined validators on the builder. +func (_u *UserSubscriptionUpdateOne) check() error { + if v, ok := _u.mutation.Status(); ok { + if err := usersubscription.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "UserSubscription.status": %w`, err)} + } + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.user"`) + } + if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "UserSubscription.group"`) + } + return nil +} + +func (_u *UserSubscriptionUpdateOne) sqlSave(ctx context.Context) (_node *UserSubscription, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(usersubscription.Table, usersubscription.Columns, sqlgraph.NewFieldSpec(usersubscription.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "UserSubscription.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, usersubscription.FieldID) + for _, f := range fields { + if !usersubscription.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != usersubscription.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(usersubscription.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.DeletedAt(); ok { + _spec.SetField(usersubscription.FieldDeletedAt, field.TypeTime, value) + } + if _u.mutation.DeletedAtCleared() { + _spec.ClearField(usersubscription.FieldDeletedAt, field.TypeTime) + } + if value, ok := _u.mutation.StartsAt(); ok { + _spec.SetField(usersubscription.FieldStartsAt, field.TypeTime, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + 
_spec.SetField(usersubscription.FieldExpiresAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(usersubscription.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.DailyWindowStart(); ok { + _spec.SetField(usersubscription.FieldDailyWindowStart, field.TypeTime, value) + } + if _u.mutation.DailyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldDailyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.WeeklyWindowStart(); ok { + _spec.SetField(usersubscription.FieldWeeklyWindowStart, field.TypeTime, value) + } + if _u.mutation.WeeklyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldWeeklyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.MonthlyWindowStart(); ok { + _spec.SetField(usersubscription.FieldMonthlyWindowStart, field.TypeTime, value) + } + if _u.mutation.MonthlyWindowStartCleared() { + _spec.ClearField(usersubscription.FieldMonthlyWindowStart, field.TypeTime) + } + if value, ok := _u.mutation.DailyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedDailyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldDailyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.WeeklyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedWeeklyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldWeeklyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MonthlyUsageUsd(); ok { + _spec.SetField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedMonthlyUsageUsd(); ok { + _spec.AddField(usersubscription.FieldMonthlyUsageUsd, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AssignedAt(); ok { + _spec.SetField(usersubscription.FieldAssignedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(usersubscription.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(usersubscription.FieldNotes, field.TypeString) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.UserTable, + Columns: []string{usersubscription.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.GroupCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.GroupTable, + 
Columns: []string{usersubscription.GroupColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.AssignedByUserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.AssignedByUserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: usersubscription.AssignedByUserTable, + Columns: []string{usersubscription.AssignedByUserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: usersubscription.UsageLogsTable, + Columns: []string{usersubscription.UsageLogsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &UserSubscription{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{usersubscription.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 00000000..4ac6ba14 --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,151 @@ +module github.com/Wei-Shaw/sub2api + +go 1.25.5 + +require ( + entgo.io/ent v0.14.5 + github.com/gin-gonic/gin v1.9.1 + github.com/golang-jwt/jwt/v5 v5.2.2 + github.com/google/uuid v1.6.0 + github.com/google/wire v0.7.0 + github.com/gorilla/websocket v1.5.3 + github.com/imroc/req/v3 v3.57.0 + github.com/lib/pq v1.10.9 + 
github.com/redis/go-redis/v9 v9.17.2 + github.com/shirou/gopsutil/v4 v4.25.6 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.11.1 + github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 + github.com/testcontainers/testcontainers-go/modules/redis v0.40.0 + github.com/tidwall/gjson v1.18.0 + github.com/tidwall/sjson v1.2.5 + github.com/zeromicro/go-zero v1.9.4 + golang.org/x/crypto v0.46.0 + golang.org/x/net v0.48.0 + golang.org/x/sync v0.19.0 + golang.org/x/term v0.38.0 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/andybalholm/brotli v1.2.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/bytedance/sonic v1.9.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgraph-io/ristretto v0.2.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v28.5.1+incompatible // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/ebitengine/purego v0.8.4 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/subcommands v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.1 // indirect + github.com/icholy/digest v1.1.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + 
github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mdelapenya/tlscert v0.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/quic-go/qpack v0.6.0 // indirect + github.com/quic-go/quic-go v0.57.1 // indirect + github.com/refraction-networking/utls v1.8.1 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/testcontainers/testcontainers-go v0.40.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/tools v0.39.0 // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 00000000..415e73a7 --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,390 @@ +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 
h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc= +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +entgo.io/ent v0.14.5 h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4= +entgo.io/ent v0.14.5/go.mod h1:zTzLmWtPvGpmSwtkaayM2cm5m819NdM7z7tYPq3vN0U= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= +github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= 
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= +github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify 
v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= +github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= +github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/icholy/digest v1.1.0 h1:HfGg9Irj7i+IX1o1QAmPfIBNu/Q5A5Tu3n/MED9k9H4= +github.com/icholy/digest v1.1.0/go.mod h1:QNrsSGQ5v7v9cReDI0+eyjsXGUoRSUZQHeQ5C4XLa0Y= +github.com/imroc/req/v3 v3.57.0 h1:LMTUjNRUybUkTPn8oJDq8Kg3JRBOBTcnDhKu7mzupKI= +github.com/imroc/req/v3 v3.57.0/go.mod h1:JL62ey1nvSLq81HORNcosvlf7SxZStONNqOprg0Pz00= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg= +github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= +github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod 
h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= +github.com/mdelapenya/tlscert v0.2.0/go.mod h1:O4njj3ELLnJjGdkN7M/vIVCpZ+Cf0L6muqOG4tLSl8o= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8= +github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII= +github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10= +github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s= +github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI= +github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= +github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkrMJI0pRUOCAo= +github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+aMnb6JfVz1mxk7OeMU= +github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= +github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 h1:s2bIayFXlbDFexo96y+htn7FzuhpXLYJNnIuglNKqOk= +github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0/go.mod h1:h+u/2KoREGTnTl9UwrQ/g+XhasAT8E6dClclAADeXoQ= +github.com/testcontainers/testcontainers-go/modules/redis v0.40.0 h1:OG4qwcxp2O0re7V7M9lY9w0v6wWgWf7j7rtkpAnGMd0= +github.com/testcontainers/testcontainers-go/modules/redis v0.40.0/go.mod h1:Bc+EDhKMo5zI5V5zdBkHiMVzeAXbtI4n5isS/nzf6zw= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU= +github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +github.com/zeromicro/go-zero v1.9.4 h1:aRLFoISqAYijABtkbliQC5SsI5TbizJpQvoHc9xup8k= +github.com/zeromicro/go-zero v1.9.4/go.mod h1:a17JOTch25SWxBcUgJZYps60hygK3pIYdw7nGwlcS38= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 
h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0 h1:t6wl9SPayj+c7lEIFgm4ooDBZVb01IhLB4InpomhRw8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.24.0/go.mod h1:iSDOcsnSA5INXzZtwaBPrKp/lWu/V14Dd+llD0oI2EA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= +go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= +google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 h1:i8QOKZfYg6AbGVZzUAY3LrNWCKF8O6zFisU9Wl9RER4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go new file mode 100644 index 00000000..944e0f84 --- /dev/null +++ b/backend/internal/config/config.go @@ -0,0 +1,1249 @@ +// Package config provides configuration loading, defaults, and validation. +package config + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "log" + "net/url" + "os" + "strings" + "time" + + "github.com/spf13/viper" +) + +const ( + RunModeStandard = "standard" + RunModeSimple = "simple" +) + +const DefaultCSPPolicy = "default-src 'self'; script-src 'self' https://challenges.cloudflare.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" + +// 连接池隔离策略常量 +// 用于控制上游 HTTP 连接池的隔离粒度,影响连接复用和资源消耗 +const ( + // ConnectionPoolIsolationProxy: 按代理隔离 + // 同一代理地址共享连接池,适合代理数量少、账户数量多的场景 + ConnectionPoolIsolationProxy = "proxy" + // ConnectionPoolIsolationAccount: 按账户隔离 + // 每个账户独立连接池,适合账户数量少、需要严格隔离的场景 + ConnectionPoolIsolationAccount = "account" + // ConnectionPoolIsolationAccountProxy: 按账户+代理组合隔离(默认) + // 同一账户+代理组合共享连接池,提供最细粒度的隔离 + ConnectionPoolIsolationAccountProxy = "account_proxy" +) + +type Config struct { + Server ServerConfig `mapstructure:"server"` + CORS CORSConfig `mapstructure:"cors"` + Security SecurityConfig `mapstructure:"security"` + Billing BillingConfig `mapstructure:"billing"` + Turnstile TurnstileConfig `mapstructure:"turnstile"` + Database DatabaseConfig `mapstructure:"database"` + Redis RedisConfig `mapstructure:"redis"` + Ops OpsConfig `mapstructure:"ops"` + JWT JWTConfig `mapstructure:"jwt"` + LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"` + Default DefaultConfig `mapstructure:"default"` + RateLimit RateLimitConfig `mapstructure:"rate_limit"` + Pricing PricingConfig `mapstructure:"pricing"` + Gateway GatewayConfig `mapstructure:"gateway"` + APIKeyAuth APIKeyAuthCacheConfig `mapstructure:"api_key_auth_cache"` + Dashboard DashboardCacheConfig `mapstructure:"dashboard_cache"` + DashboardAgg DashboardAggregationConfig `mapstructure:"dashboard_aggregation"` + Concurrency ConcurrencyConfig `mapstructure:"concurrency"` + TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"` + RunMode string `mapstructure:"run_mode" yaml:"run_mode"` + Timezone string `mapstructure:"timezone"` // e.g. 
"Asia/Shanghai", "UTC" + Gemini GeminiConfig `mapstructure:"gemini"` + Update UpdateConfig `mapstructure:"update"` +} + +type GeminiConfig struct { + OAuth GeminiOAuthConfig `mapstructure:"oauth"` + Quota GeminiQuotaConfig `mapstructure:"quota"` +} + +type GeminiOAuthConfig struct { + ClientID string `mapstructure:"client_id"` + ClientSecret string `mapstructure:"client_secret"` + Scopes string `mapstructure:"scopes"` +} + +type GeminiQuotaConfig struct { + Tiers map[string]GeminiTierQuotaConfig `mapstructure:"tiers"` + Policy string `mapstructure:"policy"` +} + +type GeminiTierQuotaConfig struct { + ProRPD *int64 `mapstructure:"pro_rpd" json:"pro_rpd"` + FlashRPD *int64 `mapstructure:"flash_rpd" json:"flash_rpd"` + CooldownMinutes *int `mapstructure:"cooldown_minutes" json:"cooldown_minutes"` +} + +type UpdateConfig struct { + // ProxyURL 用于访问 GitHub 的代理地址 + // 支持 http/https/socks5/socks5h 协议 + // 例如: "http://127.0.0.1:7890", "socks5://127.0.0.1:1080" + ProxyURL string `mapstructure:"proxy_url"` +} + +type LinuxDoConnectConfig struct { + Enabled bool `mapstructure:"enabled"` + ClientID string `mapstructure:"client_id"` + ClientSecret string `mapstructure:"client_secret"` + AuthorizeURL string `mapstructure:"authorize_url"` + TokenURL string `mapstructure:"token_url"` + UserInfoURL string `mapstructure:"userinfo_url"` + Scopes string `mapstructure:"scopes"` + RedirectURL string `mapstructure:"redirect_url"` // 后端回调地址(需在提供方后台登记) + FrontendRedirectURL string `mapstructure:"frontend_redirect_url"` // 前端接收 token 的路由(默认:/auth/linuxdo/callback) + TokenAuthMethod string `mapstructure:"token_auth_method"` // client_secret_post / client_secret_basic / none + UsePKCE bool `mapstructure:"use_pkce"` + + // 可选:用于从 userinfo JSON 中提取字段的 gjson 路径。 + // 为空时,服务端会尝试一组常见字段名。 + UserInfoEmailPath string `mapstructure:"userinfo_email_path"` + UserInfoIDPath string `mapstructure:"userinfo_id_path"` + UserInfoUsernamePath string `mapstructure:"userinfo_username_path"` +} + +// TokenRefreshConfig OAuth token自动刷新配置 +type TokenRefreshConfig struct { + // 是否启用自动刷新 + Enabled bool `mapstructure:"enabled"` + // 检查间隔(分钟) + CheckIntervalMinutes int `mapstructure:"check_interval_minutes"` + // 提前刷新时间(小时),在token过期前多久开始刷新 + RefreshBeforeExpiryHours float64 `mapstructure:"refresh_before_expiry_hours"` + // 最大重试次数 + MaxRetries int `mapstructure:"max_retries"` + // 重试退避基础时间(秒) + RetryBackoffSeconds int `mapstructure:"retry_backoff_seconds"` +} + +type PricingConfig struct { + // 价格数据远程URL(默认使用LiteLLM镜像) + RemoteURL string `mapstructure:"remote_url"` + // 哈希校验文件URL + HashURL string `mapstructure:"hash_url"` + // 本地数据目录 + DataDir string `mapstructure:"data_dir"` + // 回退文件路径 + FallbackFile string `mapstructure:"fallback_file"` + // 更新间隔(小时) + UpdateIntervalHours int `mapstructure:"update_interval_hours"` + // 哈希校验间隔(分钟) + HashCheckIntervalMinutes int `mapstructure:"hash_check_interval_minutes"` +} + +type ServerConfig struct { + Host string `mapstructure:"host"` + Port int `mapstructure:"port"` + Mode string `mapstructure:"mode"` // debug/release + ReadHeaderTimeout int `mapstructure:"read_header_timeout"` // 读取请求头超时(秒) + IdleTimeout int `mapstructure:"idle_timeout"` // 空闲连接超时(秒) + TrustedProxies []string `mapstructure:"trusted_proxies"` // 可信代理列表(CIDR/IP) +} + +type CORSConfig struct { + AllowedOrigins []string `mapstructure:"allowed_origins"` + AllowCredentials bool `mapstructure:"allow_credentials"` +} + +type SecurityConfig struct { + URLAllowlist URLAllowlistConfig `mapstructure:"url_allowlist"` + ResponseHeaders 
+type PricingConfig struct {
+	// Remote URL for pricing data (defaults to a LiteLLM mirror).
+	RemoteURL string `mapstructure:"remote_url"`
+	// URL of the hash-check file.
+	HashURL string `mapstructure:"hash_url"`
+	// Local data directory.
+	DataDir string `mapstructure:"data_dir"`
+	// Fallback file path.
+	FallbackFile string `mapstructure:"fallback_file"`
+	// Update interval (hours).
+	UpdateIntervalHours int `mapstructure:"update_interval_hours"`
+	// Hash check interval (minutes).
+	HashCheckIntervalMinutes int `mapstructure:"hash_check_interval_minutes"`
+}
+
+type ServerConfig struct {
+	Host              string   `mapstructure:"host"`
+	Port              int      `mapstructure:"port"`
+	Mode              string   `mapstructure:"mode"`                // debug/release
+	ReadHeaderTimeout int      `mapstructure:"read_header_timeout"` // request-header read timeout (seconds)
+	IdleTimeout       int      `mapstructure:"idle_timeout"`        // idle connection timeout (seconds)
+	TrustedProxies    []string `mapstructure:"trusted_proxies"`     // trusted proxies (CIDR/IP)
+}
+
+type CORSConfig struct {
+	AllowedOrigins   []string `mapstructure:"allowed_origins"`
+	AllowCredentials bool     `mapstructure:"allow_credentials"`
+}
+
+type SecurityConfig struct {
+	URLAllowlist    URLAllowlistConfig   `mapstructure:"url_allowlist"`
+	ResponseHeaders ResponseHeaderConfig `mapstructure:"response_headers"`
+	CSP             CSPConfig            `mapstructure:"csp"`
+	ProxyProbe      ProxyProbeConfig     `mapstructure:"proxy_probe"`
+}
+
+type URLAllowlistConfig struct {
+	Enabled           bool     `mapstructure:"enabled"`
+	UpstreamHosts     []string `mapstructure:"upstream_hosts"`
+	PricingHosts      []string `mapstructure:"pricing_hosts"`
+	CRSHosts          []string `mapstructure:"crs_hosts"`
+	AllowPrivateHosts bool     `mapstructure:"allow_private_hosts"`
+	// When allowlist validation is disabled, whether plain-http URLs are allowed
+	// (https-only by default).
+	AllowInsecureHTTP bool `mapstructure:"allow_insecure_http"`
+}
+
+type ResponseHeaderConfig struct {
+	Enabled           bool     `mapstructure:"enabled"`
+	AdditionalAllowed []string `mapstructure:"additional_allowed"`
+	ForceRemove       []string `mapstructure:"force_remove"`
+}
+
+type CSPConfig struct {
+	Enabled bool   `mapstructure:"enabled"`
+	Policy  string `mapstructure:"policy"`
+}
+
+type ProxyProbeConfig struct {
+	InsecureSkipVerify bool `mapstructure:"insecure_skip_verify"` // disabled: skipping TLS certificate verification is not allowed
+}
+
+type BillingConfig struct {
+	CircuitBreaker CircuitBreakerConfig `mapstructure:"circuit_breaker"`
+}
+
+type CircuitBreakerConfig struct {
+	Enabled             bool `mapstructure:"enabled"`
+	FailureThreshold    int  `mapstructure:"failure_threshold"`
+	ResetTimeoutSeconds int  `mapstructure:"reset_timeout_seconds"`
+	HalfOpenRequests    int  `mapstructure:"half_open_requests"`
+}
+
+type ConcurrencyConfig struct {
+	// PingInterval: SSE ping interval while a request waits on concurrency (seconds).
+	PingInterval int `mapstructure:"ping_interval"`
+}
+
+// GatewayConfig holds API gateway settings.
+type GatewayConfig struct {
+	// Timeout waiting for upstream response headers (seconds); 0 means no timeout.
+	// Note: this only bounds the wait for headers, not streaming transfers.
+	ResponseHeaderTimeout int `mapstructure:"response_header_timeout"`
+	// Maximum request body size in bytes, enforced by the gateway.
+	MaxBodySize int64 `mapstructure:"max_body_size"`
+	// ConnectionPoolIsolation: upstream pool isolation strategy (proxy/account/account_proxy).
+	ConnectionPoolIsolation string `mapstructure:"connection_pool_isolation"`
+
+	// Upstream HTTP connection pool settings (performance: tunable for high-concurrency workloads).
+	// MaxIdleConns: total idle connections across all hosts.
+	MaxIdleConns int `mapstructure:"max_idle_conns"`
+	// MaxIdleConnsPerHost: max idle connections per host (the key knob for connection reuse).
+	MaxIdleConnsPerHost int `mapstructure:"max_idle_conns_per_host"`
+	// MaxConnsPerHost: max connections per host (active + idle); 0 means unlimited.
+	MaxConnsPerHost int `mapstructure:"max_conns_per_host"`
+	// IdleConnTimeoutSeconds: idle connection timeout (seconds).
+	IdleConnTimeoutSeconds int `mapstructure:"idle_conn_timeout_seconds"`
+	// MaxUpstreamClients: maximum number of cached upstream HTTP clients.
+	// With pool isolation enabled, a separate HTTP client is created per
+	// account/proxy combination; this caps how many are cached, evicting the
+	// least recently used beyond the limit.
+	// Suggested value: estimated active accounts * 1.2 (for headroom).
+	MaxUpstreamClients int `mapstructure:"max_upstream_clients"`
+	// ClientIdleTTLSeconds: idle-reclaim threshold for upstream clients (seconds).
+	// Clients unused for longer are marked reclaimable.
+	// Suggested value: depends on access patterns, typically 10-30 minutes.
+	ClientIdleTTLSeconds int `mapstructure:"client_idle_ttl_seconds"`
+	// ConcurrencySlotTTLMinutes: concurrency slot TTL (minutes).
+	// Should exceed the longest LLM request so a slot cannot expire mid-request.
+	ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"`
+
+	// StreamDataIntervalTimeout: timeout between stream data chunks (seconds); 0 disables.
+	StreamDataIntervalTimeout int `mapstructure:"stream_data_interval_timeout"`
+	// StreamKeepaliveInterval: streaming keepalive interval (seconds); 0 disables.
+	StreamKeepaliveInterval int `mapstructure:"stream_keepalive_interval"`
+	// MaxLineSize: max bytes per upstream SSE line (0 uses the default).
+	MaxLineSize int `mapstructure:"max_line_size"`
+
+	// Whether to log a digest of upstream error response bodies (request content is never logged).
+	LogUpstreamErrorBody bool `mapstructure:"log_upstream_error_body"`
+	// Max bytes of the upstream error body to log (longer bodies are truncated).
+	LogUpstreamErrorBodyMaxBytes int `mapstructure:"log_upstream_error_body_max_bytes"`
+
+	// For API-key accounts, whether to inject anthropic-beta on demand when the
+	// client omits it (off by default for compatibility).
+	InjectBetaForAPIKey bool `mapstructure:"inject_beta_for_apikey"`
+
+	// Whether some 400 errors may trigger failover (off by default to preserve semantics).
+	FailoverOn400 bool `mapstructure:"failover_on_400"`
+
+	// Scheduling: account scheduling settings.
+	Scheduling GatewaySchedulingConfig `mapstructure:"scheduling"`
+}
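+
+// Worked example for the MaxUpstreamClients sizing hint above (illustrative
+// numbers): with roughly 1000 active account/proxy combinations, set
+// max_upstream_clients to about 1000 * 1.2 = 1200, leaving headroom before the
+// LRU eviction of cached upstream clients kicks in.
+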
+// GatewaySchedulingConfig holds account scheduling configuration.
+type GatewaySchedulingConfig struct {
+	// Sticky-session queueing.
+	StickySessionMaxWaiting  int           `mapstructure:"sticky_session_max_waiting"`
+	StickySessionWaitTimeout time.Duration `mapstructure:"sticky_session_wait_timeout"`
+
+	// Fallback queueing.
+	FallbackWaitTimeout time.Duration `mapstructure:"fallback_wait_timeout"`
+	FallbackMaxWaiting  int           `mapstructure:"fallback_max_waiting"`
+
+	// Load computation.
+	LoadBatchEnabled bool `mapstructure:"load_batch_enabled"`
+
+	// Cleanup interval for expired slots (0 disables).
+	SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"`
+
+	// Controlled fallback to the database.
+	DbFallbackEnabled bool `mapstructure:"db_fallback_enabled"`
+	// Controlled-fallback timeout (seconds); 0 means no extra tightening.
+	DbFallbackTimeoutSeconds int `mapstructure:"db_fallback_timeout_seconds"`
+	// Controlled-fallback rate limit (per-instance QPS); 0 means unlimited.
+	DbFallbackMaxQPS int `mapstructure:"db_fallback_max_qps"`
+
+	// Outbox polling and lag thresholds.
+	// Outbox poll interval (seconds).
+	OutboxPollIntervalSeconds int `mapstructure:"outbox_poll_interval_seconds"`
+	// Outbox lag warning threshold (seconds).
+	OutboxLagWarnSeconds int `mapstructure:"outbox_lag_warn_seconds"`
+	// Outbox lag threshold that forces a rebuild (seconds).
+	OutboxLagRebuildSeconds int `mapstructure:"outbox_lag_rebuild_seconds"`
+	// Consecutive lag hits required to trigger a rebuild.
+	OutboxLagRebuildFailures int `mapstructure:"outbox_lag_rebuild_failures"`
+	// Outbox backlog that triggers a rebuild (rows).
+	OutboxBacklogRebuildRows int `mapstructure:"outbox_backlog_rebuild_rows"`
+
+	// Full rebuild interval (seconds); 0 disables.
+	FullRebuildIntervalSeconds int `mapstructure:"full_rebuild_interval_seconds"`
+}
+
+func (s *ServerConfig) Address() string {
+	return fmt.Sprintf("%s:%d", s.Host, s.Port)
+}
+
+// DatabaseConfig holds database connection settings.
+// Performance: pool parameters avoid repeatedly creating and tearing down connections.
+type DatabaseConfig struct {
+	Host     string `mapstructure:"host"`
+	Port     int    `mapstructure:"port"`
+	User     string `mapstructure:"user"`
+	Password string `mapstructure:"password"`
+	DBName   string `mapstructure:"dbname"`
+	SSLMode  string `mapstructure:"sslmode"`
+	// Connection pool settings (performance: tunable pool parameters).
+	// MaxOpenConns caps open connections so the database is not exhausted.
+	MaxOpenConns int `mapstructure:"max_open_conns"`
+	// MaxIdleConns keeps warm connections to cut connection-setup latency.
+	MaxIdleConns int `mapstructure:"max_idle_conns"`
+	// ConnMaxLifetimeMinutes bounds connection lifetime to avoid leaks from long-lived connections.
+	ConnMaxLifetimeMinutes int `mapstructure:"conn_max_lifetime_minutes"`
+	// ConnMaxIdleTimeMinutes releases inactive connections promptly.
+	ConnMaxIdleTimeMinutes int `mapstructure:"conn_max_idle_time_minutes"`
+}
+
+func (d *DatabaseConfig) DSN() string {
+	// Omit the password parameter when empty to avoid libpq parse errors.
+	if d.Password == "" {
+		return fmt.Sprintf(
+			"host=%s port=%d user=%s dbname=%s sslmode=%s",
+			d.Host, d.Port, d.User, d.DBName, d.SSLMode,
+		)
+	}
+	return fmt.Sprintf(
+		"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
+		d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode,
+	)
+}
+
+// DSNWithTimezone returns the DSN with a TimeZone setting.
+func (d *DatabaseConfig) DSNWithTimezone(tz string) string {
+	if tz == "" {
+		tz = "Asia/Shanghai"
+	}
+	// Omit the password parameter when empty to avoid libpq parse errors.
+	if d.Password == "" {
+		return fmt.Sprintf(
+			"host=%s port=%d user=%s dbname=%s sslmode=%s TimeZone=%s",
+			d.Host, d.Port, d.User, d.DBName, d.SSLMode, tz,
+		)
+	}
+	return fmt.Sprintf(
+		"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s TimeZone=%s",
+		d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode, tz,
+	)
+}
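+
+// Example output (using the database defaults from setDefaults below):
+// DSNWithTimezone("UTC") returns
+// "host=localhost port=5432 user=postgres password=postgres dbname=sub2api sslmode=disable TimeZone=UTC";
+// with an empty password the password= field is omitted entirely.
+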
+// RedisConfig holds Redis connection settings.
+// Performance: pool and timeout parameters improve throughput under high concurrency.
+type RedisConfig struct {
+	Host     string `mapstructure:"host"`
+	Port     int    `mapstructure:"port"`
+	Password string `mapstructure:"password"`
+	DB       int    `mapstructure:"db"`
+	// Pool and timeout settings (performance: tunable pool parameters).
+	// DialTimeoutSeconds bounds connection setup so slow dials do not block.
+	DialTimeoutSeconds int `mapstructure:"dial_timeout_seconds"`
+	// ReadTimeoutSeconds keeps slow reads from tying up the pool.
+	ReadTimeoutSeconds int `mapstructure:"read_timeout_seconds"`
+	// WriteTimeoutSeconds keeps slow writes from tying up the pool.
+	WriteTimeoutSeconds int `mapstructure:"write_timeout_seconds"`
+	// PoolSize caps the number of concurrent connections.
+	PoolSize int `mapstructure:"pool_size"`
+	// MinIdleConns keeps warm connections to cut cold-start latency.
+	MinIdleConns int `mapstructure:"min_idle_conns"`
+}
+
+func (r *RedisConfig) Address() string {
+	return fmt.Sprintf("%s:%d", r.Host, r.Port)
+}
+
+type OpsConfig struct {
+	// Enabled controls whether ops features should run.
+	//
+	// NOTE: vNext still has a DB-backed feature flag (ops_monitoring_enabled) for runtime on/off.
+	// This config flag is the "hard switch" for deployments that want to disable ops completely.
+	Enabled bool `mapstructure:"enabled"`
+
+	// UsePreaggregatedTables prefers ops_metrics_hourly/daily for long-window dashboard queries.
+	UsePreaggregatedTables bool `mapstructure:"use_preaggregated_tables"`
+
+	// Cleanup controls periodic deletion of old ops data to prevent unbounded growth.
+	Cleanup OpsCleanupConfig `mapstructure:"cleanup"`
+
+	// MetricsCollectorCache controls Redis caching for expensive per-window collector queries.
+	MetricsCollectorCache OpsMetricsCollectorCacheConfig `mapstructure:"metrics_collector_cache"`
+
+	// Pre-aggregation configuration.
+	Aggregation OpsAggregationConfig `mapstructure:"aggregation"`
+}
+
+type OpsCleanupConfig struct {
+	Enabled  bool   `mapstructure:"enabled"`
+	Schedule string `mapstructure:"schedule"`
+
+	// Retention days (0 disables that cleanup target).
+	//
+	// vNext requirement: default 30 days across ops datasets.
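+	//
+	// Illustrative reading (using the defaults from setDefaults): with the
+	// schedule "0 2 * * *" the cleanup job runs daily at 02:00 and applies the
+	// 30-day retentions below; setting a field to 0 turns that target off.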
+	ErrorLogRetentionDays      int `mapstructure:"error_log_retention_days"`
+	MinuteMetricsRetentionDays int `mapstructure:"minute_metrics_retention_days"`
+	HourlyMetricsRetentionDays int `mapstructure:"hourly_metrics_retention_days"`
+}
+
+type OpsAggregationConfig struct {
+	Enabled bool `mapstructure:"enabled"`
+}
+
+type OpsMetricsCollectorCacheConfig struct {
+	Enabled bool          `mapstructure:"enabled"`
+	TTL     time.Duration `mapstructure:"ttl"`
+}
+
+type JWTConfig struct {
+	Secret     string `mapstructure:"secret"`
+	ExpireHour int    `mapstructure:"expire_hour"`
+}
+
+type TurnstileConfig struct {
+	Required bool `mapstructure:"required"`
+}
+
+type DefaultConfig struct {
+	AdminEmail      string  `mapstructure:"admin_email"`
+	AdminPassword   string  `mapstructure:"admin_password"`
+	UserConcurrency int     `mapstructure:"user_concurrency"`
+	UserBalance     float64 `mapstructure:"user_balance"`
+	APIKeyPrefix    string  `mapstructure:"api_key_prefix"`
+	RateMultiplier  float64 `mapstructure:"rate_multiplier"`
+}
+
+type RateLimitConfig struct {
+	OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // cooldown after 529 overload responses (minutes)
+}
+
+// APIKeyAuthCacheConfig configures the API key authentication cache.
+type APIKeyAuthCacheConfig struct {
+	L1Size             int  `mapstructure:"l1_size"`
+	L1TTLSeconds       int  `mapstructure:"l1_ttl_seconds"`
+	L2TTLSeconds       int  `mapstructure:"l2_ttl_seconds"`
+	NegativeTTLSeconds int  `mapstructure:"negative_ttl_seconds"`
+	JitterPercent      int  `mapstructure:"jitter_percent"`
+	Singleflight       bool `mapstructure:"singleflight"`
+}
+
+// DashboardCacheConfig configures the dashboard statistics cache.
+type DashboardCacheConfig struct {
+	// Enabled toggles the dashboard cache.
+	Enabled bool `mapstructure:"enabled"`
+	// KeyPrefix is the Redis key prefix, used to isolate multiple environments.
+	KeyPrefix string `mapstructure:"key_prefix"`
+	// StatsFreshTTLSeconds is the window during which a cache hit still counts as "fresh" (seconds).
+	StatsFreshTTLSeconds int `mapstructure:"stats_fresh_ttl_seconds"`
+	// StatsTTLSeconds is the total Redis TTL of the cache (seconds).
+	StatsTTLSeconds int `mapstructure:"stats_ttl_seconds"`
+	// StatsRefreshTimeoutSeconds is the asynchronous refresh timeout (seconds).
+	StatsRefreshTimeoutSeconds int `mapstructure:"stats_refresh_timeout_seconds"`
+}
+
+// DashboardAggregationConfig configures dashboard pre-aggregation.
+type DashboardAggregationConfig struct {
+	// Enabled toggles the pre-aggregation job.
+	Enabled bool `mapstructure:"enabled"`
+	// IntervalSeconds is the aggregation refresh interval (seconds).
+	IntervalSeconds int `mapstructure:"interval_seconds"`
+	// LookbackSeconds is the look-back window (seconds).
+	LookbackSeconds int `mapstructure:"lookback_seconds"`
+	// BackfillEnabled allows full backfills.
+	BackfillEnabled bool `mapstructure:"backfill_enabled"`
+	// BackfillMaxDays caps the backfill span (days).
+	BackfillMaxDays int `mapstructure:"backfill_max_days"`
+	// Retention holds the per-table retention windows (days).
+	Retention DashboardAggregationRetentionConfig `mapstructure:"retention"`
+	// RecomputeDays recomputes the most recent N days at startup.
+	RecomputeDays int `mapstructure:"recompute_days"`
+}
+
+// DashboardAggregationRetentionConfig holds the pre-aggregation retention windows.
+type DashboardAggregationRetentionConfig struct {
+	UsageLogsDays int `mapstructure:"usage_logs_days"`
+	HourlyDays    int `mapstructure:"hourly_days"`
+	DailyDays     int `mapstructure:"daily_days"`
+}
+
+func NormalizeRunMode(value string) string {
+	normalized := strings.ToLower(strings.TrimSpace(value))
+	switch normalized {
+	case RunModeStandard, RunModeSimple:
+		return normalized
+	default:
+		return RunModeStandard
+	}
+}
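+
+// For example, NormalizeRunMode("SIMPLE") returns "simple", while unrecognized
+// inputs ("", "invalid", ...) fall back to "standard"; see the table-driven
+// cases in TestNormalizeRunMode.
+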
+func Load() (*Config, error) {
+	viper.SetConfigName("config")
+	viper.SetConfigType("yaml")
+
+	// Add config paths in priority order:
+	// 1. DATA_DIR environment variable (highest priority)
+	if dataDir := os.Getenv("DATA_DIR"); dataDir != "" {
+		viper.AddConfigPath(dataDir)
+	}
+	// 2. Docker data directory
+	viper.AddConfigPath("/app/data")
+	// 3. Current directory
+	viper.AddConfigPath(".")
+	// 4. Config subdirectory
+	viper.AddConfigPath("./config")
+	// 5. System config directory
+	viper.AddConfigPath("/etc/sub2api")
+
+	// Environment variable support.
+	viper.AutomaticEnv()
+	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+
+	// Apply defaults.
+	setDefaults()
+
+	if err := viper.ReadInConfig(); err != nil {
+		if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
+			return nil, fmt.Errorf("read config error: %w", err)
+		}
+		// Fall back to defaults when no config file is present.
+	}
+
+	var cfg Config
+	if err := viper.Unmarshal(&cfg); err != nil {
+		return nil, fmt.Errorf("unmarshal config error: %w", err)
+	}
+
+	cfg.RunMode = NormalizeRunMode(cfg.RunMode)
+	cfg.Server.Mode = strings.ToLower(strings.TrimSpace(cfg.Server.Mode))
+	if cfg.Server.Mode == "" {
+		cfg.Server.Mode = "debug"
+	}
+	cfg.JWT.Secret = strings.TrimSpace(cfg.JWT.Secret)
+	cfg.LinuxDo.ClientID = strings.TrimSpace(cfg.LinuxDo.ClientID)
+	cfg.LinuxDo.ClientSecret = strings.TrimSpace(cfg.LinuxDo.ClientSecret)
+	cfg.LinuxDo.AuthorizeURL = strings.TrimSpace(cfg.LinuxDo.AuthorizeURL)
+	cfg.LinuxDo.TokenURL = strings.TrimSpace(cfg.LinuxDo.TokenURL)
+	cfg.LinuxDo.UserInfoURL = strings.TrimSpace(cfg.LinuxDo.UserInfoURL)
+	cfg.LinuxDo.Scopes = strings.TrimSpace(cfg.LinuxDo.Scopes)
+	cfg.LinuxDo.RedirectURL = strings.TrimSpace(cfg.LinuxDo.RedirectURL)
+	cfg.LinuxDo.FrontendRedirectURL = strings.TrimSpace(cfg.LinuxDo.FrontendRedirectURL)
+	cfg.LinuxDo.TokenAuthMethod = strings.ToLower(strings.TrimSpace(cfg.LinuxDo.TokenAuthMethod))
+	cfg.LinuxDo.UserInfoEmailPath = strings.TrimSpace(cfg.LinuxDo.UserInfoEmailPath)
+	cfg.LinuxDo.UserInfoIDPath = strings.TrimSpace(cfg.LinuxDo.UserInfoIDPath)
+	cfg.LinuxDo.UserInfoUsernamePath = strings.TrimSpace(cfg.LinuxDo.UserInfoUsernamePath)
+	cfg.Dashboard.KeyPrefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix)
+	cfg.CORS.AllowedOrigins = normalizeStringSlice(cfg.CORS.AllowedOrigins)
+	cfg.Security.ResponseHeaders.AdditionalAllowed = normalizeStringSlice(cfg.Security.ResponseHeaders.AdditionalAllowed)
+	cfg.Security.ResponseHeaders.ForceRemove = normalizeStringSlice(cfg.Security.ResponseHeaders.ForceRemove)
+	cfg.Security.CSP.Policy = strings.TrimSpace(cfg.Security.CSP.Policy)
+
+	if cfg.JWT.Secret == "" {
+		secret, err := generateJWTSecret(64)
+		if err != nil {
+			return nil, fmt.Errorf("generate jwt secret error: %w", err)
+		}
+		cfg.JWT.Secret = secret
+		log.Println("Warning: JWT secret auto-generated. Consider setting a fixed secret for production.")
+	}
+
+	if err := cfg.Validate(); err != nil {
+		return nil, fmt.Errorf("validate config error: %w", err)
+	}
+
+	if !cfg.Security.URLAllowlist.Enabled {
+		log.Println("Warning: security.url_allowlist.enabled=false; allowlist/SSRF checks disabled (minimal format validation only).")
+	}
+	if !cfg.Security.ResponseHeaders.Enabled {
+		log.Println("Warning: security.response_headers.enabled=false; configurable header filtering disabled (default allowlist only).")
+	}
+
+	if cfg.JWT.Secret != "" && isWeakJWTSecret(cfg.JWT.Secret) {
+		log.Println("Warning: JWT secret appears weak; use a 32+ character random secret in production.")
+	}
+	if len(cfg.Security.ResponseHeaders.AdditionalAllowed) > 0 || len(cfg.Security.ResponseHeaders.ForceRemove) > 0 {
+		log.Printf("AUDIT: response header policy configured additional_allowed=%v force_remove=%v",
+			cfg.Security.ResponseHeaders.AdditionalAllowed,
+			cfg.Security.ResponseHeaders.ForceRemove,
+		)
+	}
+
+	return &cfg, nil
+}
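+
+// Note on the env replacer above: the key
+// "gateway.scheduling.sticky_session_max_waiting" maps to the variable
+// GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING, so e.g.
+// GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING=5 overrides the default of 3
+// (exercised by TestLoadSchedulingConfigFromEnv).
+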
+func setDefaults() {
+	viper.SetDefault("run_mode", RunModeStandard)
+
+	// Server
+	viper.SetDefault("server.host", "0.0.0.0")
+	viper.SetDefault("server.port", 8080)
+	viper.SetDefault("server.mode", "debug")
+	viper.SetDefault("server.read_header_timeout", 30) // 30s to read request headers
+	viper.SetDefault("server.idle_timeout", 120)       // 120s idle timeout
+	viper.SetDefault("server.trusted_proxies", []string{})
+
+	// CORS
+	viper.SetDefault("cors.allowed_origins", []string{})
+	viper.SetDefault("cors.allow_credentials", true)
+
+	// Security
+	viper.SetDefault("security.url_allowlist.enabled", false)
+	viper.SetDefault("security.url_allowlist.upstream_hosts", []string{
+		"api.openai.com",
+		"api.anthropic.com",
+		"api.kimi.com",
+		"open.bigmodel.cn",
+		"api.minimaxi.com",
+		"generativelanguage.googleapis.com",
+		"cloudcode-pa.googleapis.com",
+		"*.openai.azure.com",
+	})
+	viper.SetDefault("security.url_allowlist.pricing_hosts", []string{
+		"raw.githubusercontent.com",
+	})
+	viper.SetDefault("security.url_allowlist.crs_hosts", []string{})
+	viper.SetDefault("security.url_allowlist.allow_private_hosts", true)
+	viper.SetDefault("security.url_allowlist.allow_insecure_http", true)
+	viper.SetDefault("security.response_headers.enabled", false)
+	viper.SetDefault("security.response_headers.additional_allowed", []string{})
+	viper.SetDefault("security.response_headers.force_remove", []string{})
+	viper.SetDefault("security.csp.enabled", true)
+	viper.SetDefault("security.csp.policy", DefaultCSPPolicy)
+	viper.SetDefault("security.proxy_probe.insecure_skip_verify", false)
+
+	// Billing
+	viper.SetDefault("billing.circuit_breaker.enabled", true)
+	viper.SetDefault("billing.circuit_breaker.failure_threshold", 5)
+	viper.SetDefault("billing.circuit_breaker.reset_timeout_seconds", 30)
+	viper.SetDefault("billing.circuit_breaker.half_open_requests", 3)
+
+	// Turnstile
+	viper.SetDefault("turnstile.required", false)
+
+	// LinuxDo Connect OAuth sign-in
+	viper.SetDefault("linuxdo_connect.enabled", false)
+	viper.SetDefault("linuxdo_connect.client_id", "")
+	viper.SetDefault("linuxdo_connect.client_secret", "")
+	viper.SetDefault("linuxdo_connect.authorize_url", "https://connect.linux.do/oauth2/authorize")
+	viper.SetDefault("linuxdo_connect.token_url", "https://connect.linux.do/oauth2/token")
+	viper.SetDefault("linuxdo_connect.userinfo_url", "https://connect.linux.do/api/user")
+	viper.SetDefault("linuxdo_connect.scopes", "user")
+	viper.SetDefault("linuxdo_connect.redirect_url", "")
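+	// Illustrative value, mirroring the one used in config_test.go: a deployment
+	// at https://example.com would register
+	// https://example.com/api/v1/auth/oauth/linuxdo/callback as redirect_url.
+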
viper.SetDefault("linuxdo_connect.frontend_redirect_url", "/auth/linuxdo/callback") + viper.SetDefault("linuxdo_connect.token_auth_method", "client_secret_post") + viper.SetDefault("linuxdo_connect.use_pkce", false) + viper.SetDefault("linuxdo_connect.userinfo_email_path", "") + viper.SetDefault("linuxdo_connect.userinfo_id_path", "") + viper.SetDefault("linuxdo_connect.userinfo_username_path", "") + + // Database + viper.SetDefault("database.host", "localhost") + viper.SetDefault("database.port", 5432) + viper.SetDefault("database.user", "postgres") + viper.SetDefault("database.password", "postgres") + viper.SetDefault("database.dbname", "sub2api") + viper.SetDefault("database.sslmode", "disable") + viper.SetDefault("database.max_open_conns", 50) + viper.SetDefault("database.max_idle_conns", 10) + viper.SetDefault("database.conn_max_lifetime_minutes", 30) + viper.SetDefault("database.conn_max_idle_time_minutes", 5) + + // Redis + viper.SetDefault("redis.host", "localhost") + viper.SetDefault("redis.port", 6379) + viper.SetDefault("redis.password", "") + viper.SetDefault("redis.db", 0) + viper.SetDefault("redis.dial_timeout_seconds", 5) + viper.SetDefault("redis.read_timeout_seconds", 3) + viper.SetDefault("redis.write_timeout_seconds", 3) + viper.SetDefault("redis.pool_size", 128) + viper.SetDefault("redis.min_idle_conns", 10) + + // Ops (vNext) + viper.SetDefault("ops.enabled", true) + viper.SetDefault("ops.use_preaggregated_tables", false) + viper.SetDefault("ops.cleanup.enabled", true) + viper.SetDefault("ops.cleanup.schedule", "0 2 * * *") + // Retention days: vNext defaults to 30 days across ops datasets. + viper.SetDefault("ops.cleanup.error_log_retention_days", 30) + viper.SetDefault("ops.cleanup.minute_metrics_retention_days", 30) + viper.SetDefault("ops.cleanup.hourly_metrics_retention_days", 30) + viper.SetDefault("ops.aggregation.enabled", true) + viper.SetDefault("ops.metrics_collector_cache.enabled", true) + // TTL should be slightly larger than collection interval (1m) to maximize cross-replica cache hits. + viper.SetDefault("ops.metrics_collector_cache.ttl", 65*time.Second) + + // JWT + viper.SetDefault("jwt.secret", "") + viper.SetDefault("jwt.expire_hour", 24) + + // Default + // Admin credentials are created via the setup flow (web wizard / CLI / AUTO_SETUP). + // Do not ship fixed defaults here to avoid insecure "known credentials" in production. 
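+	// Hypothetical override, shown only for illustration: via the env replacer
+	// in Load(), DEFAULT_ADMIN_EMAIL=admin@example.com would populate
+	// default.admin_email without writing credentials into the config file.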
+	viper.SetDefault("default.admin_email", "")
+	viper.SetDefault("default.admin_password", "")
+	viper.SetDefault("default.user_concurrency", 5)
+	viper.SetDefault("default.user_balance", 0)
+	viper.SetDefault("default.api_key_prefix", "sk-")
+	viper.SetDefault("default.rate_multiplier", 1.0)
+
+	// RateLimit
+	viper.SetDefault("rate_limit.overload_cooldown_minutes", 10)
+
+	// Pricing - synced from the price-mirror branch, which maintains a sha256
+	// hash file for incremental update checks.
+	viper.SetDefault("pricing.remote_url", "https://raw.githubusercontent.com/Wei-Shaw/claude-relay-service/price-mirror/model_prices_and_context_window.json")
+	viper.SetDefault("pricing.hash_url", "https://raw.githubusercontent.com/Wei-Shaw/claude-relay-service/price-mirror/model_prices_and_context_window.sha256")
+	viper.SetDefault("pricing.data_dir", "./data")
+	viper.SetDefault("pricing.fallback_file", "./resources/model-pricing/model_prices_and_context_window.json")
+	viper.SetDefault("pricing.update_interval_hours", 24)
+	viper.SetDefault("pricing.hash_check_interval_minutes", 10)
+
+	// Timezone (default to Asia/Shanghai for Chinese users)
+	viper.SetDefault("timezone", "Asia/Shanghai")
+
+	// API Key auth cache
+	viper.SetDefault("api_key_auth_cache.l1_size", 65535)
+	viper.SetDefault("api_key_auth_cache.l1_ttl_seconds", 15)
+	viper.SetDefault("api_key_auth_cache.l2_ttl_seconds", 300)
+	viper.SetDefault("api_key_auth_cache.negative_ttl_seconds", 30)
+	viper.SetDefault("api_key_auth_cache.jitter_percent", 10)
+	viper.SetDefault("api_key_auth_cache.singleflight", true)
+
+	// Dashboard cache
+	viper.SetDefault("dashboard_cache.enabled", true)
+	viper.SetDefault("dashboard_cache.key_prefix", "sub2api:")
+	viper.SetDefault("dashboard_cache.stats_fresh_ttl_seconds", 15)
+	viper.SetDefault("dashboard_cache.stats_ttl_seconds", 30)
+	viper.SetDefault("dashboard_cache.stats_refresh_timeout_seconds", 30)
+
+	// Dashboard aggregation
+	viper.SetDefault("dashboard_aggregation.enabled", true)
+	viper.SetDefault("dashboard_aggregation.interval_seconds", 60)
+	viper.SetDefault("dashboard_aggregation.lookback_seconds", 120)
+	viper.SetDefault("dashboard_aggregation.backfill_enabled", false)
+	viper.SetDefault("dashboard_aggregation.backfill_max_days", 31)
+	viper.SetDefault("dashboard_aggregation.retention.usage_logs_days", 90)
+	viper.SetDefault("dashboard_aggregation.retention.hourly_days", 180)
+	viper.SetDefault("dashboard_aggregation.retention.daily_days", 730)
+	viper.SetDefault("dashboard_aggregation.recompute_days", 2)
+
+	// Gateway
+	viper.SetDefault("gateway.response_header_timeout", 600) // 600s (10 min) for upstream response headers; LLM backends may queue for a while under load
+	viper.SetDefault("gateway.log_upstream_error_body", true)
+	viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048)
+	viper.SetDefault("gateway.inject_beta_for_apikey", false)
+	viper.SetDefault("gateway.failover_on_400", false)
+	viper.SetDefault("gateway.max_body_size", int64(100*1024*1024))
+	viper.SetDefault("gateway.connection_pool_isolation", ConnectionPoolIsolationAccountProxy)
+	// Upstream HTTP pool defaults (tuned for 5000+ concurrent users).
+	viper.SetDefault("gateway.max_idle_conns", 240)           // total idle connections (HTTP/2 default)
+	viper.SetDefault("gateway.max_idle_conns_per_host", 120)  // idle connections per host (HTTP/2 default)
+	viper.SetDefault("gateway.max_conns_per_host", 240)       // connections per host, incl. active (HTTP/2 default)
+	viper.SetDefault("gateway.idle_conn_timeout_seconds", 90) // idle connection timeout (seconds)
+	viper.SetDefault("gateway.max_upstream_clients", 5000)
+	viper.SetDefault("gateway.client_idle_ttl_seconds", 900)
+	viper.SetDefault("gateway.concurrency_slot_ttl_minutes", 30) // concurrency slot TTL (accommodates very long requests)
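+	// Illustrative consequence of the 30-minute default above: a request still
+	// streaming after 30 minutes would have its concurrency slot expire, so raise
+	// gateway.concurrency_slot_ttl_minutes if requests can legitimately run longer.
+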
+	viper.SetDefault("gateway.stream_data_interval_timeout", 180)
+	viper.SetDefault("gateway.stream_keepalive_interval", 10)
+	viper.SetDefault("gateway.max_line_size", 10*1024*1024)
+	viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3)
+	viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 120*time.Second)
+	viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second)
+	viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100)
+	viper.SetDefault("gateway.scheduling.load_batch_enabled", true)
+	viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second)
+	viper.SetDefault("gateway.scheduling.db_fallback_enabled", true)
+	viper.SetDefault("gateway.scheduling.db_fallback_timeout_seconds", 0)
+	viper.SetDefault("gateway.scheduling.db_fallback_max_qps", 0)
+	viper.SetDefault("gateway.scheduling.outbox_poll_interval_seconds", 1)
+	viper.SetDefault("gateway.scheduling.outbox_lag_warn_seconds", 5)
+	viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_seconds", 10)
+	viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_failures", 3)
+	viper.SetDefault("gateway.scheduling.outbox_backlog_rebuild_rows", 10000)
+	viper.SetDefault("gateway.scheduling.full_rebuild_interval_seconds", 300)
+	viper.SetDefault("concurrency.ping_interval", 10)
+
+	// TokenRefresh
+	viper.SetDefault("token_refresh.enabled", true)
+	viper.SetDefault("token_refresh.check_interval_minutes", 5)        // check every 5 minutes
+	viper.SetDefault("token_refresh.refresh_before_expiry_hours", 0.5) // refresh 30 minutes early (fits Google's 1-hour tokens)
+	viper.SetDefault("token_refresh.max_retries", 3)                   // up to 3 retries
+	viper.SetDefault("token_refresh.retry_backoff_seconds", 2)         // 2s base retry backoff
+
+	// Gemini OAuth - configure via environment variables or config file
+	// GEMINI_OAUTH_CLIENT_ID and GEMINI_OAUTH_CLIENT_SECRET
+	// Default: uses Gemini CLI public credentials (set via environment)
+	viper.SetDefault("gemini.oauth.client_id", "")
+	viper.SetDefault("gemini.oauth.client_secret", "")
+	viper.SetDefault("gemini.oauth.scopes", "")
+	viper.SetDefault("gemini.quota.policy", "")
+}
+
+func (c *Config) Validate() error {
+	if c.JWT.ExpireHour <= 0 {
+		return fmt.Errorf("jwt.expire_hour must be positive")
+	}
+	if c.JWT.ExpireHour > 168 {
+		return fmt.Errorf("jwt.expire_hour must be <= 168 (7 days)")
+	}
+	if c.JWT.ExpireHour > 24 {
+		log.Printf("Warning: jwt.expire_hour is %d hours (> 24).
Consider shorter expiration for security.", c.JWT.ExpireHour) + } + if c.Security.CSP.Enabled && strings.TrimSpace(c.Security.CSP.Policy) == "" { + return fmt.Errorf("security.csp.policy is required when CSP is enabled") + } + if c.LinuxDo.Enabled { + if strings.TrimSpace(c.LinuxDo.ClientID) == "" { + return fmt.Errorf("linuxdo_connect.client_id is required when linuxdo_connect.enabled=true") + } + if strings.TrimSpace(c.LinuxDo.AuthorizeURL) == "" { + return fmt.Errorf("linuxdo_connect.authorize_url is required when linuxdo_connect.enabled=true") + } + if strings.TrimSpace(c.LinuxDo.TokenURL) == "" { + return fmt.Errorf("linuxdo_connect.token_url is required when linuxdo_connect.enabled=true") + } + if strings.TrimSpace(c.LinuxDo.UserInfoURL) == "" { + return fmt.Errorf("linuxdo_connect.userinfo_url is required when linuxdo_connect.enabled=true") + } + if strings.TrimSpace(c.LinuxDo.RedirectURL) == "" { + return fmt.Errorf("linuxdo_connect.redirect_url is required when linuxdo_connect.enabled=true") + } + method := strings.ToLower(strings.TrimSpace(c.LinuxDo.TokenAuthMethod)) + switch method { + case "", "client_secret_post", "client_secret_basic", "none": + default: + return fmt.Errorf("linuxdo_connect.token_auth_method must be one of: client_secret_post/client_secret_basic/none") + } + if method == "none" && !c.LinuxDo.UsePKCE { + return fmt.Errorf("linuxdo_connect.use_pkce must be true when linuxdo_connect.token_auth_method=none") + } + if (method == "" || method == "client_secret_post" || method == "client_secret_basic") && + strings.TrimSpace(c.LinuxDo.ClientSecret) == "" { + return fmt.Errorf("linuxdo_connect.client_secret is required when linuxdo_connect.enabled=true and token_auth_method is client_secret_post/client_secret_basic") + } + if strings.TrimSpace(c.LinuxDo.FrontendRedirectURL) == "" { + return fmt.Errorf("linuxdo_connect.frontend_redirect_url is required when linuxdo_connect.enabled=true") + } + + if err := ValidateAbsoluteHTTPURL(c.LinuxDo.AuthorizeURL); err != nil { + return fmt.Errorf("linuxdo_connect.authorize_url invalid: %w", err) + } + if err := ValidateAbsoluteHTTPURL(c.LinuxDo.TokenURL); err != nil { + return fmt.Errorf("linuxdo_connect.token_url invalid: %w", err) + } + if err := ValidateAbsoluteHTTPURL(c.LinuxDo.UserInfoURL); err != nil { + return fmt.Errorf("linuxdo_connect.userinfo_url invalid: %w", err) + } + if err := ValidateAbsoluteHTTPURL(c.LinuxDo.RedirectURL); err != nil { + return fmt.Errorf("linuxdo_connect.redirect_url invalid: %w", err) + } + if err := ValidateFrontendRedirectURL(c.LinuxDo.FrontendRedirectURL); err != nil { + return fmt.Errorf("linuxdo_connect.frontend_redirect_url invalid: %w", err) + } + + warnIfInsecureURL("linuxdo_connect.authorize_url", c.LinuxDo.AuthorizeURL) + warnIfInsecureURL("linuxdo_connect.token_url", c.LinuxDo.TokenURL) + warnIfInsecureURL("linuxdo_connect.userinfo_url", c.LinuxDo.UserInfoURL) + warnIfInsecureURL("linuxdo_connect.redirect_url", c.LinuxDo.RedirectURL) + warnIfInsecureURL("linuxdo_connect.frontend_redirect_url", c.LinuxDo.FrontendRedirectURL) + } + if c.Billing.CircuitBreaker.Enabled { + if c.Billing.CircuitBreaker.FailureThreshold <= 0 { + return fmt.Errorf("billing.circuit_breaker.failure_threshold must be positive") + } + if c.Billing.CircuitBreaker.ResetTimeoutSeconds <= 0 { + return fmt.Errorf("billing.circuit_breaker.reset_timeout_seconds must be positive") + } + if c.Billing.CircuitBreaker.HalfOpenRequests <= 0 { + return fmt.Errorf("billing.circuit_breaker.half_open_requests must be positive") 
+ } + } + if c.Database.MaxOpenConns <= 0 { + return fmt.Errorf("database.max_open_conns must be positive") + } + if c.Database.MaxIdleConns < 0 { + return fmt.Errorf("database.max_idle_conns must be non-negative") + } + if c.Database.MaxIdleConns > c.Database.MaxOpenConns { + return fmt.Errorf("database.max_idle_conns cannot exceed database.max_open_conns") + } + if c.Database.ConnMaxLifetimeMinutes < 0 { + return fmt.Errorf("database.conn_max_lifetime_minutes must be non-negative") + } + if c.Database.ConnMaxIdleTimeMinutes < 0 { + return fmt.Errorf("database.conn_max_idle_time_minutes must be non-negative") + } + if c.Redis.DialTimeoutSeconds <= 0 { + return fmt.Errorf("redis.dial_timeout_seconds must be positive") + } + if c.Redis.ReadTimeoutSeconds <= 0 { + return fmt.Errorf("redis.read_timeout_seconds must be positive") + } + if c.Redis.WriteTimeoutSeconds <= 0 { + return fmt.Errorf("redis.write_timeout_seconds must be positive") + } + if c.Redis.PoolSize <= 0 { + return fmt.Errorf("redis.pool_size must be positive") + } + if c.Redis.MinIdleConns < 0 { + return fmt.Errorf("redis.min_idle_conns must be non-negative") + } + if c.Redis.MinIdleConns > c.Redis.PoolSize { + return fmt.Errorf("redis.min_idle_conns cannot exceed redis.pool_size") + } + if c.Dashboard.Enabled { + if c.Dashboard.StatsFreshTTLSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be positive") + } + if c.Dashboard.StatsTTLSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be positive") + } + if c.Dashboard.StatsRefreshTimeoutSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be positive") + } + if c.Dashboard.StatsFreshTTLSeconds > c.Dashboard.StatsTTLSeconds { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be <= dashboard_cache.stats_ttl_seconds") + } + } else { + if c.Dashboard.StatsFreshTTLSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be non-negative") + } + if c.Dashboard.StatsTTLSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be non-negative") + } + if c.Dashboard.StatsRefreshTimeoutSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be non-negative") + } + } + if c.DashboardAgg.Enabled { + if c.DashboardAgg.IntervalSeconds <= 0 { + return fmt.Errorf("dashboard_aggregation.interval_seconds must be positive") + } + if c.DashboardAgg.LookbackSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.lookback_seconds must be non-negative") + } + if c.DashboardAgg.BackfillMaxDays < 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative") + } + if c.DashboardAgg.BackfillEnabled && c.DashboardAgg.BackfillMaxDays == 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be positive") + } + if c.DashboardAgg.Retention.UsageLogsDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be positive") + } + if c.DashboardAgg.Retention.HourlyDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be positive") + } + if c.DashboardAgg.Retention.DailyDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.daily_days must be positive") + } + if c.DashboardAgg.RecomputeDays < 0 { + return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative") + } + } else { + if c.DashboardAgg.IntervalSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.interval_seconds must be non-negative") + } + if 
c.DashboardAgg.LookbackSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.lookback_seconds must be non-negative") + } + if c.DashboardAgg.BackfillMaxDays < 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative") + } + if c.DashboardAgg.Retention.UsageLogsDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be non-negative") + } + if c.DashboardAgg.Retention.HourlyDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be non-negative") + } + if c.DashboardAgg.Retention.DailyDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.daily_days must be non-negative") + } + if c.DashboardAgg.RecomputeDays < 0 { + return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative") + } + } + if c.Gateway.MaxBodySize <= 0 { + return fmt.Errorf("gateway.max_body_size must be positive") + } + if strings.TrimSpace(c.Gateway.ConnectionPoolIsolation) != "" { + switch c.Gateway.ConnectionPoolIsolation { + case ConnectionPoolIsolationProxy, ConnectionPoolIsolationAccount, ConnectionPoolIsolationAccountProxy: + default: + return fmt.Errorf("gateway.connection_pool_isolation must be one of: %s/%s/%s", + ConnectionPoolIsolationProxy, ConnectionPoolIsolationAccount, ConnectionPoolIsolationAccountProxy) + } + } + if c.Gateway.MaxIdleConns <= 0 { + return fmt.Errorf("gateway.max_idle_conns must be positive") + } + if c.Gateway.MaxIdleConnsPerHost <= 0 { + return fmt.Errorf("gateway.max_idle_conns_per_host must be positive") + } + if c.Gateway.MaxConnsPerHost < 0 { + return fmt.Errorf("gateway.max_conns_per_host must be non-negative") + } + if c.Gateway.IdleConnTimeoutSeconds <= 0 { + return fmt.Errorf("gateway.idle_conn_timeout_seconds must be positive") + } + if c.Gateway.IdleConnTimeoutSeconds > 180 { + log.Printf("Warning: gateway.idle_conn_timeout_seconds is %d (> 180). 
Consider 60-120 seconds for better connection reuse.", c.Gateway.IdleConnTimeoutSeconds) + } + if c.Gateway.MaxUpstreamClients <= 0 { + return fmt.Errorf("gateway.max_upstream_clients must be positive") + } + if c.Gateway.ClientIdleTTLSeconds <= 0 { + return fmt.Errorf("gateway.client_idle_ttl_seconds must be positive") + } + if c.Gateway.ConcurrencySlotTTLMinutes <= 0 { + return fmt.Errorf("gateway.concurrency_slot_ttl_minutes must be positive") + } + if c.Gateway.StreamDataIntervalTimeout < 0 { + return fmt.Errorf("gateway.stream_data_interval_timeout must be non-negative") + } + if c.Gateway.StreamDataIntervalTimeout != 0 && + (c.Gateway.StreamDataIntervalTimeout < 30 || c.Gateway.StreamDataIntervalTimeout > 300) { + return fmt.Errorf("gateway.stream_data_interval_timeout must be 0 or between 30-300 seconds") + } + if c.Gateway.StreamKeepaliveInterval < 0 { + return fmt.Errorf("gateway.stream_keepalive_interval must be non-negative") + } + if c.Gateway.StreamKeepaliveInterval != 0 && + (c.Gateway.StreamKeepaliveInterval < 5 || c.Gateway.StreamKeepaliveInterval > 30) { + return fmt.Errorf("gateway.stream_keepalive_interval must be 0 or between 5-30 seconds") + } + if c.Gateway.MaxLineSize < 0 { + return fmt.Errorf("gateway.max_line_size must be non-negative") + } + if c.Gateway.MaxLineSize != 0 && c.Gateway.MaxLineSize < 1024*1024 { + return fmt.Errorf("gateway.max_line_size must be at least 1MB") + } + if c.Gateway.Scheduling.StickySessionMaxWaiting <= 0 { + return fmt.Errorf("gateway.scheduling.sticky_session_max_waiting must be positive") + } + if c.Gateway.Scheduling.StickySessionWaitTimeout <= 0 { + return fmt.Errorf("gateway.scheduling.sticky_session_wait_timeout must be positive") + } + if c.Gateway.Scheduling.FallbackWaitTimeout <= 0 { + return fmt.Errorf("gateway.scheduling.fallback_wait_timeout must be positive") + } + if c.Gateway.Scheduling.FallbackMaxWaiting <= 0 { + return fmt.Errorf("gateway.scheduling.fallback_max_waiting must be positive") + } + if c.Gateway.Scheduling.SlotCleanupInterval < 0 { + return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative") + } + if c.Gateway.Scheduling.DbFallbackTimeoutSeconds < 0 { + return fmt.Errorf("gateway.scheduling.db_fallback_timeout_seconds must be non-negative") + } + if c.Gateway.Scheduling.DbFallbackMaxQPS < 0 { + return fmt.Errorf("gateway.scheduling.db_fallback_max_qps must be non-negative") + } + if c.Gateway.Scheduling.OutboxPollIntervalSeconds <= 0 { + return fmt.Errorf("gateway.scheduling.outbox_poll_interval_seconds must be positive") + } + if c.Gateway.Scheduling.OutboxLagWarnSeconds < 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_warn_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagRebuildSeconds < 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagRebuildFailures <= 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_failures must be positive") + } + if c.Gateway.Scheduling.OutboxBacklogRebuildRows < 0 { + return fmt.Errorf("gateway.scheduling.outbox_backlog_rebuild_rows must be non-negative") + } + if c.Gateway.Scheduling.FullRebuildIntervalSeconds < 0 { + return fmt.Errorf("gateway.scheduling.full_rebuild_interval_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagWarnSeconds > 0 && + c.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 && + c.Gateway.Scheduling.OutboxLagRebuildSeconds < c.Gateway.Scheduling.OutboxLagWarnSeconds { + return 
fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be >= outbox_lag_warn_seconds") + } + if c.Ops.MetricsCollectorCache.TTL < 0 { + return fmt.Errorf("ops.metrics_collector_cache.ttl must be non-negative") + } + if c.Ops.Cleanup.ErrorLogRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.error_log_retention_days must be non-negative") + } + if c.Ops.Cleanup.MinuteMetricsRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.minute_metrics_retention_days must be non-negative") + } + if c.Ops.Cleanup.HourlyMetricsRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.hourly_metrics_retention_days must be non-negative") + } + if c.Ops.Cleanup.Enabled && strings.TrimSpace(c.Ops.Cleanup.Schedule) == "" { + return fmt.Errorf("ops.cleanup.schedule is required when ops.cleanup.enabled=true") + } + if c.Concurrency.PingInterval < 5 || c.Concurrency.PingInterval > 30 { + return fmt.Errorf("concurrency.ping_interval must be between 5-30 seconds") + } + return nil +} + +func normalizeStringSlice(values []string) []string { + if len(values) == 0 { + return values + } + normalized := make([]string, 0, len(values)) + for _, v := range values { + trimmed := strings.TrimSpace(v) + if trimmed == "" { + continue + } + normalized = append(normalized, trimmed) + } + return normalized +} + +func isWeakJWTSecret(secret string) bool { + lower := strings.ToLower(strings.TrimSpace(secret)) + if lower == "" { + return true + } + weak := map[string]struct{}{ + "change-me-in-production": {}, + "changeme": {}, + "secret": {}, + "password": {}, + "123456": {}, + "12345678": {}, + "admin": {}, + "jwt-secret": {}, + } + _, exists := weak[lower] + return exists +} + +func generateJWTSecret(byteLength int) (string, error) { + if byteLength <= 0 { + byteLength = 32 + } + buf := make([]byte, byteLength) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return hex.EncodeToString(buf), nil +} + +// GetServerAddress returns the server address (host:port) from config file or environment variable. +// This is a lightweight function that can be used before full config validation, +// such as during setup wizard startup. 
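+// Example: with no config file and no environment overrides it returns
+// "0.0.0.0:8080", per the server.host/server.port defaults set below.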
+// Priority: environment variables > config.yaml > defaults
+func GetServerAddress() string {
+	v := viper.New()
+	v.SetConfigName("config")
+	v.SetConfigType("yaml")
+	v.AddConfigPath(".")
+	v.AddConfigPath("./config")
+	v.AddConfigPath("/etc/sub2api")
+
+	// Support SERVER_HOST and SERVER_PORT environment variables
+	v.AutomaticEnv()
+	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
+	v.SetDefault("server.host", "0.0.0.0")
+	v.SetDefault("server.port", 8080)
+
+	// Try to read config file (ignore errors if not found)
+	_ = v.ReadInConfig()
+
+	host := v.GetString("server.host")
+	port := v.GetInt("server.port")
+	return fmt.Sprintf("%s:%d", host, port)
+}
+
+// ValidateAbsoluteHTTPURL checks that a value is a valid absolute HTTP(S) URL.
+func ValidateAbsoluteHTTPURL(raw string) error {
+	raw = strings.TrimSpace(raw)
+	if raw == "" {
+		return fmt.Errorf("empty url")
+	}
+	u, err := url.Parse(raw)
+	if err != nil {
+		return err
+	}
+	if !u.IsAbs() {
+		return fmt.Errorf("must be absolute")
+	}
+	if !isHTTPScheme(u.Scheme) {
+		return fmt.Errorf("unsupported scheme: %s", u.Scheme)
+	}
+	if strings.TrimSpace(u.Host) == "" {
+		return fmt.Errorf("missing host")
+	}
+	if u.Fragment != "" {
+		return fmt.Errorf("must not include fragment")
+	}
+	return nil
+}
+
+// ValidateFrontendRedirectURL checks a frontend redirect URL, which may be an
+// absolute HTTP(S) URL or a relative path.
+func ValidateFrontendRedirectURL(raw string) error {
+	raw = strings.TrimSpace(raw)
+	if raw == "" {
+		return fmt.Errorf("empty url")
+	}
+	if strings.ContainsAny(raw, "\r\n") {
+		return fmt.Errorf("contains invalid characters")
+	}
+	if strings.HasPrefix(raw, "/") {
+		if strings.HasPrefix(raw, "//") {
+			return fmt.Errorf("must not start with //")
+		}
+		return nil
+	}
+	u, err := url.Parse(raw)
+	if err != nil {
+		return err
+	}
+	if !u.IsAbs() {
+		return fmt.Errorf("must be absolute http(s) url or relative path")
+	}
+	if !isHTTPScheme(u.Scheme) {
+		return fmt.Errorf("unsupported scheme: %s", u.Scheme)
+	}
+	if strings.TrimSpace(u.Host) == "" {
+		return fmt.Errorf("missing host")
+	}
+	if u.Fragment != "" {
+		return fmt.Errorf("must not include fragment")
+	}
+	return nil
+}
+
+// isHTTPScheme reports whether the scheme is HTTP or HTTPS.
+func isHTTPScheme(scheme string) bool {
+	return strings.EqualFold(scheme, "http") || strings.EqualFold(scheme, "https")
+}
+
+func warnIfInsecureURL(field, raw string) {
+	u, err := url.Parse(strings.TrimSpace(raw))
+	if err != nil {
+		return
+	}
+	if strings.EqualFold(u.Scheme, "http") {
+		log.Printf("Warning: %s uses http scheme; use https in production to avoid token leakage.", field)
+	}
+}
diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go
new file mode 100644
index 00000000..4637989e
--- /dev/null
+++ b/backend/internal/config/config_test.go
@@ -0,0 +1,282 @@
+package config
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/spf13/viper"
+)
+
+func TestNormalizeRunMode(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected string
+	}{
+		{"simple", "simple"},
+		{"SIMPLE", "simple"},
+		{"standard", "standard"},
+		{"invalid", "standard"},
+		{"", "standard"},
+	}
+
+	for _, tt := range tests {
+		result := NormalizeRunMode(tt.input)
+		if result != tt.expected {
+			t.Errorf("NormalizeRunMode(%q) = %q, want %q", tt.input, result, tt.expected)
+		}
+	}
+}
+
+func TestLoadDefaultSchedulingConfig(t *testing.T) {
+	viper.Reset()
+
+	cfg, err := Load()
+	if err != nil {
+		t.Fatalf("Load() error: %v", err)
+	}
+
+	if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 {
+		t.Fatalf("StickySessionMaxWaiting = %d, want 3",
cfg.Gateway.Scheduling.StickySessionMaxWaiting) + } + if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 120*time.Second { + t.Fatalf("StickySessionWaitTimeout = %v, want 120s", cfg.Gateway.Scheduling.StickySessionWaitTimeout) + } + if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second { + t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout) + } + if cfg.Gateway.Scheduling.FallbackMaxWaiting != 100 { + t.Fatalf("FallbackMaxWaiting = %d, want 100", cfg.Gateway.Scheduling.FallbackMaxWaiting) + } + if !cfg.Gateway.Scheduling.LoadBatchEnabled { + t.Fatalf("LoadBatchEnabled = false, want true") + } + if cfg.Gateway.Scheduling.SlotCleanupInterval != 30*time.Second { + t.Fatalf("SlotCleanupInterval = %v, want 30s", cfg.Gateway.Scheduling.SlotCleanupInterval) + } +} + +func TestLoadSchedulingConfigFromEnv(t *testing.T) { + viper.Reset() + t.Setenv("GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING", "5") + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 5 { + t.Fatalf("StickySessionMaxWaiting = %d, want 5", cfg.Gateway.Scheduling.StickySessionMaxWaiting) + } +} + +func TestLoadDefaultSecurityToggles(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if cfg.Security.URLAllowlist.Enabled { + t.Fatalf("URLAllowlist.Enabled = true, want false") + } + if !cfg.Security.URLAllowlist.AllowInsecureHTTP { + t.Fatalf("URLAllowlist.AllowInsecureHTTP = false, want true") + } + if !cfg.Security.URLAllowlist.AllowPrivateHosts { + t.Fatalf("URLAllowlist.AllowPrivateHosts = false, want true") + } + if cfg.Security.ResponseHeaders.Enabled { + t.Fatalf("ResponseHeaders.Enabled = true, want false") + } +} + +func TestValidateLinuxDoFrontendRedirectURL(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.LinuxDo.Enabled = true + cfg.LinuxDo.ClientID = "test-client" + cfg.LinuxDo.ClientSecret = "test-secret" + cfg.LinuxDo.RedirectURL = "https://example.com/api/v1/auth/oauth/linuxdo/callback" + cfg.LinuxDo.TokenAuthMethod = "client_secret_post" + cfg.LinuxDo.UsePKCE = false + + cfg.LinuxDo.FrontendRedirectURL = "javascript:alert(1)" + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for javascript scheme, got nil") + } + if !strings.Contains(err.Error(), "linuxdo_connect.frontend_redirect_url") { + t.Fatalf("Validate() expected frontend_redirect_url error, got: %v", err) + } +} + +func TestValidateLinuxDoPKCERequiredForPublicClient(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.LinuxDo.Enabled = true + cfg.LinuxDo.ClientID = "test-client" + cfg.LinuxDo.ClientSecret = "" + cfg.LinuxDo.RedirectURL = "https://example.com/api/v1/auth/oauth/linuxdo/callback" + cfg.LinuxDo.FrontendRedirectURL = "/auth/linuxdo/callback" + cfg.LinuxDo.TokenAuthMethod = "none" + cfg.LinuxDo.UsePKCE = false + + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error when token_auth_method=none and use_pkce=false, got nil") + } + if !strings.Contains(err.Error(), "linuxdo_connect.use_pkce") { + t.Fatalf("Validate() expected use_pkce error, got: %v", err) + } +} + +func TestLoadDefaultDashboardCacheConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if !cfg.Dashboard.Enabled { + 
t.Fatalf("Dashboard.Enabled = false, want true") + } + if cfg.Dashboard.KeyPrefix != "sub2api:" { + t.Fatalf("Dashboard.KeyPrefix = %q, want %q", cfg.Dashboard.KeyPrefix, "sub2api:") + } + if cfg.Dashboard.StatsFreshTTLSeconds != 15 { + t.Fatalf("Dashboard.StatsFreshTTLSeconds = %d, want 15", cfg.Dashboard.StatsFreshTTLSeconds) + } + if cfg.Dashboard.StatsTTLSeconds != 30 { + t.Fatalf("Dashboard.StatsTTLSeconds = %d, want 30", cfg.Dashboard.StatsTTLSeconds) + } + if cfg.Dashboard.StatsRefreshTimeoutSeconds != 30 { + t.Fatalf("Dashboard.StatsRefreshTimeoutSeconds = %d, want 30", cfg.Dashboard.StatsRefreshTimeoutSeconds) + } +} + +func TestValidateDashboardCacheConfigEnabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Dashboard.Enabled = true + cfg.Dashboard.StatsFreshTTLSeconds = 10 + cfg.Dashboard.StatsTTLSeconds = 5 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for stats_fresh_ttl_seconds > stats_ttl_seconds, got nil") + } + if !strings.Contains(err.Error(), "dashboard_cache.stats_fresh_ttl_seconds") { + t.Fatalf("Validate() expected stats_fresh_ttl_seconds error, got: %v", err) + } +} + +func TestValidateDashboardCacheConfigDisabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Dashboard.Enabled = false + cfg.Dashboard.StatsTTLSeconds = -1 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for negative stats_ttl_seconds, got nil") + } + if !strings.Contains(err.Error(), "dashboard_cache.stats_ttl_seconds") { + t.Fatalf("Validate() expected stats_ttl_seconds error, got: %v", err) + } +} + +func TestLoadDefaultDashboardAggregationConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if !cfg.DashboardAgg.Enabled { + t.Fatalf("DashboardAgg.Enabled = false, want true") + } + if cfg.DashboardAgg.IntervalSeconds != 60 { + t.Fatalf("DashboardAgg.IntervalSeconds = %d, want 60", cfg.DashboardAgg.IntervalSeconds) + } + if cfg.DashboardAgg.LookbackSeconds != 120 { + t.Fatalf("DashboardAgg.LookbackSeconds = %d, want 120", cfg.DashboardAgg.LookbackSeconds) + } + if cfg.DashboardAgg.BackfillEnabled { + t.Fatalf("DashboardAgg.BackfillEnabled = true, want false") + } + if cfg.DashboardAgg.BackfillMaxDays != 31 { + t.Fatalf("DashboardAgg.BackfillMaxDays = %d, want 31", cfg.DashboardAgg.BackfillMaxDays) + } + if cfg.DashboardAgg.Retention.UsageLogsDays != 90 { + t.Fatalf("DashboardAgg.Retention.UsageLogsDays = %d, want 90", cfg.DashboardAgg.Retention.UsageLogsDays) + } + if cfg.DashboardAgg.Retention.HourlyDays != 180 { + t.Fatalf("DashboardAgg.Retention.HourlyDays = %d, want 180", cfg.DashboardAgg.Retention.HourlyDays) + } + if cfg.DashboardAgg.Retention.DailyDays != 730 { + t.Fatalf("DashboardAgg.Retention.DailyDays = %d, want 730", cfg.DashboardAgg.Retention.DailyDays) + } + if cfg.DashboardAgg.RecomputeDays != 2 { + t.Fatalf("DashboardAgg.RecomputeDays = %d, want 2", cfg.DashboardAgg.RecomputeDays) + } +} + +func TestValidateDashboardAggregationConfigDisabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.DashboardAgg.Enabled = false + cfg.DashboardAgg.IntervalSeconds = -1 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for negative dashboard_aggregation.interval_seconds, got nil") + } + if !strings.Contains(err.Error(), 
"dashboard_aggregation.interval_seconds") { + t.Fatalf("Validate() expected interval_seconds error, got: %v", err) + } +} + +func TestValidateDashboardAggregationBackfillMaxDays(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.DashboardAgg.BackfillEnabled = true + cfg.DashboardAgg.BackfillMaxDays = 0 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for dashboard_aggregation.backfill_max_days, got nil") + } + if !strings.Contains(err.Error(), "dashboard_aggregation.backfill_max_days") { + t.Fatalf("Validate() expected backfill_max_days error, got: %v", err) + } +} diff --git a/backend/internal/config/wire.go b/backend/internal/config/wire.go new file mode 100644 index 00000000..ec26c401 --- /dev/null +++ b/backend/internal/config/wire.go @@ -0,0 +1,13 @@ +package config + +import "github.com/google/wire" + +// ProviderSet 提供配置层的依赖 +var ProviderSet = wire.NewSet( + ProvideConfig, +) + +// ProvideConfig 提供应用配置 +func ProvideConfig() (*Config, error) { + return Load() +} diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go new file mode 100644 index 00000000..92fdf2eb --- /dev/null +++ b/backend/internal/handler/admin/account_handler.go @@ -0,0 +1,1307 @@ +// Package admin provides HTTP handlers for administrative operations. +package admin + +import ( + "errors" + "strconv" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +// OAuthHandler handles OAuth-related operations for accounts +type OAuthHandler struct { + oauthService *service.OAuthService +} + +// NewOAuthHandler creates a new OAuth handler +func NewOAuthHandler(oauthService *service.OAuthService) *OAuthHandler { + return &OAuthHandler{ + oauthService: oauthService, + } +} + +// AccountHandler handles admin account management +type AccountHandler struct { + adminService service.AdminService + oauthService *service.OAuthService + openaiOAuthService *service.OpenAIOAuthService + geminiOAuthService *service.GeminiOAuthService + antigravityOAuthService *service.AntigravityOAuthService + rateLimitService *service.RateLimitService + accountUsageService *service.AccountUsageService + accountTestService *service.AccountTestService + concurrencyService *service.ConcurrencyService + crsSyncService *service.CRSSyncService +} + +// NewAccountHandler creates a new admin account handler +func NewAccountHandler( + adminService service.AdminService, + oauthService *service.OAuthService, + openaiOAuthService *service.OpenAIOAuthService, + geminiOAuthService *service.GeminiOAuthService, + antigravityOAuthService *service.AntigravityOAuthService, + rateLimitService *service.RateLimitService, + accountUsageService *service.AccountUsageService, + accountTestService *service.AccountTestService, + concurrencyService *service.ConcurrencyService, + crsSyncService *service.CRSSyncService, +) *AccountHandler { + return &AccountHandler{ + adminService: adminService, + oauthService: oauthService, + openaiOAuthService: openaiOAuthService, + geminiOAuthService: geminiOAuthService, + 
antigravityOAuthService: antigravityOAuthService,
+		rateLimitService: rateLimitService,
+		accountUsageService: accountUsageService,
+		accountTestService: accountTestService,
+		concurrencyService: concurrencyService,
+		crsSyncService: crsSyncService,
+	}
+}
+
+// CreateAccountRequest represents create account request
+type CreateAccountRequest struct {
+	Name string `json:"name" binding:"required"`
+	Notes *string `json:"notes"`
+	Platform string `json:"platform" binding:"required"`
+	Type string `json:"type" binding:"required,oneof=oauth setup-token apikey"`
+	Credentials map[string]any `json:"credentials" binding:"required"`
+	Extra map[string]any `json:"extra"`
+	ProxyID *int64 `json:"proxy_id"`
+	Concurrency int `json:"concurrency"`
+	Priority int `json:"priority"`
+	RateMultiplier *float64 `json:"rate_multiplier"`
+	GroupIDs []int64 `json:"group_ids"`
+	ExpiresAt *int64 `json:"expires_at"`
+	AutoPauseOnExpired *bool `json:"auto_pause_on_expired"`
+	ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // user has acknowledged the mixed-channel risk
+}
+
+// UpdateAccountRequest represents update account request.
+// Pointer fields distinguish "not provided" from "explicitly set to zero".
+type UpdateAccountRequest struct {
+	Name string `json:"name"`
+	Notes *string `json:"notes"`
+	Type string `json:"type" binding:"omitempty,oneof=oauth setup-token apikey"`
+	Credentials map[string]any `json:"credentials"`
+	Extra map[string]any `json:"extra"`
+	ProxyID *int64 `json:"proxy_id"`
+	Concurrency *int `json:"concurrency"`
+	Priority *int `json:"priority"`
+	RateMultiplier *float64 `json:"rate_multiplier"`
+	Status string `json:"status" binding:"omitempty,oneof=active inactive"`
+	GroupIDs *[]int64 `json:"group_ids"`
+	ExpiresAt *int64 `json:"expires_at"`
+	AutoPauseOnExpired *bool `json:"auto_pause_on_expired"`
+	ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // user has acknowledged the mixed-channel risk
+}
+
+// BulkUpdateAccountsRequest represents the payload for bulk editing accounts
+type BulkUpdateAccountsRequest struct {
+	AccountIDs []int64 `json:"account_ids" binding:"required,min=1"`
+	Name string `json:"name"`
+	ProxyID *int64 `json:"proxy_id"`
+	Concurrency *int `json:"concurrency"`
+	Priority *int `json:"priority"`
+	RateMultiplier *float64 `json:"rate_multiplier"`
+	Status string `json:"status" binding:"omitempty,oneof=active inactive error"`
+	Schedulable *bool `json:"schedulable"`
+	GroupIDs *[]int64 `json:"group_ids"`
+	Credentials map[string]any `json:"credentials"`
+	Extra map[string]any `json:"extra"`
+	ConfirmMixedChannelRisk *bool `json:"confirm_mixed_channel_risk"` // user has acknowledged the mixed-channel risk
+}
+
+// AccountWithConcurrency extends Account with real-time concurrency info
+type AccountWithConcurrency struct {
+	*dto.Account
+	CurrentConcurrency int `json:"current_concurrency"`
+}
+
+// List handles listing all accounts with pagination
+// GET /api/v1/admin/accounts
+func (h *AccountHandler) List(c *gin.Context) {
+	page, pageSize := response.ParsePagination(c)
+	platform := c.Query("platform")
+	accountType := c.Query("type")
+	status := c.Query("status")
+	search := c.Query("search")
+	// Normalize and cap the search parameter
+	search = strings.TrimSpace(search)
+	if len(search) > 100 {
+		search = search[:100]
+	}
+
+	accounts, total, err := h.adminService.ListAccounts(c.Request.Context(), page, pageSize, platform, accountType, status, search)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Get current concurrency counts for all accounts
+	accountIDs := make([]int64, len(accounts))
+	for i, acc := range accounts {
+		accountIDs[i] = acc.ID
+	}
+
+	concurrencyCounts, err := h.concurrencyService.GetAccountConcurrencyBatch(c.Request.Context(), accountIDs)
+	if err != nil {
+		// Don't fail the request; fall back to 0 for all accounts
+		concurrencyCounts = make(map[int64]int)
+	}
+
+	// Build response with concurrency info
+	result := make([]AccountWithConcurrency, len(accounts))
+	for i := range accounts {
+		result[i] = AccountWithConcurrency{
+			Account: dto.AccountFromService(&accounts[i]),
+			CurrentConcurrency: concurrencyCounts[accounts[i].ID],
+		}
+	}
+
+	response.Paginated(c, result, total, page, pageSize)
+}
+
+// GetByID handles getting an account by ID
+// GET /api/v1/admin/accounts/:id
+func (h *AccountHandler) GetByID(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	account, err := h.adminService.GetAccount(c.Request.Context(), accountID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.AccountFromService(account))
+}
+
+// Create handles creating a new account
+// POST /api/v1/admin/accounts
+func (h *AccountHandler) Create(c *gin.Context) {
+	var req CreateAccountRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+	if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+		response.BadRequest(c, "rate_multiplier must be >= 0")
+		return
+	}
+
+	// Decide whether to skip the mixed-channel check
+	skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
+
+	account, err := h.adminService.CreateAccount(c.Request.Context(), &service.CreateAccountInput{
+		Name: req.Name,
+		Notes: req.Notes,
+		Platform: req.Platform,
+		Type: req.Type,
+		Credentials: req.Credentials,
+		Extra: req.Extra,
+		ProxyID: req.ProxyID,
+		Concurrency: req.Concurrency,
+		Priority: req.Priority,
+		RateMultiplier: req.RateMultiplier,
+		GroupIDs: req.GroupIDs,
+		ExpiresAt: req.ExpiresAt,
+		AutoPauseOnExpired: req.AutoPauseOnExpired,
+		SkipMixedChannelCheck: skipCheck,
+	})
+	if err != nil {
+		// Check for a mixed-channel conflict
+		var mixedErr *service.MixedChannelError
+		if errors.As(err, &mixedErr) {
+			// Return a dedicated error payload that asks the client to confirm
+			c.JSON(409, gin.H{
+				"error": "mixed_channel_warning",
+				"message": mixedErr.Error(),
+				"details": gin.H{
+					"group_id": mixedErr.GroupID,
+					"group_name": mixedErr.GroupName,
+					"current_platform": mixedErr.CurrentPlatform,
+					"other_platform": mixedErr.OtherPlatform,
+				},
+				"require_confirmation": true,
+			})
+			return
+		}
+
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.AccountFromService(account))
+}
+
+// Update handles updating an account
+// PUT /api/v1/admin/accounts/:id
+func (h *AccountHandler) Update(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	var req UpdateAccountRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+	if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+		response.BadRequest(c, "rate_multiplier must be >= 0")
+		return
+	}
+
+	// Decide whether to skip the mixed-channel check
+	skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
+
+	account, err := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{
+		Name: req.Name,
+		Notes: req.Notes,
+		Type: req.Type,
+		Credentials: req.Credentials,
+		Extra: req.Extra,
+		ProxyID: req.ProxyID,
+		Concurrency: req.Concurrency, // pointer; nil means "not provided"
+		Priority: req.Priority, // pointer; nil means "not provided"
+		RateMultiplier: req.RateMultiplier,
+		Status: req.Status,
+		GroupIDs: req.GroupIDs,
+		ExpiresAt: req.ExpiresAt,
+		AutoPauseOnExpired: req.AutoPauseOnExpired,
+		SkipMixedChannelCheck: skipCheck,
+	})
+	if err != nil {
+		// Check for a mixed-channel conflict
+		var mixedErr *service.MixedChannelError
+		if errors.As(err, &mixedErr) {
+			// Return a dedicated error payload that asks the client to confirm
+			c.JSON(409, gin.H{
+				"error": "mixed_channel_warning",
+				"message": mixedErr.Error(),
+				"details": gin.H{
+					"group_id": mixedErr.GroupID,
+					"group_name": mixedErr.GroupName,
+					"current_platform": mixedErr.CurrentPlatform,
+					"other_platform": mixedErr.OtherPlatform,
+				},
+				"require_confirmation": true,
+			})
+			return
+		}
+
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.AccountFromService(account))
+}
+
+// Delete handles deleting an account
+// DELETE /api/v1/admin/accounts/:id
+func (h *AccountHandler) Delete(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	err = h.adminService.DeleteAccount(c.Request.Context(), accountID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "Account deleted successfully"})
+}
+
+// TestAccountRequest represents the request body for testing an account
+type TestAccountRequest struct {
+	ModelID string `json:"model_id"`
+}
+
+type SyncFromCRSRequest struct {
+	BaseURL string `json:"base_url" binding:"required"`
+	Username string `json:"username" binding:"required"`
+	Password string `json:"password" binding:"required"`
+	SyncProxies *bool `json:"sync_proxies"`
+}
+
+// Test handles testing account connectivity with SSE streaming
+// POST /api/v1/admin/accounts/:id/test
+func (h *AccountHandler) Test(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	var req TestAccountRequest
+	// Allow empty body, model_id is optional
+	_ = c.ShouldBindJSON(&req)
+
+	// Use AccountTestService to test the account with SSE streaming
+	if err := h.accountTestService.TestAccountConnection(c, accountID, req.ModelID); err != nil {
+		// Error has already been sent over the SSE stream; nothing more to do here
+		return
+	}
+}
+
+// SyncFromCRS handles syncing accounts from claude-relay-service (CRS)
+// POST /api/v1/admin/accounts/sync/crs
+func (h *AccountHandler) SyncFromCRS(c *gin.Context) {
+	var req SyncFromCRSRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Default to syncing proxies (can be disabled by explicitly setting false)
+	syncProxies := true
+	if req.SyncProxies != nil {
+		syncProxies = *req.SyncProxies
+	}
+
+	result, err := h.crsSyncService.SyncFromCRS(c.Request.Context(), service.SyncFromCRSInput{
+		BaseURL: req.BaseURL,
+		Username: req.Username,
+		Password: req.Password,
+		SyncProxies: syncProxies,
+	})
+	if err != nil {
+		// Provide detailed error message for CRS sync failures
+		response.InternalError(c, "CRS sync failed: "+err.Error())
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// Refresh handles refreshing account credentials
+// POST /api/v1/admin/accounts/:id/refresh
+func (h *AccountHandler) Refresh(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	// Get account
+	account, err := h.adminService.GetAccount(c.Request.Context(), accountID)
+	if err != nil {
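Create, Update, and BulkUpdate all answer a mixed-platform group assignment with HTTP 409, `"error": "mixed_channel_warning"`, and `"require_confirmation": true`, and expect the caller to resend with `confirm_mixed_channel_risk: true`. A minimal client-side sketch of that confirm-and-retry loop follows; the base URL, missing auth headers, and payload are illustrative assumptions, not part of this patch:

```go
package admintool

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// createAccount posts the account payload and, on a mixed-channel 409,
// retries exactly once with confirm_mixed_channel_risk set to true.
func createAccount(client *http.Client, baseURL string, payload map[string]any) error {
	for attempt := 0; attempt < 2; attempt++ {
		body, err := json.Marshal(payload)
		if err != nil {
			return err
		}
		resp, err := client.Post(baseURL+"/api/v1/admin/accounts", "application/json", bytes.NewReader(body))
		if err != nil {
			return err
		}

		if resp.StatusCode != http.StatusConflict {
			resp.Body.Close()
			if resp.StatusCode >= 400 {
				return fmt.Errorf("create failed: %s", resp.Status)
			}
			return nil // created
		}

		// Decode the 409 body and make sure it is the confirmable warning.
		var conflict struct {
			Error               string `json:"error"`
			RequireConfirmation bool   `json:"require_confirmation"`
		}
		err = json.NewDecoder(resp.Body).Decode(&conflict)
		resp.Body.Close()
		if err != nil {
			return err
		}
		if conflict.Error != "mixed_channel_warning" || !conflict.RequireConfirmation {
			return fmt.Errorf("unexpected 409 payload: %q", conflict.Error)
		}
		// Acknowledge the risk and go around once more.
		payload["confirm_mixed_channel_risk"] = true
	}
	return fmt.Errorf("conflict persisted after confirmation")
}
```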
response.NotFound(c, "Account not found") + return + } + + // Only refresh OAuth-based accounts (oauth and setup-token) + if !account.IsOAuth() { + response.BadRequest(c, "Cannot refresh non-OAuth account credentials") + return + } + + var newCredentials map[string]any + + if account.IsOpenAI() { + // Use OpenAI OAuth service to refresh token + tokenInfo, err := h.openaiOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Build new credentials from token info + newCredentials = h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Preserve non-token settings from existing credentials + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } else if account.Platform == service.PlatformGemini { + tokenInfo, err := h.geminiOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.InternalError(c, "Failed to refresh credentials: "+err.Error()) + return + } + + newCredentials = h.geminiOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } else if account.Platform == service.PlatformAntigravity { + tokenInfo, err := h.antigravityOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.ErrorFrom(c, err) + return + } + + newCredentials = h.antigravityOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } else { + // Use Anthropic/Claude OAuth service to refresh token + tokenInfo, err := h.oauthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Copy existing credentials to preserve non-token settings (e.g., intercept_warmup_requests) + newCredentials = make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + + // Update token-related fields + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_in"] = strconv.FormatInt(tokenInfo.ExpiresIn, 10) + newCredentials["expires_at"] = strconv.FormatInt(tokenInfo.ExpiresAt, 10) + if strings.TrimSpace(tokenInfo.RefreshToken) != "" { + newCredentials["refresh_token"] = tokenInfo.RefreshToken + } + if strings.TrimSpace(tokenInfo.Scope) != "" { + newCredentials["scope"] = tokenInfo.Scope + } + } + + updatedAccount, err := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ + Credentials: newCredentials, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AccountFromService(updatedAccount)) +} + +// GetStats handles getting account statistics +// GET /api/v1/admin/accounts/:id/stats +func (h *AccountHandler) GetStats(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + // Parse days parameter (default 30) + days := 30 + if daysStr := c.Query("days"); daysStr != "" { + if d, err := strconv.Atoi(daysStr); err == nil && d > 0 && d <= 90 { + days = d + } + } + + // Calculate time range + now := timezone.Now() + endTime := timezone.StartOfDay(now.AddDate(0, 0, 1)) + startTime := timezone.StartOfDay(now.AddDate(0, 0, -days+1)) + + stats, err := 
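The refresh path above preserves non-token settings in two directions: for OpenAI, Gemini, and Antigravity the provider-built credentials are backfilled with any old keys they lack, while the Anthropic branch copies everything and then overwrites only the token fields. A standalone sketch of the first merge, using plain maps instead of the service types:

```go
package main

import "fmt"

// mergePreservingExisting backfills freshly built credentials with any
// keys from the old map that the provider did not set, mirroring the
// OpenAI/Gemini/Antigravity branches of Refresh.
func mergePreservingExisting(fresh, old map[string]any) map[string]any {
	for k, v := range old {
		if _, exists := fresh[k]; !exists {
			fresh[k] = v
		}
	}
	return fresh
}

func main() {
	old := map[string]any{"access_token": "stale", "intercept_warmup_requests": true}
	fresh := map[string]any{"access_token": "new", "refresh_token": "r1"}
	// access_token stays "new"; intercept_warmup_requests is carried over.
	fmt.Println(mergePreservingExisting(fresh, old))
}
```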
h.accountUsageService.GetAccountUsageStats(c.Request.Context(), accountID, startTime, endTime) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, stats) +} + +// ClearError handles clearing account error +// POST /api/v1/admin/accounts/:id/clear-error +func (h *AccountHandler) ClearError(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + account, err := h.adminService.ClearAccountError(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AccountFromService(account)) +} + +// BatchCreate handles batch creating accounts +// POST /api/v1/admin/accounts/batch +func (h *AccountHandler) BatchCreate(c *gin.Context) { + var req struct { + Accounts []CreateAccountRequest `json:"accounts" binding:"required,min=1"` + } + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Return mock data for now + response.Success(c, gin.H{ + "success": len(req.Accounts), + "failed": 0, + "results": []gin.H{}, + }) +} + +// BatchUpdateCredentialsRequest represents batch credentials update request +type BatchUpdateCredentialsRequest struct { + AccountIDs []int64 `json:"account_ids" binding:"required,min=1"` + Field string `json:"field" binding:"required,oneof=account_uuid org_uuid intercept_warmup_requests"` + Value any `json:"value"` +} + +// BatchUpdateCredentials handles batch updating credentials fields +// POST /api/v1/admin/accounts/batch-update-credentials +func (h *AccountHandler) BatchUpdateCredentials(c *gin.Context) { + var req BatchUpdateCredentialsRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Validate value type based on field + if req.Field == "intercept_warmup_requests" { + // Must be boolean + if _, ok := req.Value.(bool); !ok { + response.BadRequest(c, "intercept_warmup_requests must be boolean") + return + } + } else { + // account_uuid and org_uuid can be string or null + if req.Value != nil { + if _, ok := req.Value.(string); !ok { + response.BadRequest(c, req.Field+" must be string or null") + return + } + } + } + + ctx := c.Request.Context() + success := 0 + failed := 0 + results := []gin.H{} + + for _, accountID := range req.AccountIDs { + // Get account + account, err := h.adminService.GetAccount(ctx, accountID) + if err != nil { + failed++ + results = append(results, gin.H{ + "account_id": accountID, + "success": false, + "error": "Account not found", + }) + continue + } + + // Update credentials field + if account.Credentials == nil { + account.Credentials = make(map[string]any) + } + + account.Credentials[req.Field] = req.Value + + // Update account + updateInput := &service.UpdateAccountInput{ + Credentials: account.Credentials, + } + + _, err = h.adminService.UpdateAccount(ctx, accountID, updateInput) + if err != nil { + failed++ + results = append(results, gin.H{ + "account_id": accountID, + "success": false, + "error": err.Error(), + }) + continue + } + + success++ + results = append(results, gin.H{ + "account_id": accountID, + "success": true, + }) + } + + response.Success(c, gin.H{ + "success": success, + "failed": failed, + "results": results, + }) +} + +// BulkUpdate handles bulk updating accounts with selected fields/credentials. 
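BatchUpdateCredentials whitelists three credential fields and type-checks the value before fanning out per account. The same rules, extracted into a standalone validator for illustration (the field names come from the request struct above):

```go
package main

import "fmt"

// validateCredentialValue mirrors the per-field type rules used by
// BatchUpdateCredentials: intercept_warmup_requests must be a bool,
// account_uuid / org_uuid must be a string or null.
func validateCredentialValue(field string, value any) error {
	switch field {
	case "intercept_warmup_requests":
		if _, ok := value.(bool); !ok {
			return fmt.Errorf("%s must be boolean", field)
		}
	case "account_uuid", "org_uuid":
		if value == nil {
			return nil // null clears the field
		}
		if _, ok := value.(string); !ok {
			return fmt.Errorf("%s must be string or null", field)
		}
	default:
		return fmt.Errorf("unsupported field %q", field)
	}
	return nil
}

func main() {
	fmt.Println(validateCredentialValue("intercept_warmup_requests", true)) // <nil>
	fmt.Println(validateCredentialValue("account_uuid", 42))                // error
}
```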
+// POST /api/v1/admin/accounts/bulk-update
+func (h *AccountHandler) BulkUpdate(c *gin.Context) {
+	var req BulkUpdateAccountsRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+	if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+		response.BadRequest(c, "rate_multiplier must be >= 0")
+		return
+	}
+
+	// Decide whether to skip the mixed-channel check
+	skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
+
+	hasUpdates := req.Name != "" ||
+		req.ProxyID != nil ||
+		req.Concurrency != nil ||
+		req.Priority != nil ||
+		req.RateMultiplier != nil ||
+		req.Status != "" ||
+		req.Schedulable != nil ||
+		req.GroupIDs != nil ||
+		len(req.Credentials) > 0 ||
+		len(req.Extra) > 0
+
+	if !hasUpdates {
+		response.BadRequest(c, "No updates provided")
+		return
+	}
+
+	result, err := h.adminService.BulkUpdateAccounts(c.Request.Context(), &service.BulkUpdateAccountsInput{
+		AccountIDs: req.AccountIDs,
+		Name: req.Name,
+		ProxyID: req.ProxyID,
+		Concurrency: req.Concurrency,
+		Priority: req.Priority,
+		RateMultiplier: req.RateMultiplier,
+		Status: req.Status,
+		Schedulable: req.Schedulable,
+		GroupIDs: req.GroupIDs,
+		Credentials: req.Credentials,
+		Extra: req.Extra,
+		SkipMixedChannelCheck: skipCheck,
+	})
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// ========== OAuth Handlers ==========
+
+// GenerateAuthURLRequest represents the request for generating auth URL
+type GenerateAuthURLRequest struct {
+	ProxyID *int64 `json:"proxy_id"`
+}
+
+// GenerateAuthURL generates OAuth authorization URL with full scope
+// POST /api/v1/admin/accounts/generate-auth-url
+func (h *OAuthHandler) GenerateAuthURL(c *gin.Context) {
+	var req GenerateAuthURLRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		// Allow empty body
+		req = GenerateAuthURLRequest{}
+	}
+
+	result, err := h.oauthService.GenerateAuthURL(c.Request.Context(), req.ProxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// GenerateSetupTokenURL generates OAuth authorization URL for setup token (inference only)
+// POST /api/v1/admin/accounts/generate-setup-token-url
+func (h *OAuthHandler) GenerateSetupTokenURL(c *gin.Context) {
+	var req GenerateAuthURLRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		// Allow empty body
+		req = GenerateAuthURLRequest{}
+	}
+
+	result, err := h.oauthService.GenerateSetupTokenURL(c.Request.Context(), req.ProxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// ExchangeCodeRequest represents the request for exchanging auth code
+type ExchangeCodeRequest struct {
+	SessionID string `json:"session_id" binding:"required"`
+	Code string `json:"code" binding:"required"`
+	ProxyID *int64 `json:"proxy_id"`
+}
+
+// ExchangeCode exchanges authorization code for tokens
+// POST /api/v1/admin/accounts/exchange-code
+func (h *OAuthHandler) ExchangeCode(c *gin.Context) {
+	var req ExchangeCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	tokenInfo, err := h.oauthService.ExchangeCode(c.Request.Context(), &service.ExchangeCodeInput{
+		SessionID: req.SessionID,
+		Code: req.Code,
+		ProxyID: req.ProxyID,
+	})
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, tokenInfo)
+}
+
+// ExchangeSetupTokenCode exchanges authorization code for setup token
+// POST
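The update and bulk-update requests lean on a single convention: a nil pointer (or empty string/map) means "leave unchanged", and the `hasUpdates` guard rejects a request where nothing at all was provided. A trimmed sketch of that convention with invented field names:

```go
package main

import "fmt"

// BulkPatch shows the nil-pointer PATCH convention used above: nil means
// "leave unchanged", a non-nil pointer carries the new value, including zero.
type BulkPatch struct {
	Priority    *int
	Concurrency *int
	Status      string // empty string means "leave unchanged"
}

func (p BulkPatch) hasUpdates() bool {
	return p.Priority != nil || p.Concurrency != nil || p.Status != ""
}

func main() {
	zero := 0
	fmt.Println(BulkPatch{}.hasUpdates())                // false: nothing provided
	fmt.Println(BulkPatch{Priority: &zero}.hasUpdates()) // true: explicitly set to 0
}
```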
/api/v1/admin/accounts/exchange-setup-token-code +func (h *OAuthHandler) ExchangeSetupTokenCode(c *gin.Context) { + var req ExchangeCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + tokenInfo, err := h.oauthService.ExchangeCode(c.Request.Context(), &service.ExchangeCodeInput{ + SessionID: req.SessionID, + Code: req.Code, + ProxyID: req.ProxyID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, tokenInfo) +} + +// CookieAuthRequest represents the request for cookie-based authentication +type CookieAuthRequest struct { + SessionKey string `json:"code" binding:"required"` // Using 'code' field as sessionKey (frontend sends it this way) + ProxyID *int64 `json:"proxy_id"` +} + +// CookieAuth performs OAuth using sessionKey (cookie-based auto-auth) +// POST /api/v1/admin/accounts/cookie-auth +func (h *OAuthHandler) CookieAuth(c *gin.Context) { + var req CookieAuthRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + tokenInfo, err := h.oauthService.CookieAuth(c.Request.Context(), &service.CookieAuthInput{ + SessionKey: req.SessionKey, + ProxyID: req.ProxyID, + Scope: "full", + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, tokenInfo) +} + +// SetupTokenCookieAuth performs OAuth using sessionKey for setup token (inference only) +// POST /api/v1/admin/accounts/setup-token-cookie-auth +func (h *OAuthHandler) SetupTokenCookieAuth(c *gin.Context) { + var req CookieAuthRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + tokenInfo, err := h.oauthService.CookieAuth(c.Request.Context(), &service.CookieAuthInput{ + SessionKey: req.SessionKey, + ProxyID: req.ProxyID, + Scope: "inference", + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, tokenInfo) +} + +// GetUsage handles getting account usage information +// GET /api/v1/admin/accounts/:id/usage +func (h *AccountHandler) GetUsage(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + usage, err := h.accountUsageService.GetUsage(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, usage) +} + +// ClearRateLimit handles clearing account rate limit status +// POST /api/v1/admin/accounts/:id/clear-rate-limit +func (h *AccountHandler) ClearRateLimit(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + err = h.rateLimitService.ClearRateLimit(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Rate limit cleared successfully"}) +} + +// GetTempUnschedulable handles getting temporary unschedulable status +// GET /api/v1/admin/accounts/:id/temp-unschedulable +func (h *AccountHandler) GetTempUnschedulable(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + state, err := h.rateLimitService.GetTempUnschedStatus(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + if state == nil || state.UntilUnix <= 
time.Now().Unix() { + response.Success(c, gin.H{"active": false}) + return + } + + response.Success(c, gin.H{ + "active": true, + "state": state, + }) +} + +// ClearTempUnschedulable handles clearing temporary unschedulable status +// DELETE /api/v1/admin/accounts/:id/temp-unschedulable +func (h *AccountHandler) ClearTempUnschedulable(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + if err := h.rateLimitService.ClearTempUnschedulable(c.Request.Context(), accountID); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Temp unschedulable cleared successfully"}) +} + +// GetTodayStats handles getting account today statistics +// GET /api/v1/admin/accounts/:id/today-stats +func (h *AccountHandler) GetTodayStats(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + stats, err := h.accountUsageService.GetTodayStats(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, stats) +} + +// SetSchedulableRequest represents the request body for setting schedulable status +type SetSchedulableRequest struct { + Schedulable bool `json:"schedulable"` +} + +// SetSchedulable handles toggling account schedulable status +// POST /api/v1/admin/accounts/:id/schedulable +func (h *AccountHandler) SetSchedulable(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + var req SetSchedulableRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + account, err := h.adminService.SetAccountSchedulable(c.Request.Context(), accountID, req.Schedulable) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AccountFromService(account)) +} + +// GetAvailableModels handles getting available models for an account +// GET /api/v1/admin/accounts/:id/models +func (h *AccountHandler) GetAvailableModels(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account ID") + return + } + + account, err := h.adminService.GetAccount(c.Request.Context(), accountID) + if err != nil { + response.NotFound(c, "Account not found") + return + } + + // Handle OpenAI accounts + if account.IsOpenAI() { + // For OAuth accounts: return default OpenAI models + if account.IsOAuth() { + response.Success(c, openai.DefaultModels) + return + } + + // For API Key accounts: check model_mapping + mapping := account.GetModelMapping() + if len(mapping) == 0 { + response.Success(c, openai.DefaultModels) + return + } + + // Return mapped models + var models []openai.Model + for requestedModel := range mapping { + var found bool + for _, dm := range openai.DefaultModels { + if dm.ID == requestedModel { + models = append(models, dm) + found = true + break + } + } + if !found { + models = append(models, openai.Model{ + ID: requestedModel, + Object: "model", + Type: "model", + DisplayName: requestedModel, + }) + } + } + response.Success(c, models) + return + } + + // Handle Gemini accounts + if account.IsGemini() { + // For OAuth accounts: return default Gemini models + if account.IsOAuth() { + response.Success(c, geminicli.DefaultModels) + return + } + + // For API 
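GetAvailableModels applies the same rule on every platform branch for API-key accounts: the keys of `model_mapping` are the exposed model IDs; a key that matches a default model reuses its metadata, and anything else gets a basic synthesized entry. A platform-neutral sketch of that lookup (the model IDs below are illustrative, not taken from the patch):

```go
package main

import "fmt"

// model is a trimmed stand-in for the claude/gemini model types used above.
type model struct {
	ID          string
	DisplayName string
}

// modelsFromMapping mirrors the API-key branch of GetAvailableModels: every
// mapping key is exposed, enriched from the defaults when possible.
func modelsFromMapping(mapping map[string]string, defaults []model) []model {
	var out []model
	for requested := range mapping {
		found := false
		for _, dm := range defaults {
			if dm.ID == requested {
				out = append(out, dm)
				found = true
				break
			}
		}
		if !found {
			// Unknown ID: fall back to a basic entry named after the key.
			out = append(out, model{ID: requested, DisplayName: requested})
		}
	}
	return out
}

func main() {
	defaults := []model{{ID: "model-a", DisplayName: "Model A"}}
	mapping := map[string]string{"model-a": "upstream-a", "my-alias": "upstream-b"}
	fmt.Println(modelsFromMapping(mapping, defaults))
}
```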
Key accounts: return models based on model_mapping
+		mapping := account.GetModelMapping()
+		if len(mapping) == 0 {
+			response.Success(c, geminicli.DefaultModels)
+			return
+		}
+
+		var models []geminicli.Model
+		for requestedModel := range mapping {
+			var found bool
+			for _, dm := range geminicli.DefaultModels {
+				if dm.ID == requestedModel {
+					models = append(models, dm)
+					found = true
+					break
+				}
+			}
+			if !found {
+				models = append(models, geminicli.Model{
+					ID: requestedModel,
+					Type: "model",
+					DisplayName: requestedModel,
+					CreatedAt: "",
+				})
+			}
+		}
+		response.Success(c, models)
+		return
+	}
+
+	// Handle Antigravity accounts: return Claude + Gemini models
+	if account.Platform == service.PlatformAntigravity {
+		// Antigravity supports Claude plus a subset of Gemini models
+		type UnifiedModel struct {
+			ID string `json:"id"`
+			Type string `json:"type"`
+			DisplayName string `json:"display_name"`
+		}
+
+		var models []UnifiedModel
+
+		// Add the Claude models
+		for _, m := range claude.DefaultModels {
+			models = append(models, UnifiedModel{
+				ID: m.ID,
+				Type: m.Type,
+				DisplayName: m.DisplayName,
+			})
+		}
+
+		// Add the Gemini 3 series models for testing
+		geminiTestModels := []UnifiedModel{
+			{ID: "gemini-3-flash", Type: "model", DisplayName: "Gemini 3 Flash"},
+			{ID: "gemini-3-pro-preview", Type: "model", DisplayName: "Gemini 3 Pro Preview"},
+		}
+		models = append(models, geminiTestModels...)
+
+		response.Success(c, models)
+		return
+	}
+
+	// Handle Claude/Anthropic accounts
+	// For OAuth and Setup-Token accounts: return default models
+	if account.IsOAuth() {
+		response.Success(c, claude.DefaultModels)
+		return
+	}
+
+	// For API Key accounts: return models based on model_mapping
+	mapping := account.GetModelMapping()
+	if len(mapping) == 0 {
+		// No mapping configured, return default models
+		response.Success(c, claude.DefaultModels)
+		return
+	}
+
+	// Return mapped models (keys of the mapping are the available model IDs)
+	var models []claude.Model
+	for requestedModel := range mapping {
+		// Try to find display info from default models
+		var found bool
+		for _, dm := range claude.DefaultModels {
+			if dm.ID == requestedModel {
+				models = append(models, dm)
+				found = true
+				break
+			}
+		}
+		// If not found in defaults, create a basic entry
+		if !found {
+			models = append(models, claude.Model{
+				ID: requestedModel,
+				Type: "model",
+				DisplayName: requestedModel,
+				CreatedAt: "",
+			})
+		}
+	}
+
+	response.Success(c, models)
+}
+
+// RefreshTier handles refreshing Google One tier for a single account
+// POST /api/v1/admin/accounts/:id/refresh-tier
+func (h *AccountHandler) RefreshTier(c *gin.Context) {
+	accountID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid account ID")
+		return
+	}
+
+	ctx := c.Request.Context()
+	account, err := h.adminService.GetAccount(ctx, accountID)
+	if err != nil {
+		response.NotFound(c, "Account not found")
+		return
+	}
+
+	if account.Platform != service.PlatformGemini || account.Type != service.AccountTypeOAuth {
+		response.BadRequest(c, "Only Gemini OAuth accounts support tier refresh")
+		return
+	}
+
+	oauthType, _ := account.Credentials["oauth_type"].(string)
+	if oauthType != "google_one" {
+		response.BadRequest(c, "Only google_one OAuth accounts support tier refresh")
+		return
+	}
+
+	tierID, extra, creds, err := h.geminiOAuthService.RefreshAccountGoogleOneTier(ctx, account)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	_, updateErr := h.adminService.UpdateAccount(ctx, accountID, &service.UpdateAccountInput{
+		Credentials: creds,
+		Extra: extra,
+	})
+	if updateErr != nil {
+		response.ErrorFrom(c, updateErr)
+		return
+	}
+
+	response.Success(c, gin.H{
+		"tier_id": tierID,
+		"storage_info": extra,
+		"drive_storage_limit": extra["drive_storage_limit"],
+		"drive_storage_usage": extra["drive_storage_usage"],
+		"updated_at": extra["drive_tier_updated_at"],
+	})
+}
+
+// BatchRefreshTierRequest represents batch tier refresh request
+type BatchRefreshTierRequest struct {
+	AccountIDs []int64 `json:"account_ids"`
+}
+
+// BatchRefreshTier handles batch refreshing Google One tier
+// POST /api/v1/admin/accounts/batch-refresh-tier
+func (h *AccountHandler) BatchRefreshTier(c *gin.Context) {
+	var req BatchRefreshTierRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		req = BatchRefreshTierRequest{}
+	}
+
+	ctx := c.Request.Context()
+	accounts := make([]*service.Account, 0)
+
+	if len(req.AccountIDs) == 0 {
+		allAccounts, _, err := h.adminService.ListAccounts(ctx, 1, 10000, "gemini", "oauth", "", "")
+		if err != nil {
+			response.ErrorFrom(c, err)
+			return
+		}
+		for i := range allAccounts {
+			acc := &allAccounts[i]
+			oauthType, _ := acc.Credentials["oauth_type"].(string)
+			if oauthType == "google_one" {
+				accounts = append(accounts, acc)
+			}
+		}
+	} else {
+		fetched, err := h.adminService.GetAccountsByIDs(ctx, req.AccountIDs)
+		if err != nil {
+			response.ErrorFrom(c, err)
+			return
+		}
+
+		for _, acc := range fetched {
+			if acc == nil {
+				continue
+			}
+			if acc.Platform != service.PlatformGemini || acc.Type != service.AccountTypeOAuth {
+				continue
+			}
+			oauthType, _ := acc.Credentials["oauth_type"].(string)
+			if oauthType != "google_one" {
+				continue
+			}
+			accounts = append(accounts, acc)
+		}
+	}
+
+	const maxConcurrency = 10
+	g, gctx := errgroup.WithContext(ctx)
+	g.SetLimit(maxConcurrency)
+
+	var mu sync.Mutex
+	var successCount, failedCount int
+	var errors []gin.H
+
+	for _, account := range accounts {
+		acc := account // capture the loop variable for the closure
+		g.Go(func() error {
+			_, extra, creds, err := h.geminiOAuthService.RefreshAccountGoogleOneTier(gctx, acc)
+			if err != nil {
+				mu.Lock()
+				failedCount++
+				errors = append(errors, gin.H{
+					"account_id": acc.ID,
+					"error": err.Error(),
+				})
+				mu.Unlock()
+				return nil
+			}
+
+			_, updateErr := h.adminService.UpdateAccount(gctx, acc.ID, &service.UpdateAccountInput{
+				Credentials: creds,
+				Extra: extra,
+			})
+
+			mu.Lock()
+			if updateErr != nil {
+				failedCount++
+				errors = append(errors, gin.H{
+					"account_id": acc.ID,
+					"error": updateErr.Error(),
+				})
+			} else {
+				successCount++
+			}
+			mu.Unlock()
+
+			return nil
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	results := gin.H{
+		"total": len(accounts),
+		"success": successCount,
+		"failed": failedCount,
+		"errors": errors,
+	}
+
+	response.Success(c, results)
+}
diff --git a/backend/internal/handler/admin/antigravity_oauth_handler.go b/backend/internal/handler/admin/antigravity_oauth_handler.go
new file mode 100644
index 00000000..18541684
--- /dev/null
+++ b/backend/internal/handler/admin/antigravity_oauth_handler.go
@@ -0,0 +1,67 @@
+package admin
+
+import (
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/gin-gonic/gin"
+)
+
+type AntigravityOAuthHandler struct {
+	antigravityOAuthService *service.AntigravityOAuthService
+}
+
+func NewAntigravityOAuthHandler(antigravityOAuthService *service.AntigravityOAuthService) *AntigravityOAuthHandler {
+	return &AntigravityOAuthHandler{antigravityOAuthService: antigravityOAuthService}
+}
+
+type AntigravityGenerateAuthURLRequest struct {
+	ProxyID *int64 `json:"proxy_id"`
+}
+
+// GenerateAuthURL generates Google OAuth authorization URL
+// POST /api/v1/admin/antigravity/oauth/auth-url
+func (h *AntigravityOAuthHandler) GenerateAuthURL(c *gin.Context) {
+	var req AntigravityGenerateAuthURLRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	result, err := h.antigravityOAuthService.GenerateAuthURL(c.Request.Context(), req.ProxyID)
+	if err != nil {
+		response.InternalError(c, "Failed to generate auth URL: "+err.Error())
+		return
+	}
+
+	response.Success(c, result)
+}
+
+type AntigravityExchangeCodeRequest struct {
+	SessionID string `json:"session_id" binding:"required"`
+	State string `json:"state" binding:"required"`
+	Code string `json:"code" binding:"required"`
+	ProxyID *int64 `json:"proxy_id"`
+}
+
+// ExchangeCode exchanges the authorization code for tokens
+// POST /api/v1/admin/antigravity/oauth/exchange-code
+func (h *AntigravityOAuthHandler) ExchangeCode(c *gin.Context) {
+	var req AntigravityExchangeCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	tokenInfo, err := h.antigravityOAuthService.ExchangeCode(c.Request.Context(), &service.AntigravityExchangeCodeInput{
+		SessionID: req.SessionID,
+		State: req.State,
+		Code: req.Code,
+		ProxyID: req.ProxyID,
+	})
+	if err != nil {
+		response.BadRequest(c, "Token exchange failed: "+err.Error())
+		return
+	}
+
+	response.Success(c, tokenInfo)
+}
diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go
new file mode 100644
index 00000000..3f07403d
--- /dev/null
+++ b/backend/internal/handler/admin/dashboard_handler.go
@@ -0,0 +1,397 @@
+package admin
+
+import (
+	"errors"
+	"strconv"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// DashboardHandler handles admin dashboard statistics
+type DashboardHandler struct {
+	dashboardService *service.DashboardService
+	aggregationService *service.DashboardAggregationService
+	startTime time.Time // Server start time for uptime calculation
+}
+
+// NewDashboardHandler creates a new admin dashboard handler
+func NewDashboardHandler(dashboardService *service.DashboardService, aggregationService *service.DashboardAggregationService) *DashboardHandler {
+	return &DashboardHandler{
+		dashboardService: dashboardService,
+		aggregationService: aggregationService,
+		startTime: time.Now(),
+	}
+}
+
+// parseTimeRange parses start_date, end_date query parameters
+// Uses user's timezone if provided, otherwise falls back to server timezone
+func parseTimeRange(c *gin.Context) (time.Time, time.Time) {
+	userTZ := c.Query("timezone") // Get user's timezone from request
+	now := timezone.NowInUserLocation(userTZ)
+	startDate := c.Query("start_date")
+	endDate := c.Query("end_date")
+
+	var startTime, endTime time.Time
+
+	if startDate != "" {
+		if t, err := timezone.ParseInUserLocation("2006-01-02", startDate, userTZ); err == nil {
+			startTime = t
+		} else {
+			startTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, -7), userTZ)
+		}
+	} else {
+		startTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, -7), userTZ)
+	}
+
+	if endDate != "" {
+		if t, err := timezone.ParseInUserLocation("2006-01-02", endDate, userTZ); err == nil {
+			endTime = t.Add(24 * time.Hour) // Include the end date
+		} else {
+			endTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, 1), userTZ)
+		}
+	} else {
+		endTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, 1), userTZ)
+	}
+
+	return startTime, endTime
+}
+
+// GetStats handles getting dashboard statistics
+// GET /api/v1/admin/dashboard/stats
+func (h *DashboardHandler) GetStats(c *gin.Context) {
+	stats, err := h.dashboardService.GetDashboardStats(c.Request.Context())
+	if err != nil {
+		response.Error(c, 500, "Failed to get dashboard statistics")
+		return
+	}
+
+	// Calculate uptime in seconds
+	uptime := int64(time.Since(h.startTime).Seconds())
+
+	response.Success(c, gin.H{
+		// User statistics
+		"total_users": stats.TotalUsers,
+		"today_new_users": stats.TodayNewUsers,
+		"active_users": stats.ActiveUsers,
+
+		// API key statistics
+		"total_api_keys": stats.TotalAPIKeys,
+		"active_api_keys": stats.ActiveAPIKeys,
+
+		// Account statistics
+		"total_accounts": stats.TotalAccounts,
+		"normal_accounts": stats.NormalAccounts,
+		"error_accounts": stats.ErrorAccounts,
+		"ratelimit_accounts": stats.RateLimitAccounts,
+		"overload_accounts": stats.OverloadAccounts,
+
+		// Cumulative token usage
+		"total_requests": stats.TotalRequests,
+		"total_input_tokens": stats.TotalInputTokens,
+		"total_output_tokens": stats.TotalOutputTokens,
+		"total_cache_creation_tokens": stats.TotalCacheCreationTokens,
+		"total_cache_read_tokens": stats.TotalCacheReadTokens,
+		"total_tokens": stats.TotalTokens,
+		"total_cost": stats.TotalCost, // standard billing
+		"total_actual_cost": stats.TotalActualCost, // actually charged
+
+		// Today's token usage
+		"today_requests": stats.TodayRequests,
+		"today_input_tokens": stats.TodayInputTokens,
+		"today_output_tokens": stats.TodayOutputTokens,
+		"today_cache_creation_tokens": stats.TodayCacheCreationTokens,
+		"today_cache_read_tokens": stats.TodayCacheReadTokens,
+		"today_tokens": stats.TodayTokens,
+		"today_cost": stats.TodayCost, // today's standard billing
+		"today_actual_cost": stats.TodayActualCost, // today's actual charge
+
+		// System runtime statistics
+		"average_duration_ms": stats.AverageDurationMs,
+		"uptime": uptime,
+
+		// Performance metrics
+		"rpm": stats.Rpm,
+		"tpm": stats.Tpm,
+
+		// Pre-aggregation freshness
+		"hourly_active_users": stats.HourlyActiveUsers,
+		"stats_updated_at": stats.StatsUpdatedAt,
+		"stats_stale": stats.StatsStale,
+	})
+}
+
+type DashboardAggregationBackfillRequest struct {
+	Start string `json:"start"`
+	End string `json:"end"`
+}
+
+// BackfillAggregation handles triggering aggregation backfill
+// POST /api/v1/admin/dashboard/aggregation/backfill
+func (h *DashboardHandler) BackfillAggregation(c *gin.Context) {
+	if h.aggregationService == nil {
+		response.InternalError(c, "Aggregation service not available")
+		return
+	}
+
+	var req DashboardAggregationBackfillRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request body")
+		return
+	}
+	start, err := time.Parse(time.RFC3339, req.Start)
+	if err != nil {
+		response.BadRequest(c, "Invalid start time")
+		return
+	}
+	end, err := time.Parse(time.RFC3339, req.End)
+	if err != nil {
+		response.BadRequest(c, "Invalid end time")
+		return
+	}
+
+	if err := h.aggregationService.TriggerBackfill(start, end); err != nil {
+		if errors.Is(err, service.ErrDashboardBackfillDisabled) {
+			response.Forbidden(c, "Backfill is disabled")
+			return
+		}
+		if errors.Is(err, service.ErrDashboardBackfillTooLarge) {
+			response.BadRequest(c, "Backfill range too large")
+			return
+		}
+		response.InternalError(c, "Failed to trigger backfill")
+		return
+	}
+
+	response.Success(c, gin.H{
+		"status": "accepted",
+	})
+}
+
+// GetRealtimeMetrics handles getting
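BackfillAggregation expects both bounds as RFC3339 strings in the JSON body and re-parses them server-side. A small sketch of building a valid one-day payload (the 24-hour window is an arbitrary example):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// backfillRequest mirrors DashboardAggregationBackfillRequest: both bounds
// travel as RFC3339 strings and are parsed again on the server.
type backfillRequest struct {
	Start string `json:"start"`
	End   string `json:"end"`
}

func main() {
	end := time.Now().UTC().Truncate(time.Hour)
	req := backfillRequest{
		Start: end.Add(-24 * time.Hour).Format(time.RFC3339),
		End:   end.Format(time.RFC3339),
	}
	body, _ := json.Marshal(req)
	fmt.Println(string(body))

	// The server-side validation round-trips through time.Parse:
	if _, err := time.Parse(time.RFC3339, req.Start); err != nil {
		fmt.Println("invalid start:", err)
	}
}
```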
real-time system metrics +// GET /api/v1/admin/dashboard/realtime +func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) { + // Return mock data for now + response.Success(c, gin.H{ + "active_requests": 0, + "requests_per_minute": 0, + "average_response_time": 0, + "error_rate": 0.0, + }) +} + +// GetUsageTrend handles getting usage trend data +// GET /api/v1/admin/dashboard/trend +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream +func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { + startTime, endTime := parseTimeRange(c) + granularity := c.DefaultQuery("granularity", "day") + + // Parse optional filter params + var userID, apiKeyID, accountID, groupID int64 + var model string + var stream *bool + + if userIDStr := c.Query("user_id"); userIDStr != "" { + if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { + userID = id + } + } + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + if id, err := strconv.ParseInt(apiKeyIDStr, 10, 64); err == nil { + apiKeyID = id + } + } + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil { + accountID = id + } + } + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil { + groupID = id + } + } + if modelStr := c.Query("model"); modelStr != "" { + model = modelStr + } + if streamStr := c.Query("stream"); streamStr != "" { + if streamVal, err := strconv.ParseBool(streamStr); err == nil { + stream = &streamVal + } + } + + trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) + if err != nil { + response.Error(c, 500, "Failed to get usage trend") + return + } + + response.Success(c, gin.H{ + "trend": trend, + "start_date": startTime.Format("2006-01-02"), + "end_date": endTime.Add(-24 * time.Hour).Format("2006-01-02"), + "granularity": granularity, + }) +} + +// GetModelStats handles getting model usage statistics +// GET /api/v1/admin/dashboard/models +// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream +func (h *DashboardHandler) GetModelStats(c *gin.Context) { + startTime, endTime := parseTimeRange(c) + + // Parse optional filter params + var userID, apiKeyID, accountID, groupID int64 + var stream *bool + + if userIDStr := c.Query("user_id"); userIDStr != "" { + if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { + userID = id + } + } + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + if id, err := strconv.ParseInt(apiKeyIDStr, 10, 64); err == nil { + apiKeyID = id + } + } + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil { + accountID = id + } + } + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil { + groupID = id + } + } + if streamStr := c.Query("stream"); streamStr != "" { + if streamVal, err := strconv.ParseBool(streamStr); err == nil { + stream = &streamVal + } + } + + stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream) + if err != nil { + response.Error(c, 500, "Failed to get model statistics") + return + } + + response.Success(c, gin.H{ + "models": stats, + 
"start_date": startTime.Format("2006-01-02"), + "end_date": endTime.Add(-24 * time.Hour).Format("2006-01-02"), + }) +} + +// GetAPIKeyUsageTrend handles getting API key usage trend data +// GET /api/v1/admin/dashboard/api-keys-trend +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), limit (default 5) +func (h *DashboardHandler) GetAPIKeyUsageTrend(c *gin.Context) { + startTime, endTime := parseTimeRange(c) + granularity := c.DefaultQuery("granularity", "day") + limitStr := c.DefaultQuery("limit", "5") + limit, err := strconv.Atoi(limitStr) + if err != nil || limit <= 0 { + limit = 5 + } + + trend, err := h.dashboardService.GetAPIKeyUsageTrend(c.Request.Context(), startTime, endTime, granularity, limit) + if err != nil { + response.Error(c, 500, "Failed to get API key usage trend") + return + } + + response.Success(c, gin.H{ + "trend": trend, + "start_date": startTime.Format("2006-01-02"), + "end_date": endTime.Add(-24 * time.Hour).Format("2006-01-02"), + "granularity": granularity, + }) +} + +// GetUserUsageTrend handles getting user usage trend data +// GET /api/v1/admin/dashboard/users-trend +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), limit (default 12) +func (h *DashboardHandler) GetUserUsageTrend(c *gin.Context) { + startTime, endTime := parseTimeRange(c) + granularity := c.DefaultQuery("granularity", "day") + limitStr := c.DefaultQuery("limit", "12") + limit, err := strconv.Atoi(limitStr) + if err != nil || limit <= 0 { + limit = 12 + } + + trend, err := h.dashboardService.GetUserUsageTrend(c.Request.Context(), startTime, endTime, granularity, limit) + if err != nil { + response.Error(c, 500, "Failed to get user usage trend") + return + } + + response.Success(c, gin.H{ + "trend": trend, + "start_date": startTime.Format("2006-01-02"), + "end_date": endTime.Add(-24 * time.Hour).Format("2006-01-02"), + "granularity": granularity, + }) +} + +// BatchUsersUsageRequest represents the request body for batch user usage stats +type BatchUsersUsageRequest struct { + UserIDs []int64 `json:"user_ids" binding:"required"` +} + +// GetBatchUsersUsage handles getting usage stats for multiple users +// POST /api/v1/admin/dashboard/users-usage +func (h *DashboardHandler) GetBatchUsersUsage(c *gin.Context) { + var req BatchUsersUsageRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.UserIDs) == 0 { + response.Success(c, gin.H{"stats": map[string]any{}}) + return + } + + stats, err := h.dashboardService.GetBatchUserUsageStats(c.Request.Context(), req.UserIDs) + if err != nil { + response.Error(c, 500, "Failed to get user usage stats") + return + } + + response.Success(c, gin.H{"stats": stats}) +} + +// BatchAPIKeysUsageRequest represents the request body for batch api key usage stats +type BatchAPIKeysUsageRequest struct { + APIKeyIDs []int64 `json:"api_key_ids" binding:"required"` +} + +// GetBatchAPIKeysUsage handles getting usage stats for multiple API keys +// POST /api/v1/admin/dashboard/api-keys-usage +func (h *DashboardHandler) GetBatchAPIKeysUsage(c *gin.Context) { + var req BatchAPIKeysUsageRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.APIKeyIDs) == 0 { + response.Success(c, gin.H{"stats": map[string]any{}}) + return + } + + stats, err := h.dashboardService.GetBatchAPIKeyUsageStats(c.Request.Context(), req.APIKeyIDs) + if err != nil { + 
response.Error(c, 500, "Failed to get API key usage stats")
+		return
+	}
+
+	response.Success(c, gin.H{"stats": stats})
+}
diff --git a/backend/internal/handler/admin/gemini_oauth_handler.go b/backend/internal/handler/admin/gemini_oauth_handler.go
new file mode 100644
index 00000000..50caaa26
--- /dev/null
+++ b/backend/internal/handler/admin/gemini_oauth_handler.go
@@ -0,0 +1,142 @@
+package admin
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+type GeminiOAuthHandler struct {
+	geminiOAuthService *service.GeminiOAuthService
+}
+
+func NewGeminiOAuthHandler(geminiOAuthService *service.GeminiOAuthService) *GeminiOAuthHandler {
+	return &GeminiOAuthHandler{geminiOAuthService: geminiOAuthService}
+}
+
+// GetCapabilities returns the Gemini OAuth configuration capabilities.
+// GET /api/v1/admin/gemini/oauth/capabilities
+func (h *GeminiOAuthHandler) GetCapabilities(c *gin.Context) {
+	cfg := h.geminiOAuthService.GetOAuthConfig()
+	response.Success(c, cfg)
+}
+
+type GeminiGenerateAuthURLRequest struct {
+	ProxyID *int64 `json:"proxy_id"`
+	ProjectID string `json:"project_id"`
+	// OAuth type: "code_assist" (requires project_id) or "ai_studio" (no project_id needed).
+	// Defaults to "code_assist" for backward compatibility.
+	OAuthType string `json:"oauth_type"`
+	// TierID is a user-selected tier to be used when auto detection is unavailable or fails.
+	TierID string `json:"tier_id"`
+}
+
+// GenerateAuthURL generates Google OAuth authorization URL for Gemini.
+// POST /api/v1/admin/gemini/oauth/auth-url
+func (h *GeminiOAuthHandler) GenerateAuthURL(c *gin.Context) {
+	var req GeminiGenerateAuthURLRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Default to code_assist for backward compatibility
+	oauthType := strings.TrimSpace(req.OAuthType)
+	if oauthType == "" {
+		oauthType = "code_assist"
+	}
+	if oauthType != "code_assist" && oauthType != "google_one" && oauthType != "ai_studio" {
+		response.BadRequest(c, "Invalid oauth_type: must be 'code_assist', 'google_one', or 'ai_studio'")
+		return
+	}
+
+	// Always pass the "hosted" callback URI; the OAuth service may override it depending on
+	// oauth_type and whether the built-in Gemini CLI OAuth client is used.
+	redirectURI := deriveGeminiRedirectURI(c)
+	result, err := h.geminiOAuthService.GenerateAuthURL(c.Request.Context(), req.ProxyID, redirectURI, req.ProjectID, oauthType, req.TierID)
+	if err != nil {
+		msg := err.Error()
+		// Treat missing/invalid OAuth client configuration as a user/config error.
+		if strings.Contains(msg, "OAuth client not configured") || strings.Contains(msg, "requires your own OAuth Client") {
+			response.BadRequest(c, "Failed to generate auth URL: "+msg)
+			return
+		}
+		response.InternalError(c, "Failed to generate auth URL: "+msg)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+type GeminiExchangeCodeRequest struct {
+	SessionID string `json:"session_id" binding:"required"`
+	State string `json:"state" binding:"required"`
+	Code string `json:"code" binding:"required"`
+	ProxyID *int64 `json:"proxy_id"`
+	// OAuth type: "code_assist" or "ai_studio"; must match the type used in GenerateAuthURL.
+	OAuthType string `json:"oauth_type"`
+	// TierID is a user-selected tier to be used when auto detection is unavailable or fails.
+	// This field is optional; when omitted, the server uses the tier stored in the OAuth session.
+	TierID string `json:"tier_id"`
+}
+
+// ExchangeCode exchanges authorization code for tokens.
+// POST /api/v1/admin/gemini/oauth/exchange-code
+func (h *GeminiOAuthHandler) ExchangeCode(c *gin.Context) {
+	var req GeminiExchangeCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Default to code_assist for backward compatibility
+	oauthType := strings.TrimSpace(req.OAuthType)
+	if oauthType == "" {
+		oauthType = "code_assist"
+	}
+	if oauthType != "code_assist" && oauthType != "google_one" && oauthType != "ai_studio" {
+		response.BadRequest(c, "Invalid oauth_type: must be 'code_assist', 'google_one', or 'ai_studio'")
+		return
+	}
+
+	tokenInfo, err := h.geminiOAuthService.ExchangeCode(c.Request.Context(), &service.GeminiExchangeCodeInput{
+		SessionID: req.SessionID,
+		State: req.State,
+		Code: req.Code,
+		ProxyID: req.ProxyID,
+		OAuthType: oauthType,
+		TierID: req.TierID,
+	})
+	if err != nil {
+		response.BadRequest(c, "Failed to exchange code: "+err.Error())
+		return
+	}
+
+	response.Success(c, tokenInfo)
+}
+
+func deriveGeminiRedirectURI(c *gin.Context) string {
+	origin := strings.TrimSpace(c.GetHeader("Origin"))
+	if origin != "" {
+		return strings.TrimRight(origin, "/") + "/auth/callback"
+	}
+
+	scheme := "http"
+	if c.Request.TLS != nil {
+		scheme = "https"
+	}
+	if xfProto := strings.TrimSpace(c.GetHeader("X-Forwarded-Proto")); xfProto != "" {
+		scheme = strings.TrimSpace(strings.Split(xfProto, ",")[0])
+	}
+
+	host := strings.TrimSpace(c.Request.Host)
+	if xfHost := strings.TrimSpace(c.GetHeader("X-Forwarded-Host")); xfHost != "" {
+		host = strings.TrimSpace(strings.Split(xfHost, ",")[0])
+	}
+
+	return fmt.Sprintf("%s://%s/auth/callback", scheme, host)
+}
diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go
new file mode 100644
index 00000000..a8bae35e
--- /dev/null
+++ b/backend/internal/handler/admin/group_handler.go
@@ -0,0 +1,274 @@
+package admin
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// GroupHandler handles admin group management
+type GroupHandler struct {
+	adminService service.AdminService
+}
+
+// NewGroupHandler creates a new admin group handler
+func NewGroupHandler(adminService service.AdminService) *GroupHandler {
+	return &GroupHandler{
+		adminService: adminService,
+	}
+}
+
+// CreateGroupRequest represents create group request
+type CreateGroupRequest struct {
+	Name string `json:"name" binding:"required"`
+	Description string `json:"description"`
+	Platform string `json:"platform" binding:"omitempty,oneof=anthropic openai gemini antigravity"`
+	RateMultiplier float64 `json:"rate_multiplier"`
+	IsExclusive bool `json:"is_exclusive"`
+	SubscriptionType string `json:"subscription_type" binding:"omitempty,oneof=standard subscription"`
+	DailyLimitUSD *float64 `json:"daily_limit_usd"`
+	WeeklyLimitUSD *float64 `json:"weekly_limit_usd"`
+	MonthlyLimitUSD *float64 `json:"monthly_limit_usd"`
+	// Image generation pricing (used by the antigravity and gemini platforms; a negative value clears the setting)
+	ImagePrice1K *float64 `json:"image_price_1k"`
+	ImagePrice2K *float64 `json:"image_price_2k"`
+	ImagePrice4K *float64 `json:"image_price_4k"`
+	ClaudeCodeOnly bool `json:"claude_code_only"`
+	FallbackGroupID *int64 `json:"fallback_group_id"`
+}
+
+// UpdateGroupRequest represents update group request
+type UpdateGroupRequest struct {
+	Name string `json:"name"`
+	Description string `json:"description"`
+	Platform string `json:"platform" binding:"omitempty,oneof=anthropic openai gemini antigravity"`
+	RateMultiplier *float64 `json:"rate_multiplier"`
+	IsExclusive *bool `json:"is_exclusive"`
+	Status string `json:"status" binding:"omitempty,oneof=active inactive"`
+	SubscriptionType string `json:"subscription_type" binding:"omitempty,oneof=standard subscription"`
+	DailyLimitUSD *float64 `json:"daily_limit_usd"`
+	WeeklyLimitUSD *float64 `json:"weekly_limit_usd"`
+	MonthlyLimitUSD *float64 `json:"monthly_limit_usd"`
+	// Image generation pricing (used by the antigravity and gemini platforms; a negative value clears the setting)
+	ImagePrice1K *float64 `json:"image_price_1k"`
+	ImagePrice2K *float64 `json:"image_price_2k"`
+	ImagePrice4K *float64 `json:"image_price_4k"`
+	ClaudeCodeOnly *bool `json:"claude_code_only"`
+	FallbackGroupID *int64 `json:"fallback_group_id"`
+}
+
+// List handles listing all groups with pagination
+// GET /api/v1/admin/groups
+func (h *GroupHandler) List(c *gin.Context) {
+	page, pageSize := response.ParsePagination(c)
+	platform := c.Query("platform")
+	status := c.Query("status")
+	search := c.Query("search")
+	// Normalize and cap the search parameter
+	search = strings.TrimSpace(search)
+	if len(search) > 100 {
+		search = search[:100]
+	}
+	isExclusiveStr := c.Query("is_exclusive")
+
+	var isExclusive *bool
+	if isExclusiveStr != "" {
+		val := isExclusiveStr == "true"
+		isExclusive = &val
+	}
+
+	groups, total, err := h.adminService.ListGroups(c.Request.Context(), page, pageSize, platform, status, search, isExclusive)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	outGroups := make([]dto.Group, 0, len(groups))
+	for i := range groups {
+		outGroups = append(outGroups, *dto.GroupFromService(&groups[i]))
+	}
+	response.Paginated(c, outGroups, total, page, pageSize)
+}
+
+// GetAll handles getting all active groups without pagination
+// GET /api/v1/admin/groups/all
+func (h *GroupHandler) GetAll(c *gin.Context) {
+	platform := c.Query("platform")
+
+	var groups []service.Group
+	var err error
+
+	if platform != "" {
+		groups, err = h.adminService.GetAllGroupsByPlatform(c.Request.Context(), platform)
+	} else {
+		groups, err = h.adminService.GetAllGroups(c.Request.Context())
+	}
+
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	outGroups := make([]dto.Group, 0, len(groups))
+	for i := range groups {
+		outGroups = append(outGroups, *dto.GroupFromService(&groups[i]))
+	}
+	response.Success(c, outGroups)
+}
+
+// GetByID handles getting a group by ID
+// GET /api/v1/admin/groups/:id
+func (h *GroupHandler) GetByID(c *gin.Context) {
+	groupID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid group ID")
+		return
+	}
+
+	group, err := h.adminService.GetGroup(c.Request.Context(), groupID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.GroupFromService(group))
+}
+
+// Create handles creating a new group
+// POST /api/v1/admin/groups
+func (h *GroupHandler) Create(c *gin.Context) {
+	var req CreateGroupRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	group, err := h.adminService.CreateGroup(c.Request.Context(), &service.CreateGroupInput{
+		Name: req.Name,
+		Description: req.Description,
+		Platform: req.Platform,
+		RateMultiplier: req.RateMultiplier,
+		IsExclusive: req.IsExclusive,
+		SubscriptionType: req.SubscriptionType,
+		DailyLimitUSD:
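The deriveGeminiRedirectURI helper above resolves the callback URL by precedence: the Origin header wins, then X-Forwarded-Proto/X-Forwarded-Host, then the direct host with a TLS-based scheme guess. A gin-free re-implementation of that precedence, useful for seeing the behavior in isolation (the inputs in main are made-up examples):

```go
package main

import (
	"fmt"
	"strings"
)

// deriveRedirectURI mirrors deriveGeminiRedirectURI without the gin context:
// Origin first, then forwarded proto/host headers, then the direct host.
func deriveRedirectURI(origin, xfProto, xfHost, host string, tls bool) string {
	if o := strings.TrimSpace(origin); o != "" {
		return strings.TrimRight(o, "/") + "/auth/callback"
	}
	scheme := "http"
	if tls {
		scheme = "https"
	}
	if p := strings.TrimSpace(xfProto); p != "" {
		// Only the first entry of a comma-separated header is honored.
		scheme = strings.TrimSpace(strings.Split(p, ",")[0])
	}
	if h := strings.TrimSpace(xfHost); h != "" {
		host = strings.TrimSpace(strings.Split(h, ",")[0])
	}
	return fmt.Sprintf("%s://%s/auth/callback", scheme, host)
}

func main() {
	fmt.Println(deriveRedirectURI("https://admin.example.com", "", "", "ignored", false))
	fmt.Println(deriveRedirectURI("", "https, http", "app.example.com", "backend:8080", false))
	fmt.Println(deriveRedirectURI("", "", "", "localhost:8080", true))
}
```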
req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.GroupFromService(group)) +} + +// Update handles updating a group +// PUT /api/v1/admin/groups/:id +func (h *GroupHandler) Update(c *gin.Context) { + groupID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group ID") + return + } + + var req UpdateGroupRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + group, err := h.adminService.UpdateGroup(c.Request.Context(), groupID, &service.UpdateGroupInput{ + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + Status: req.Status, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.GroupFromService(group)) +} + +// Delete handles deleting a group +// DELETE /api/v1/admin/groups/:id +func (h *GroupHandler) Delete(c *gin.Context) { + groupID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group ID") + return + } + + err = h.adminService.DeleteGroup(c.Request.Context(), groupID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Group deleted successfully"}) +} + +// GetStats handles getting group statistics +// GET /api/v1/admin/groups/:id/stats +func (h *GroupHandler) GetStats(c *gin.Context) { + groupID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group ID") + return + } + + // Return mock data for now + response.Success(c, gin.H{ + "total_api_keys": 0, + "active_api_keys": 0, + "total_requests": 0, + "total_cost": 0.0, + }) + _ = groupID // TODO: implement actual stats +} + +// GetGroupAPIKeys handles getting API keys in a group +// GET /api/v1/admin/groups/:id/api-keys +func (h *GroupHandler) GetGroupAPIKeys(c *gin.Context) { + groupID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group ID") + return + } + + page, pageSize := response.ParsePagination(c) + + keys, total, err := h.adminService.GetGroupAPIKeys(c.Request.Context(), groupID, page, pageSize) + if err != nil { + response.ErrorFrom(c, err) + return + } + + outKeys := make([]dto.APIKey, 0, len(keys)) + for i := range keys { + outKeys = append(outKeys, *dto.APIKeyFromService(&keys[i])) + } + response.Paginated(c, outKeys, total, page, pageSize) +} diff --git a/backend/internal/handler/admin/openai_oauth_handler.go b/backend/internal/handler/admin/openai_oauth_handler.go new file mode 100644 index 00000000..ed86fea9 --- /dev/null +++ b/backend/internal/handler/admin/openai_oauth_handler.go @@ -0,0 +1,229 @@ +package admin + +import ( + "strconv" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + 
"github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// OpenAIOAuthHandler handles OpenAI OAuth-related operations +type OpenAIOAuthHandler struct { + openaiOAuthService *service.OpenAIOAuthService + adminService service.AdminService +} + +// NewOpenAIOAuthHandler creates a new OpenAI OAuth handler +func NewOpenAIOAuthHandler(openaiOAuthService *service.OpenAIOAuthService, adminService service.AdminService) *OpenAIOAuthHandler { + return &OpenAIOAuthHandler{ + openaiOAuthService: openaiOAuthService, + adminService: adminService, + } +} + +// OpenAIGenerateAuthURLRequest represents the request for generating OpenAI auth URL +type OpenAIGenerateAuthURLRequest struct { + ProxyID *int64 `json:"proxy_id"` + RedirectURI string `json:"redirect_uri"` +} + +// GenerateAuthURL generates OpenAI OAuth authorization URL +// POST /api/v1/admin/openai/generate-auth-url +func (h *OpenAIOAuthHandler) GenerateAuthURL(c *gin.Context) { + var req OpenAIGenerateAuthURLRequest + if err := c.ShouldBindJSON(&req); err != nil { + // Allow empty body + req = OpenAIGenerateAuthURLRequest{} + } + + result, err := h.openaiOAuthService.GenerateAuthURL(c.Request.Context(), req.ProxyID, req.RedirectURI) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, result) +} + +// OpenAIExchangeCodeRequest represents the request for exchanging OpenAI auth code +type OpenAIExchangeCodeRequest struct { + SessionID string `json:"session_id" binding:"required"` + Code string `json:"code" binding:"required"` + RedirectURI string `json:"redirect_uri"` + ProxyID *int64 `json:"proxy_id"` +} + +// ExchangeCode exchanges OpenAI authorization code for tokens +// POST /api/v1/admin/openai/exchange-code +func (h *OpenAIOAuthHandler) ExchangeCode(c *gin.Context) { + var req OpenAIExchangeCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + tokenInfo, err := h.openaiOAuthService.ExchangeCode(c.Request.Context(), &service.OpenAIExchangeCodeInput{ + SessionID: req.SessionID, + Code: req.Code, + RedirectURI: req.RedirectURI, + ProxyID: req.ProxyID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, tokenInfo) +} + +// OpenAIRefreshTokenRequest represents the request for refreshing OpenAI token +type OpenAIRefreshTokenRequest struct { + RefreshToken string `json:"refresh_token" binding:"required"` + ProxyID *int64 `json:"proxy_id"` +} + +// RefreshToken refreshes an OpenAI OAuth token +// POST /api/v1/admin/openai/refresh-token +func (h *OpenAIOAuthHandler) RefreshToken(c *gin.Context) { + var req OpenAIRefreshTokenRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + var proxyURL string + if req.ProxyID != nil { + proxy, err := h.adminService.GetProxy(c.Request.Context(), *req.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + tokenInfo, err := h.openaiOAuthService.RefreshToken(c.Request.Context(), req.RefreshToken, proxyURL) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, tokenInfo) +} + +// RefreshAccountToken refreshes token for a specific OpenAI account +// POST /api/v1/admin/openai/accounts/:id/refresh +func (h *OpenAIOAuthHandler) RefreshAccountToken(c *gin.Context) { + accountID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, 
"Invalid account ID") + return + } + + // Get account + account, err := h.adminService.GetAccount(c.Request.Context(), accountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Ensure account is OpenAI platform + if !account.IsOpenAI() { + response.BadRequest(c, "Account is not an OpenAI account") + return + } + + // Only refresh OAuth-based accounts + if !account.IsOAuth() { + response.BadRequest(c, "Cannot refresh non-OAuth account credentials") + return + } + + // Use OpenAI OAuth service to refresh token + tokenInfo, err := h.openaiOAuthService.RefreshAccountToken(c.Request.Context(), account) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Build new credentials from token info + newCredentials := h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Preserve non-token settings from existing credentials + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + + updatedAccount, err := h.adminService.UpdateAccount(c.Request.Context(), accountID, &service.UpdateAccountInput{ + Credentials: newCredentials, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AccountFromService(updatedAccount)) +} + +// CreateAccountFromOAuth creates a new OpenAI OAuth account from token info +// POST /api/v1/admin/openai/create-from-oauth +func (h *OpenAIOAuthHandler) CreateAccountFromOAuth(c *gin.Context) { + var req struct { + SessionID string `json:"session_id" binding:"required"` + Code string `json:"code" binding:"required"` + RedirectURI string `json:"redirect_uri"` + ProxyID *int64 `json:"proxy_id"` + Name string `json:"name"` + Concurrency int `json:"concurrency"` + Priority int `json:"priority"` + GroupIDs []int64 `json:"group_ids"` + } + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Exchange code for tokens + tokenInfo, err := h.openaiOAuthService.ExchangeCode(c.Request.Context(), &service.OpenAIExchangeCodeInput{ + SessionID: req.SessionID, + Code: req.Code, + RedirectURI: req.RedirectURI, + ProxyID: req.ProxyID, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Build credentials from token info + credentials := h.openaiOAuthService.BuildAccountCredentials(tokenInfo) + + // Use email as default name if not provided + name := req.Name + if name == "" && tokenInfo.Email != "" { + name = tokenInfo.Email + } + if name == "" { + name = "OpenAI OAuth Account" + } + + // Create account + account, err := h.adminService.CreateAccount(c.Request.Context(), &service.CreateAccountInput{ + Name: name, + Platform: "openai", + Type: "oauth", + Credentials: credentials, + ProxyID: req.ProxyID, + Concurrency: req.Concurrency, + Priority: req.Priority, + GroupIDs: req.GroupIDs, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.AccountFromService(account)) +} diff --git a/backend/internal/handler/admin/ops_alerts_handler.go b/backend/internal/handler/admin/ops_alerts_handler.go new file mode 100644 index 00000000..c9da19c7 --- /dev/null +++ b/backend/internal/handler/admin/ops_alerts_handler.go @@ -0,0 +1,602 @@ +package admin + +import ( + "encoding/json" + "fmt" + "math" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + 
"github.com/gin-gonic/gin/binding" +) + +var validOpsAlertMetricTypes = []string{ + "success_rate", + "error_rate", + "upstream_error_rate", + "cpu_usage_percent", + "memory_usage_percent", + "concurrency_queue_depth", +} + +var validOpsAlertMetricTypeSet = func() map[string]struct{} { + set := make(map[string]struct{}, len(validOpsAlertMetricTypes)) + for _, v := range validOpsAlertMetricTypes { + set[v] = struct{}{} + } + return set +}() + +var validOpsAlertOperators = []string{">", "<", ">=", "<=", "==", "!="} + +var validOpsAlertOperatorSet = func() map[string]struct{} { + set := make(map[string]struct{}, len(validOpsAlertOperators)) + for _, v := range validOpsAlertOperators { + set[v] = struct{}{} + } + return set +}() + +var validOpsAlertSeverities = []string{"P0", "P1", "P2", "P3"} + +var validOpsAlertSeveritySet = func() map[string]struct{} { + set := make(map[string]struct{}, len(validOpsAlertSeverities)) + for _, v := range validOpsAlertSeverities { + set[v] = struct{}{} + } + return set +}() + +type opsAlertRuleValidatedInput struct { + Name string + MetricType string + Operator string + Threshold float64 + + Severity string + + WindowMinutes int + SustainedMinutes int + CooldownMinutes int + + Enabled bool + NotifyEmail bool + + WindowProvided bool + SustainedProvided bool + CooldownProvided bool + SeverityProvided bool + EnabledProvided bool + NotifyProvided bool +} + +func isPercentOrRateMetric(metricType string) bool { + switch metricType { + case "success_rate", + "error_rate", + "upstream_error_rate", + "cpu_usage_percent", + "memory_usage_percent": + return true + default: + return false + } +} + +func validateOpsAlertRulePayload(raw map[string]json.RawMessage) (*opsAlertRuleValidatedInput, error) { + if raw == nil { + return nil, fmt.Errorf("invalid request body") + } + + requiredFields := []string{"name", "metric_type", "operator", "threshold"} + for _, field := range requiredFields { + if _, ok := raw[field]; !ok { + return nil, fmt.Errorf("%s is required", field) + } + } + + var name string + if err := json.Unmarshal(raw["name"], &name); err != nil || strings.TrimSpace(name) == "" { + return nil, fmt.Errorf("name is required") + } + name = strings.TrimSpace(name) + + var metricType string + if err := json.Unmarshal(raw["metric_type"], &metricType); err != nil || strings.TrimSpace(metricType) == "" { + return nil, fmt.Errorf("metric_type is required") + } + metricType = strings.TrimSpace(metricType) + if _, ok := validOpsAlertMetricTypeSet[metricType]; !ok { + return nil, fmt.Errorf("metric_type must be one of: %s", strings.Join(validOpsAlertMetricTypes, ", ")) + } + + var operator string + if err := json.Unmarshal(raw["operator"], &operator); err != nil || strings.TrimSpace(operator) == "" { + return nil, fmt.Errorf("operator is required") + } + operator = strings.TrimSpace(operator) + if _, ok := validOpsAlertOperatorSet[operator]; !ok { + return nil, fmt.Errorf("operator must be one of: %s", strings.Join(validOpsAlertOperators, ", ")) + } + + var threshold float64 + if err := json.Unmarshal(raw["threshold"], &threshold); err != nil { + return nil, fmt.Errorf("threshold must be a number") + } + if math.IsNaN(threshold) || math.IsInf(threshold, 0) { + return nil, fmt.Errorf("threshold must be a finite number") + } + if isPercentOrRateMetric(metricType) { + if threshold < 0 || threshold > 100 { + return nil, fmt.Errorf("threshold must be between 0 and 100 for metric_type %s", metricType) + } + } else if threshold < 0 { + return nil, fmt.Errorf("threshold must be >= 
0") + } + + validated := &opsAlertRuleValidatedInput{ + Name: name, + MetricType: metricType, + Operator: operator, + Threshold: threshold, + } + + if v, ok := raw["severity"]; ok { + validated.SeverityProvided = true + var sev string + if err := json.Unmarshal(v, &sev); err != nil { + return nil, fmt.Errorf("severity must be a string") + } + sev = strings.ToUpper(strings.TrimSpace(sev)) + if sev != "" { + if _, ok := validOpsAlertSeveritySet[sev]; !ok { + return nil, fmt.Errorf("severity must be one of: %s", strings.Join(validOpsAlertSeverities, ", ")) + } + validated.Severity = sev + } + } + if validated.Severity == "" { + validated.Severity = "P2" + } + + if v, ok := raw["enabled"]; ok { + validated.EnabledProvided = true + if err := json.Unmarshal(v, &validated.Enabled); err != nil { + return nil, fmt.Errorf("enabled must be a boolean") + } + } else { + validated.Enabled = true + } + + if v, ok := raw["notify_email"]; ok { + validated.NotifyProvided = true + if err := json.Unmarshal(v, &validated.NotifyEmail); err != nil { + return nil, fmt.Errorf("notify_email must be a boolean") + } + } else { + validated.NotifyEmail = true + } + + if v, ok := raw["window_minutes"]; ok { + validated.WindowProvided = true + if err := json.Unmarshal(v, &validated.WindowMinutes); err != nil { + return nil, fmt.Errorf("window_minutes must be an integer") + } + switch validated.WindowMinutes { + case 1, 5, 60: + default: + return nil, fmt.Errorf("window_minutes must be one of: 1, 5, 60") + } + } else { + validated.WindowMinutes = 1 + } + + if v, ok := raw["sustained_minutes"]; ok { + validated.SustainedProvided = true + if err := json.Unmarshal(v, &validated.SustainedMinutes); err != nil { + return nil, fmt.Errorf("sustained_minutes must be an integer") + } + if validated.SustainedMinutes < 1 || validated.SustainedMinutes > 1440 { + return nil, fmt.Errorf("sustained_minutes must be between 1 and 1440") + } + } else { + validated.SustainedMinutes = 1 + } + + if v, ok := raw["cooldown_minutes"]; ok { + validated.CooldownProvided = true + if err := json.Unmarshal(v, &validated.CooldownMinutes); err != nil { + return nil, fmt.Errorf("cooldown_minutes must be an integer") + } + if validated.CooldownMinutes < 0 || validated.CooldownMinutes > 1440 { + return nil, fmt.Errorf("cooldown_minutes must be between 0 and 1440") + } + } else { + validated.CooldownMinutes = 0 + } + + return validated, nil +} + +// ListAlertRules returns all ops alert rules. +// GET /api/v1/admin/ops/alert-rules +func (h *OpsHandler) ListAlertRules(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + rules, err := h.opsService.ListAlertRules(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, rules) +} + +// CreateAlertRule creates an ops alert rule. 
+// POST /api/v1/admin/ops/alert-rules +func (h *OpsHandler) CreateAlertRule(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var raw map[string]json.RawMessage + if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + validated, err := validateOpsAlertRulePayload(raw) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + var rule service.OpsAlertRule + if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + rule.Name = validated.Name + rule.MetricType = validated.MetricType + rule.Operator = validated.Operator + rule.Threshold = validated.Threshold + rule.WindowMinutes = validated.WindowMinutes + rule.SustainedMinutes = validated.SustainedMinutes + rule.CooldownMinutes = validated.CooldownMinutes + rule.Severity = validated.Severity + rule.Enabled = validated.Enabled + rule.NotifyEmail = validated.NotifyEmail + + created, err := h.opsService.CreateAlertRule(c.Request.Context(), &rule) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, created) +} + +// UpdateAlertRule updates an existing ops alert rule. +// PUT /api/v1/admin/ops/alert-rules/:id +func (h *OpsHandler) UpdateAlertRule(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid rule ID") + return + } + + var raw map[string]json.RawMessage + if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + validated, err := validateOpsAlertRulePayload(raw) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + var rule service.OpsAlertRule + if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + rule.ID = id + rule.Name = validated.Name + rule.MetricType = validated.MetricType + rule.Operator = validated.Operator + rule.Threshold = validated.Threshold + rule.WindowMinutes = validated.WindowMinutes + rule.SustainedMinutes = validated.SustainedMinutes + rule.CooldownMinutes = validated.CooldownMinutes + rule.Severity = validated.Severity + rule.Enabled = validated.Enabled + rule.NotifyEmail = validated.NotifyEmail + + updated, err := h.opsService.UpdateAlertRule(c.Request.Context(), &rule) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, updated) +} + +// DeleteAlertRule deletes an ops alert rule. 
+// DELETE /api/v1/admin/ops/alert-rules/:id
+func (h *OpsHandler) DeleteAlertRule(c *gin.Context) {
+	if h.opsService == nil {
+		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+		return
+	}
+	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil || id <= 0 {
+		response.BadRequest(c, "Invalid rule ID")
+		return
+	}
+
+	if err := h.opsService.DeleteAlertRule(c.Request.Context(), id); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+	response.Success(c, gin.H{"deleted": true})
+}
+
+// GetAlertEvent returns a single ops alert event.
+// GET /api/v1/admin/ops/alert-events/:id
+func (h *OpsHandler) GetAlertEvent(c *gin.Context) {
+	if h.opsService == nil {
+		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+		return
+	}
+	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil || id <= 0 {
+		response.BadRequest(c, "Invalid event ID")
+		return
+	}
+
+	ev, err := h.opsService.GetAlertEventByID(c.Request.Context(), id)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+	response.Success(c, ev)
+}
+
+// UpdateAlertEventStatus updates an ops alert event status.
+// PUT /api/v1/admin/ops/alert-events/:id/status
+func (h *OpsHandler) UpdateAlertEventStatus(c *gin.Context) {
+	if h.opsService == nil {
+		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+		return
+	}
+	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil || id <= 0 {
+		response.BadRequest(c, "Invalid event ID")
+		return
+	}
+
+	var payload struct {
+		Status string `json:"status"`
+	}
+	if err := c.ShouldBindJSON(&payload); err != nil {
+		response.BadRequest(c, "Invalid request body")
+		return
+	}
+	payload.Status = strings.TrimSpace(payload.Status)
+	if payload.Status == "" {
+		response.BadRequest(c, "Invalid status")
+		return
+	}
+	if payload.Status != service.OpsAlertStatusResolved && payload.Status != service.OpsAlertStatusManualResolved {
+		response.BadRequest(c, "Invalid status")
+		return
+	}
+
+	var resolvedAt *time.Time
+	if payload.Status == service.OpsAlertStatusResolved || payload.Status == service.OpsAlertStatusManualResolved {
+		now := time.Now().UTC()
+		resolvedAt = &now
+	}
+	if err := h.opsService.UpdateAlertEventStatus(c.Request.Context(), id, payload.Status, resolvedAt); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+	response.Success(c, gin.H{"updated": true})
+}
+
+// CreateAlertSilence creates a scoped silence for ops alerts.
+// POST /api/v1/admin/ops/alert-silences
+func (h *OpsHandler) CreateAlertSilence(c *gin.Context) {
+	if h.opsService == nil {
+		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+		return
+	}
+	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	var payload struct {
+		RuleID   int64   `json:"rule_id"`
+		Platform string  `json:"platform"`
+		GroupID  *int64  `json:"group_id"`
+		Region   *string `json:"region"`
+		Until    string  `json:"until"`
+		Reason   string  `json:"reason"`
+	}
+	if err := c.ShouldBindJSON(&payload); err != nil {
+		response.BadRequest(c, "Invalid request body")
+		return
+	}
+	until, err := time.Parse(time.RFC3339, strings.TrimSpace(payload.Until))
+	if err != nil {
+		response.BadRequest(c, "Invalid until")
+		return
+	}
+
+	createdBy := (*int64)(nil)
+	if subject, ok := middleware.GetAuthSubjectFromContext(c); ok {
+		uid := subject.UserID
+		createdBy = &uid
+	}
+
+	silence := &service.OpsAlertSilence{
+		RuleID:    payload.RuleID,
+		Platform:  strings.TrimSpace(payload.Platform),
+		GroupID:   payload.GroupID,
+		Region:    payload.Region,
+		Until:     until,
+		Reason:    strings.TrimSpace(payload.Reason),
+		CreatedBy: createdBy,
+	}
+
+	created, err := h.opsService.CreateAlertSilence(c.Request.Context(), silence)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+	response.Success(c, created)
+}
+
+// ListAlertEvents lists recent ops alert events.
+// GET /api/v1/admin/ops/alert-events
+func (h *OpsHandler) ListAlertEvents(c *gin.Context) {
+	if h.opsService == nil {
+		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+		return
+	}
+	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	limit := 20
+	if raw := strings.TrimSpace(c.Query("limit")); raw != "" {
+		n, err := strconv.Atoi(raw)
+		if err != nil || n <= 0 {
+			response.BadRequest(c, "Invalid limit")
+			return
+		}
+		limit = n
+	}
+
+	filter := &service.OpsAlertEventFilter{
+		Limit:    limit,
+		Status:   strings.TrimSpace(c.Query("status")),
+		Severity: strings.TrimSpace(c.Query("severity")),
+	}
+
+	if v := strings.TrimSpace(c.Query("email_sent")); v != "" {
+		vv := strings.ToLower(v)
+		switch vv {
+		case "true", "1":
+			b := true
+			filter.EmailSent = &b
+		case "false", "0":
+			b := false
+			filter.EmailSent = &b
+		default:
+			response.BadRequest(c, "Invalid email_sent")
+			return
+		}
+	}
+
+	// Cursor pagination: both params must be provided together.
+	rawTS := strings.TrimSpace(c.Query("before_fired_at"))
+	rawID := strings.TrimSpace(c.Query("before_id"))
+	if (rawTS == "") != (rawID == "") {
+		response.BadRequest(c, "before_fired_at and before_id must be provided together")
+		return
+	}
+	if rawTS != "" {
+		ts, err := time.Parse(time.RFC3339Nano, rawTS)
+		if err != nil {
+			if t2, err2 := time.Parse(time.RFC3339, rawTS); err2 == nil {
+				ts = t2
+			} else {
+				response.BadRequest(c, "Invalid before_fired_at")
+				return
+			}
+		}
+		filter.BeforeFiredAt = &ts
+	}
+	if rawID != "" {
+		id, err := strconv.ParseInt(rawID, 10, 64)
+		if err != nil || id <= 0 {
+			response.BadRequest(c, "Invalid before_id")
+			return
+		}
+		filter.BeforeID = &id
+	}
+
+	// Optional global filter support (platform/group/time range).
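+	// (start_time/end_time take RFC3339 timestamps; time_range takes the
+	// presets understood by parseOpsDuration, e.g. "1h", "24h", "7d".)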
+ if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if startTime, endTime, err := parseOpsTimeRange(c, "24h"); err == nil { + // Only apply when explicitly provided to avoid surprising default narrowing. + if strings.TrimSpace(c.Query("start_time")) != "" || strings.TrimSpace(c.Query("end_time")) != "" || strings.TrimSpace(c.Query("time_range")) != "" { + filter.StartTime = &startTime + filter.EndTime = &endTime + } + } else { + response.BadRequest(c, err.Error()) + return + } + + events, err := h.opsService.ListAlertEvents(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, events) +} diff --git a/backend/internal/handler/admin/ops_dashboard_handler.go b/backend/internal/handler/admin/ops_dashboard_handler.go new file mode 100644 index 00000000..2c87f734 --- /dev/null +++ b/backend/internal/handler/admin/ops_dashboard_handler.go @@ -0,0 +1,243 @@ +package admin + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +// GetDashboardOverview returns vNext ops dashboard overview (raw path). +// GET /api/v1/admin/ops/dashboard/overview +func (h *OpsHandler) GetDashboardOverview(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetDashboardOverview(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardThroughputTrend returns throughput time series (raw path). 
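+// The bucket size is derived from the window by pickThroughputBucketSeconds
+// (60s for windows up to 2h, 300s up to 24h, 3600s beyond).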
+// GET /api/v1/admin/ops/dashboard/throughput-trend +func (h *OpsHandler) GetDashboardThroughputTrend(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime)) + data, err := h.opsService.GetThroughputTrend(c.Request.Context(), filter, bucketSeconds) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardLatencyHistogram returns the latency distribution histogram (success requests). +// GET /api/v1/admin/ops/dashboard/latency-histogram +func (h *OpsHandler) GetDashboardLatencyHistogram(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetLatencyHistogram(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardErrorTrend returns error counts time series (raw path). 
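+// Buckets use the same sizing as the throughput trend (see pickThroughputBucketSeconds).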
+// GET /api/v1/admin/ops/dashboard/error-trend +func (h *OpsHandler) GetDashboardErrorTrend(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime)) + data, err := h.opsService.GetErrorTrend(c.Request.Context(), filter, bucketSeconds) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardErrorDistribution returns error distribution by status code (raw path). +// GET /api/v1/admin/ops/dashboard/error-distribution +func (h *OpsHandler) GetDashboardErrorDistribution(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetErrorDistribution(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +func pickThroughputBucketSeconds(window time.Duration) int { + // Keep buckets predictable and avoid huge responses. + switch { + case window <= 2*time.Hour: + return 60 + case window <= 24*time.Hour: + return 300 + default: + return 3600 + } +} + +func parseOpsQueryMode(c *gin.Context) service.OpsQueryMode { + if c == nil { + return "" + } + raw := strings.TrimSpace(c.Query("mode")) + if raw == "" { + // Empty means "use server default" (DB setting ops_query_mode_default). + return "" + } + return service.ParseOpsQueryMode(raw) +} diff --git a/backend/internal/handler/admin/ops_handler.go b/backend/internal/handler/admin/ops_handler.go new file mode 100644 index 00000000..44accc8f --- /dev/null +++ b/backend/internal/handler/admin/ops_handler.go @@ -0,0 +1,925 @@ +package admin + +import ( + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +type OpsHandler struct { + opsService *service.OpsService +} + +// GetErrorLogByID returns ops error log detail. 
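+// The request-error and upstream-error detail endpoints proxy to this handler,
+// since both views are backed by the same error log storage.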
+// GET /api/v1/admin/ops/errors/:id +func (h *OpsHandler) GetErrorLogByID(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, detail) +} + +const ( + opsListViewErrors = "errors" + opsListViewExcluded = "excluded" + opsListViewAll = "all" +) + +func parseOpsViewParam(c *gin.Context) string { + if c == nil { + return "" + } + v := strings.ToLower(strings.TrimSpace(c.Query("view"))) + switch v { + case "", opsListViewErrors: + return opsListViewErrors + case opsListViewExcluded: + return opsListViewExcluded + case opsListViewAll: + return opsListViewAll + default: + return opsListViewErrors + } +} + +func NewOpsHandler(opsService *service.OpsService) *OpsHandler { + return &OpsHandler{opsService: opsService} +} + +// GetErrorLogs lists ops error logs. +// GET /api/v1/admin/ops/errors +func (h *OpsHandler) GetErrorLogs(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + page, pageSize := response.ParsePagination(c) + // Ops list can be larger than standard admin tables. + if pageSize > 500 { + pageSize = 500 + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + filter.View = parseOpsViewParam(c) + filter.Phase = strings.TrimSpace(c.Query("phase")) + filter.Owner = strings.TrimSpace(c.Query("error_owner")) + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + filter.UserQuery = strings.TrimSpace(c.Query("user_query")) + + // Force request errors: client-visible status >= 400. + // buildOpsErrorLogsWhere already applies this for non-upstream phase. 
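+	// Dropping an explicit "upstream" phase keeps this endpoint scoped to
+	// client-visible request errors.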
+ if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") { + filter.Phase = "" + } + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if v := strings.TrimSpace(c.Query("account_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid account_id") + return + } + filter.AccountID = &id + } + + if v := strings.TrimSpace(c.Query("resolved")); v != "" { + switch strings.ToLower(v) { + case "1", "true", "yes": + b := true + filter.Resolved = &b + case "0", "false", "no": + b := false + filter.Resolved = &b + default: + response.BadRequest(c, "Invalid resolved") + return + } + } + if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" { + parts := strings.Split(statusCodesStr, ",") + out := make([]int, 0, len(parts)) + for _, part := range parts { + p := strings.TrimSpace(part) + if p == "" { + continue + } + n, err := strconv.Atoi(p) + if err != nil || n < 0 { + response.BadRequest(c, "Invalid status_codes") + return + } + out = append(out, n) + } + filter.StatusCodes = out + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// ListRequestErrors lists client-visible request errors. +// GET /api/v1/admin/ops/request-errors +func (h *OpsHandler) ListRequestErrors(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 500 { + pageSize = 500 + } + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + filter.View = parseOpsViewParam(c) + filter.Phase = strings.TrimSpace(c.Query("phase")) + filter.Owner = strings.TrimSpace(c.Query("error_owner")) + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + filter.UserQuery = strings.TrimSpace(c.Query("user_query")) + + // Force request errors: client-visible status >= 400. + // buildOpsErrorLogsWhere already applies this for non-upstream phase. 
+ if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") { + filter.Phase = "" + } + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if v := strings.TrimSpace(c.Query("account_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid account_id") + return + } + filter.AccountID = &id + } + + if v := strings.TrimSpace(c.Query("resolved")); v != "" { + switch strings.ToLower(v) { + case "1", "true", "yes": + b := true + filter.Resolved = &b + case "0", "false", "no": + b := false + filter.Resolved = &b + default: + response.BadRequest(c, "Invalid resolved") + return + } + } + if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" { + parts := strings.Split(statusCodesStr, ",") + out := make([]int, 0, len(parts)) + for _, part := range parts { + p := strings.TrimSpace(part) + if p == "" { + continue + } + n, err := strconv.Atoi(p) + if err != nil || n < 0 { + response.BadRequest(c, "Invalid status_codes") + return + } + out = append(out, n) + } + filter.StatusCodes = out + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// GetRequestError returns request error detail. +// GET /api/v1/admin/ops/request-errors/:id +func (h *OpsHandler) GetRequestError(c *gin.Context) { + // same storage; just proxy to existing detail + h.GetErrorLogByID(c) +} + +// ListRequestErrorUpstreamErrors lists upstream error logs correlated to a request error. +// GET /api/v1/admin/ops/request-errors/:id/upstream-errors +func (h *OpsHandler) ListRequestErrorUpstreamErrors(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + // Load request error to get correlation keys. + detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Correlate by request_id/client_request_id. + requestID := strings.TrimSpace(detail.RequestID) + clientRequestID := strings.TrimSpace(detail.ClientRequestID) + if requestID == "" && clientRequestID == "" { + response.Paginated(c, []*service.OpsErrorLog{}, 0, 1, 10) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 500 { + pageSize = 500 + } + + // Keep correlation window wide enough so linked upstream errors + // are discoverable even when UI defaults to 1h elsewhere. 
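+	// The 30d default below is the widest window parseOpsTimeRange allows.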
+ startTime, endTime, err := parseOpsTimeRange(c, "30d") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + filter.View = "all" + filter.Phase = "upstream" + filter.Owner = "provider" + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + + // Prefer exact match on request_id; if missing, fall back to client_request_id. + if requestID != "" { + filter.RequestID = requestID + } else { + filter.ClientRequestID = clientRequestID + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // If client asks for details, expand each upstream error log to include upstream response fields. + includeDetail := strings.TrimSpace(c.Query("include_detail")) + if includeDetail == "1" || strings.EqualFold(includeDetail, "true") || strings.EqualFold(includeDetail, "yes") { + details := make([]*service.OpsErrorLogDetail, 0, len(result.Errors)) + for _, item := range result.Errors { + if item == nil { + continue + } + d, err := h.opsService.GetErrorLogByID(c.Request.Context(), item.ID) + if err != nil || d == nil { + continue + } + details = append(details, d) + } + response.Paginated(c, details, int64(result.Total), result.Page, result.PageSize) + return + } + + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// RetryRequestErrorClient retries the client request based on stored request body. +// POST /api/v1/admin/ops/request-errors/:id/retry-client +func (h *OpsHandler) RetryRequestErrorClient(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeClient, nil) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + +// RetryRequestErrorUpstreamEvent retries a specific upstream attempt using captured upstream_request_body. 
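+// The :idx path parameter selects one captured upstream attempt; it is
+// validated below as a non-negative integer.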
+// POST /api/v1/admin/ops/request-errors/:id/upstream-errors/:idx/retry +func (h *OpsHandler) RetryRequestErrorUpstreamEvent(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + idxStr := strings.TrimSpace(c.Param("idx")) + idx, err := strconv.Atoi(idxStr) + if err != nil || idx < 0 { + response.BadRequest(c, "Invalid upstream idx") + return + } + + result, err := h.opsService.RetryUpstreamEvent(c.Request.Context(), subject.UserID, id, idx) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + +// ResolveRequestError toggles resolved status. +// PUT /api/v1/admin/ops/request-errors/:id/resolve +func (h *OpsHandler) ResolveRequestError(c *gin.Context) { + h.UpdateErrorResolution(c) +} + +// ListUpstreamErrors lists independent upstream errors. +// GET /api/v1/admin/ops/upstream-errors +func (h *OpsHandler) ListUpstreamErrors(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 500 { + pageSize = 500 + } + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + + filter.View = parseOpsViewParam(c) + filter.Phase = "upstream" + filter.Owner = "provider" + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if v := strings.TrimSpace(c.Query("account_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid account_id") + return + } + filter.AccountID = &id + } + + if v := strings.TrimSpace(c.Query("resolved")); v != "" { + switch strings.ToLower(v) { + case "1", "true", "yes": + b := true + filter.Resolved = &b + case "0", "false", "no": + b := false + filter.Resolved = &b + default: + response.BadRequest(c, "Invalid resolved") + return + } + } + if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" { + parts := strings.Split(statusCodesStr, ",") + out := make([]int, 0, len(parts)) + for _, part := range parts { + p := strings.TrimSpace(part) + if p == "" { + continue + } + n, err := strconv.Atoi(p) + if err != nil || n < 0 { + response.BadRequest(c, "Invalid status_codes") + return + } + out = 
append(out, n) + } + filter.StatusCodes = out + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// GetUpstreamError returns upstream error detail. +// GET /api/v1/admin/ops/upstream-errors/:id +func (h *OpsHandler) GetUpstreamError(c *gin.Context) { + h.GetErrorLogByID(c) +} + +// RetryUpstreamError retries upstream error using the original account_id. +// POST /api/v1/admin/ops/upstream-errors/:id/retry +func (h *OpsHandler) RetryUpstreamError(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeUpstream, nil) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + +// ResolveUpstreamError toggles resolved status. +// PUT /api/v1/admin/ops/upstream-errors/:id/resolve +func (h *OpsHandler) ResolveUpstreamError(c *gin.Context) { + h.UpdateErrorResolution(c) +} + +// ==================== Existing endpoints ==================== + +// ListRequestDetails returns a request-level list (success + error) for drill-down. 
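+// Example (illustrative values): GET /api/v1/admin/ops/requests?platform=openai&min_duration_ms=1000&q=timeout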
+// GET /api/v1/admin/ops/requests +func (h *OpsHandler) ListRequestDetails(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 100 { + pageSize = 100 + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsRequestDetailFilter{ + Page: page, + PageSize: pageSize, + StartTime: &startTime, + EndTime: &endTime, + } + + filter.Kind = strings.TrimSpace(c.Query("kind")) + filter.Platform = strings.TrimSpace(c.Query("platform")) + filter.Model = strings.TrimSpace(c.Query("model")) + filter.RequestID = strings.TrimSpace(c.Query("request_id")) + filter.Query = strings.TrimSpace(c.Query("q")) + filter.Sort = strings.TrimSpace(c.Query("sort")) + + if v := strings.TrimSpace(c.Query("user_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid user_id") + return + } + filter.UserID = &id + } + if v := strings.TrimSpace(c.Query("api_key_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid api_key_id") + return + } + filter.APIKeyID = &id + } + if v := strings.TrimSpace(c.Query("account_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid account_id") + return + } + filter.AccountID = &id + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + if v := strings.TrimSpace(c.Query("min_duration_ms")); v != "" { + parsed, err := strconv.Atoi(v) + if err != nil || parsed < 0 { + response.BadRequest(c, "Invalid min_duration_ms") + return + } + filter.MinDurationMs = &parsed + } + if v := strings.TrimSpace(c.Query("max_duration_ms")); v != "" { + parsed, err := strconv.Atoi(v) + if err != nil || parsed < 0 { + response.BadRequest(c, "Invalid max_duration_ms") + return + } + filter.MaxDurationMs = &parsed + } + + out, err := h.opsService.ListRequestDetails(c.Request.Context(), filter) + if err != nil { + // Invalid sort/kind/platform etc should be a bad request; keep it simple. + if strings.Contains(strings.ToLower(err.Error()), "invalid") { + response.BadRequest(c, err.Error()) + return + } + response.Error(c, http.StatusInternalServerError, "Failed to list request details") + return + } + + response.Paginated(c, out.Items, out.Total, out.Page, out.PageSize) +} + +type opsRetryRequest struct { + Mode string `json:"mode"` + PinnedAccountID *int64 `json:"pinned_account_id"` + Force bool `json:"force"` +} + +type opsResolveRequest struct { + Resolved bool `json:"resolved"` +} + +// RetryErrorRequest retries a failed request using stored request_body. 
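+// The optional JSON body carries mode (defaulting to service.OpsRetryModeClient),
+// pinned_account_id, and force; an explicit upstream mode is rejected on this
+// legacy endpoint.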
+// POST /api/v1/admin/ops/errors/:id/retry +func (h *OpsHandler) RetryErrorRequest(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + req := opsRetryRequest{Mode: service.OpsRetryModeClient} + if err := c.ShouldBindJSON(&req); err != nil && !errors.Is(err, io.EOF) { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + if strings.TrimSpace(req.Mode) == "" { + req.Mode = service.OpsRetryModeClient + } + + // Force flag is currently a UI-level acknowledgement. Server may still enforce safety constraints. + _ = req.Force + + // Legacy endpoint safety: only allow retrying the client request here. + // Upstream retries must go through the split endpoints. + if strings.EqualFold(strings.TrimSpace(req.Mode), service.OpsRetryModeUpstream) { + response.BadRequest(c, "upstream retry is not supported on this endpoint") + return + } + + result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, req.Mode, req.PinnedAccountID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, result) +} + +// ListRetryAttempts lists retry attempts for an error log. +// GET /api/v1/admin/ops/errors/:id/retries +func (h *OpsHandler) ListRetryAttempts(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + limit := 50 + if v := strings.TrimSpace(c.Query("limit")); v != "" { + n, err := strconv.Atoi(v) + if err != nil || n <= 0 { + response.BadRequest(c, "Invalid limit") + return + } + limit = n + } + + items, err := h.opsService.ListRetryAttemptsByErrorID(c.Request.Context(), id, limit) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, items) +} + +// UpdateErrorResolution allows manual resolve/unresolve. 
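+// Body: {"resolved": true} or {"resolved": false}; the acting admin's user ID
+// is recorded with the change.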
+// PUT /api/v1/admin/ops/errors/:id/resolve +func (h *OpsHandler) UpdateErrorResolution(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + var req opsResolveRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + uid := subject.UserID + if err := h.opsService.UpdateErrorResolution(c.Request.Context(), id, req.Resolved, &uid, nil); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"ok": true}) +} + +func parseOpsTimeRange(c *gin.Context, defaultRange string) (time.Time, time.Time, error) { + startStr := strings.TrimSpace(c.Query("start_time")) + endStr := strings.TrimSpace(c.Query("end_time")) + + parseTS := func(s string) (time.Time, error) { + if s == "" { + return time.Time{}, nil + } + if t, err := time.Parse(time.RFC3339Nano, s); err == nil { + return t, nil + } + return time.Parse(time.RFC3339, s) + } + + start, err := parseTS(startStr) + if err != nil { + return time.Time{}, time.Time{}, err + } + end, err := parseTS(endStr) + if err != nil { + return time.Time{}, time.Time{}, err + } + + // start/end explicitly provided (even partially) + if startStr != "" || endStr != "" { + if end.IsZero() { + end = time.Now() + } + if start.IsZero() { + dur, _ := parseOpsDuration(defaultRange) + start = end.Add(-dur) + } + if start.After(end) { + return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: start_time must be <= end_time") + } + if end.Sub(start) > 30*24*time.Hour { + return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days") + } + return start, end, nil + } + + // time_range fallback + tr := strings.TrimSpace(c.Query("time_range")) + if tr == "" { + tr = defaultRange + } + dur, ok := parseOpsDuration(tr) + if !ok { + dur, _ = parseOpsDuration(defaultRange) + } + + end = time.Now() + start = end.Add(-dur) + if end.Sub(start) > 30*24*time.Hour { + return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days") + } + return start, end, nil +} + +func parseOpsDuration(v string) (time.Duration, bool) { + switch strings.TrimSpace(v) { + case "5m": + return 5 * time.Minute, true + case "30m": + return 30 * time.Minute, true + case "1h": + return time.Hour, true + case "6h": + return 6 * time.Hour, true + case "24h": + return 24 * time.Hour, true + case "7d": + return 7 * 24 * time.Hour, true + case "30d": + return 30 * 24 * time.Hour, true + default: + return 0, false + } +} diff --git a/backend/internal/handler/admin/ops_realtime_handler.go b/backend/internal/handler/admin/ops_realtime_handler.go new file mode 100644 index 00000000..4f15ec57 --- /dev/null +++ b/backend/internal/handler/admin/ops_realtime_handler.go @@ -0,0 +1,213 @@ +package admin + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +// GetConcurrencyStats 
returns real-time concurrency usage aggregated by platform/group/account. +// GET /api/v1/admin/ops/concurrency +func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + response.Success(c, gin.H{ + "enabled": false, + "platform": map[string]*service.PlatformConcurrencyInfo{}, + "group": map[int64]*service.GroupConcurrencyInfo{}, + "account": map[int64]*service.AccountConcurrencyInfo{}, + "timestamp": time.Now().UTC(), + }) + return + } + + platformFilter := strings.TrimSpace(c.Query("platform")) + var groupID *int64 + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = &id + } + + platform, group, account, collectedAt, err := h.opsService.GetConcurrencyStats(c.Request.Context(), platformFilter, groupID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + payload := gin.H{ + "enabled": true, + "platform": platform, + "group": group, + "account": account, + } + if collectedAt != nil { + payload["timestamp"] = collectedAt.UTC() + } + response.Success(c, payload) +} + +// GetAccountAvailability returns account availability statistics. +// GET /api/v1/admin/ops/account-availability +// +// Query params: +// - platform: optional +// - group_id: optional +func (h *OpsHandler) GetAccountAvailability(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + response.Success(c, gin.H{ + "enabled": false, + "platform": map[string]*service.PlatformAvailability{}, + "group": map[int64]*service.GroupAvailability{}, + "account": map[int64]*service.AccountAvailability{}, + "timestamp": time.Now().UTC(), + }) + return + } + + platform := strings.TrimSpace(c.Query("platform")) + var groupID *int64 + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = &id + } + + platformStats, groupStats, accountStats, collectedAt, err := h.opsService.GetAccountAvailabilityStats(c.Request.Context(), platform, groupID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + payload := gin.H{ + "enabled": true, + "platform": platformStats, + "group": groupStats, + "account": accountStats, + } + if collectedAt != nil { + payload["timestamp"] = collectedAt.UTC() + } + response.Success(c, payload) +} + +func parseOpsRealtimeWindow(v string) (time.Duration, string, bool) { + switch strings.ToLower(strings.TrimSpace(v)) { + case "", "1min", "1m": + return 1 * time.Minute, "1min", true + case "5min", "5m": + return 5 * time.Minute, "5min", true + case "30min", "30m": + return 30 * time.Minute, "30min", true + case "1h", "60m", "60min": + return 1 * time.Hour, "1h", true + default: + return 0, "", false + } +} + +// GetRealtimeTrafficSummary returns QPS/TPS current/peak/avg for the selected window. 
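+// Values are computed from raw usage records (OpsQueryModeRaw) over [now-window, now].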
+// GET /api/v1/admin/ops/realtime-traffic +// +// Query params: +// - window: 1min|5min|30min|1h (default: 1min) +// - platform: optional +// - group_id: optional +func (h *OpsHandler) GetRealtimeTrafficSummary(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + windowDur, windowLabel, ok := parseOpsRealtimeWindow(c.Query("window")) + if !ok { + response.BadRequest(c, "Invalid window") + return + } + + platform := strings.TrimSpace(c.Query("platform")) + var groupID *int64 + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = &id + } + + endTime := time.Now().UTC() + startTime := endTime.Add(-windowDur) + + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + disabledSummary := &service.OpsRealtimeTrafficSummary{ + Window: windowLabel, + StartTime: startTime, + EndTime: endTime, + Platform: platform, + GroupID: groupID, + QPS: service.OpsRateSummary{}, + TPS: service.OpsRateSummary{}, + } + response.Success(c, gin.H{ + "enabled": false, + "summary": disabledSummary, + "timestamp": endTime, + }) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: platform, + GroupID: groupID, + QueryMode: service.OpsQueryModeRaw, + } + + summary, err := h.opsService.GetRealtimeTrafficSummary(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + if summary != nil { + summary.Window = windowLabel + } + response.Success(c, gin.H{ + "enabled": true, + "summary": summary, + "timestamp": endTime, + }) +} diff --git a/backend/internal/handler/admin/ops_settings_handler.go b/backend/internal/handler/admin/ops_settings_handler.go new file mode 100644 index 00000000..ebc8bf49 --- /dev/null +++ b/backend/internal/handler/admin/ops_settings_handler.go @@ -0,0 +1,194 @@ +package admin + +import ( + "net/http" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +// GetEmailNotificationConfig returns Ops email notification config (DB-backed). +// GET /api/v1/admin/ops/email-notification/config +func (h *OpsHandler) GetEmailNotificationConfig(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetEmailNotificationConfig(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get email notification config") + return + } + response.Success(c, cfg) +} + +// UpdateEmailNotificationConfig updates Ops email notification config (DB-backed). 
+// PUT /api/v1/admin/ops/email-notification/config +func (h *OpsHandler) UpdateEmailNotificationConfig(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsEmailNotificationConfigUpdateRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateEmailNotificationConfig(c.Request.Context(), &req) + if err != nil { + // Most failures here are validation errors from request payload; treat as 400. + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} + +// GetAlertRuntimeSettings returns Ops alert evaluator runtime settings (DB-backed). +// GET /api/v1/admin/ops/runtime/alert +func (h *OpsHandler) GetAlertRuntimeSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetOpsAlertRuntimeSettings(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get alert runtime settings") + return + } + response.Success(c, cfg) +} + +// UpdateAlertRuntimeSettings updates Ops alert evaluator runtime settings (DB-backed). +// PUT /api/v1/admin/ops/runtime/alert +func (h *OpsHandler) UpdateAlertRuntimeSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsAlertRuntimeSettings + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateOpsAlertRuntimeSettings(c.Request.Context(), &req) + if err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} + +// GetAdvancedSettings returns Ops advanced settings (DB-backed). +// GET /api/v1/admin/ops/advanced-settings +func (h *OpsHandler) GetAdvancedSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetOpsAdvancedSettings(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get advanced settings") + return + } + response.Success(c, cfg) +} + +// UpdateAdvancedSettings updates Ops advanced settings (DB-backed). 
+// PUT /api/v1/admin/ops/advanced-settings +func (h *OpsHandler) UpdateAdvancedSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsAdvancedSettings + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateOpsAdvancedSettings(c.Request.Context(), &req) + if err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} + +// GetMetricThresholds returns Ops metric thresholds (DB-backed). +// GET /api/v1/admin/ops/settings/metric-thresholds +func (h *OpsHandler) GetMetricThresholds(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetMetricThresholds(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get metric thresholds") + return + } + response.Success(c, cfg) +} + +// UpdateMetricThresholds updates Ops metric thresholds (DB-backed). +// PUT /api/v1/admin/ops/settings/metric-thresholds +func (h *OpsHandler) UpdateMetricThresholds(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsMetricThresholds + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateMetricThresholds(c.Request.Context(), &req) + if err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} diff --git a/backend/internal/handler/admin/ops_ws_handler.go b/backend/internal/handler/admin/ops_ws_handler.go new file mode 100644 index 00000000..db7442e5 --- /dev/null +++ b/backend/internal/handler/admin/ops_ws_handler.go @@ -0,0 +1,771 @@ +package admin + +import ( + "context" + "encoding/json" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" +) + +type OpsWSProxyConfig struct { + TrustProxy bool + TrustedProxies []netip.Prefix + OriginPolicy string +} + +const ( + envOpsWSTrustProxy = "OPS_WS_TRUST_PROXY" + envOpsWSTrustedProxies = "OPS_WS_TRUSTED_PROXIES" + envOpsWSOriginPolicy = "OPS_WS_ORIGIN_POLICY" + envOpsWSMaxConns = "OPS_WS_MAX_CONNS" + envOpsWSMaxConnsPerIP = "OPS_WS_MAX_CONNS_PER_IP" +) + +const ( + OriginPolicyStrict = "strict" + OriginPolicyPermissive = "permissive" +) + +var opsWSProxyConfig = loadOpsWSProxyConfigFromEnv() + +var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + return isAllowedOpsWSOrigin(r) + }, + // Subprotocol negotiation: + // - The frontend passes ["sub2api-admin", "jwt."]. + // - We always select "sub2api-admin" so the token is never echoed back in the handshake response. 
+ Subprotocols: []string{"sub2api-admin"}, +} + +const ( + qpsWSPushInterval = 2 * time.Second + qpsWSRefreshInterval = 5 * time.Second + qpsWSRequestCountWindow = 1 * time.Minute + + defaultMaxWSConns = 100 + defaultMaxWSConnsPerIP = 20 +) + +var wsConnCount atomic.Int32 +var wsConnCountByIP sync.Map // map[string]*atomic.Int32 + +const qpsWSIdleStopDelay = 30 * time.Second + +const ( + opsWSCloseRealtimeDisabled = 4001 +) + +var qpsWSIdleStopMu sync.Mutex +var qpsWSIdleStopTimer *time.Timer + +func cancelQPSWSIdleStop() { + qpsWSIdleStopMu.Lock() + if qpsWSIdleStopTimer != nil { + qpsWSIdleStopTimer.Stop() + qpsWSIdleStopTimer = nil + } + qpsWSIdleStopMu.Unlock() +} + +func scheduleQPSWSIdleStop() { + qpsWSIdleStopMu.Lock() + if qpsWSIdleStopTimer != nil { + qpsWSIdleStopMu.Unlock() + return + } + qpsWSIdleStopTimer = time.AfterFunc(qpsWSIdleStopDelay, func() { + // Only stop if truly idle at fire time. + if wsConnCount.Load() == 0 { + qpsWSCache.Stop() + } + qpsWSIdleStopMu.Lock() + qpsWSIdleStopTimer = nil + qpsWSIdleStopMu.Unlock() + }) + qpsWSIdleStopMu.Unlock() +} + +type opsWSRuntimeLimits struct { + MaxConns int32 + MaxConnsPerIP int32 +} + +var opsWSLimits = loadOpsWSRuntimeLimitsFromEnv() + +const ( + qpsWSWriteTimeout = 10 * time.Second + qpsWSPongWait = 60 * time.Second + qpsWSPingInterval = 30 * time.Second + + // We don't expect clients to send application messages; we only read to process control frames (Pong/Close). + qpsWSMaxReadBytes = 1024 +) + +type opsWSQPSCache struct { + refreshInterval time.Duration + requestCountWindow time.Duration + + lastUpdatedUnixNano atomic.Int64 + payload atomic.Value // []byte + + opsService *service.OpsService + cancel context.CancelFunc + done chan struct{} + + mu sync.Mutex + running bool +} + +var qpsWSCache = &opsWSQPSCache{ + refreshInterval: qpsWSRefreshInterval, + requestCountWindow: qpsWSRequestCountWindow, +} + +func (c *opsWSQPSCache) start(opsService *service.OpsService) { + if c == nil || opsService == nil { + return + } + + for { + c.mu.Lock() + if c.running { + c.mu.Unlock() + return + } + + // If a previous refresh loop is currently stopping, wait for it to fully exit. + done := c.done + if done != nil { + c.mu.Unlock() + <-done + + c.mu.Lock() + if c.done == done && !c.running { + c.done = nil + } + c.mu.Unlock() + continue + } + + c.opsService = opsService + ctx, cancel := context.WithCancel(context.Background()) + c.cancel = cancel + c.done = make(chan struct{}) + done = c.done + c.running = true + c.mu.Unlock() + + go func() { + defer close(done) + c.refreshLoop(ctx) + }() + return + } +} + +// Stop stops the background refresh loop. +// It is safe to call multiple times. 
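+// Stop blocks until the refresh goroutine has fully exited, so callers can
+// assume no further refreshes occur once it returns.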
+func (c *opsWSQPSCache) Stop() { + if c == nil { + return + } + + c.mu.Lock() + if !c.running { + done := c.done + c.mu.Unlock() + if done != nil { + <-done + } + return + } + cancel := c.cancel + c.cancel = nil + c.running = false + c.opsService = nil + done := c.done + c.mu.Unlock() + + if cancel != nil { + cancel() + } + if done != nil { + <-done + } + + c.mu.Lock() + if c.done == done && !c.running { + c.done = nil + } + c.mu.Unlock() +} + +func (c *opsWSQPSCache) refreshLoop(ctx context.Context) { + ticker := time.NewTicker(c.refreshInterval) + defer ticker.Stop() + + c.refresh(ctx) + for { + select { + case <-ticker.C: + c.refresh(ctx) + case <-ctx.Done(): + return + } + } +} + +func (c *opsWSQPSCache) refresh(parentCtx context.Context) { + if c == nil { + return + } + + c.mu.Lock() + opsService := c.opsService + c.mu.Unlock() + if opsService == nil { + return + } + + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second) + defer cancel() + + now := time.Now().UTC() + stats, err := opsService.GetWindowStats(ctx, now.Add(-c.requestCountWindow), now) + if err != nil || stats == nil { + if err != nil { + log.Printf("[OpsWS] refresh: get window stats failed: %v", err) + } + return + } + + requestCount := stats.SuccessCount + stats.ErrorCountTotal + qps := 0.0 + tps := 0.0 + if c.requestCountWindow > 0 { + seconds := c.requestCountWindow.Seconds() + qps = roundTo1DP(float64(requestCount) / seconds) + tps = roundTo1DP(float64(stats.TokenConsumed) / seconds) + } + + payload := gin.H{ + "type": "qps_update", + "timestamp": now.Format(time.RFC3339), + "data": gin.H{ + "qps": qps, + "tps": tps, + "request_count": requestCount, + }, + } + + msg, err := json.Marshal(payload) + if err != nil { + log.Printf("[OpsWS] refresh: marshal payload failed: %v", err) + return + } + + c.payload.Store(msg) + c.lastUpdatedUnixNano.Store(now.UnixNano()) +} + +func roundTo1DP(v float64) float64 { + return math.Round(v*10) / 10 +} + +func (c *opsWSQPSCache) getPayload() []byte { + if c == nil { + return nil + } + if cached, ok := c.payload.Load().([]byte); ok && cached != nil { + return cached + } + return nil +} + +func closeWS(conn *websocket.Conn, code int, reason string) { + if conn == nil { + return + } + msg := websocket.FormatCloseMessage(code, reason) + _ = conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(qpsWSWriteTimeout)) + _ = conn.Close() +} + +// QPSWSHandler handles realtime QPS push via WebSocket. +// GET /api/v1/admin/ops/ws/qps +func (h *OpsHandler) QPSWSHandler(c *gin.Context) { + clientIP := requestClientIP(c.Request) + + if h == nil || h.opsService == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "ops service not initialized"}) + return + } + + // If realtime monitoring is disabled, prefer a successful WS upgrade followed by a clean close + // with a deterministic close code. This prevents clients from spinning on 404/1006 reconnect loops. + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + conn, err := upgrader.Upgrade(c.Writer, c.Request, nil) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "ops realtime monitoring is disabled"}) + return + } + closeWS(conn, opsWSCloseRealtimeDisabled, "realtime_disabled") + return + } + + cancelQPSWSIdleStop() + // Lazily start the background refresh loop so unit tests that never hit the + // websocket route don't spawn goroutines that depend on DB/Redis stubs. 
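+	// start() is a no-op while a loop is already running, so concurrent handlers
+	// only ever spin up a single refresh goroutine.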
+ qpsWSCache.start(h.opsService) + + // Reserve a global slot before upgrading the connection to keep the limit strict. + if !tryAcquireOpsWSTotalSlot(opsWSLimits.MaxConns) { + log.Printf("[OpsWS] connection limit reached: %d/%d", wsConnCount.Load(), opsWSLimits.MaxConns) + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"}) + return + } + defer func() { + if wsConnCount.Add(-1) == 0 { + scheduleQPSWSIdleStop() + } + }() + + if opsWSLimits.MaxConnsPerIP > 0 && clientIP != "" { + if !tryAcquireOpsWSIPSlot(clientIP, opsWSLimits.MaxConnsPerIP) { + log.Printf("[OpsWS] per-ip connection limit reached: ip=%s limit=%d", clientIP, opsWSLimits.MaxConnsPerIP) + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"}) + return + } + defer releaseOpsWSIPSlot(clientIP) + } + + conn, err := upgrader.Upgrade(c.Writer, c.Request, nil) + if err != nil { + log.Printf("[OpsWS] upgrade failed: %v", err) + return + } + + defer func() { + _ = conn.Close() + }() + + handleQPSWebSocket(c.Request.Context(), conn) +} + +func tryAcquireOpsWSTotalSlot(limit int32) bool { + if limit <= 0 { + return true + } + for { + current := wsConnCount.Load() + if current >= limit { + return false + } + if wsConnCount.CompareAndSwap(current, current+1) { + return true + } + } +} + +func tryAcquireOpsWSIPSlot(clientIP string, limit int32) bool { + if strings.TrimSpace(clientIP) == "" || limit <= 0 { + return true + } + + v, _ := wsConnCountByIP.LoadOrStore(clientIP, &atomic.Int32{}) + counter, ok := v.(*atomic.Int32) + if !ok { + return false + } + + for { + current := counter.Load() + if current >= limit { + return false + } + if counter.CompareAndSwap(current, current+1) { + return true + } + } +} + +func releaseOpsWSIPSlot(clientIP string) { + if strings.TrimSpace(clientIP) == "" { + return + } + + v, ok := wsConnCountByIP.Load(clientIP) + if !ok { + return + } + counter, ok := v.(*atomic.Int32) + if !ok { + return + } + next := counter.Add(-1) + if next <= 0 { + // Best-effort cleanup; safe even if a new slot was acquired concurrently. + wsConnCountByIP.Delete(clientIP) + } +} + +func handleQPSWebSocket(parentCtx context.Context, conn *websocket.Conn) { + if conn == nil { + return + } + + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + var closeOnce sync.Once + closeConn := func() { + closeOnce.Do(func() { + _ = conn.Close() + }) + } + + closeFrameCh := make(chan []byte, 1) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + + conn.SetReadLimit(qpsWSMaxReadBytes) + if err := conn.SetReadDeadline(time.Now().Add(qpsWSPongWait)); err != nil { + log.Printf("[OpsWS] set read deadline failed: %v", err) + return + } + conn.SetPongHandler(func(string) error { + return conn.SetReadDeadline(time.Now().Add(qpsWSPongWait)) + }) + conn.SetCloseHandler(func(code int, text string) error { + select { + case closeFrameCh <- websocket.FormatCloseMessage(code, text): + default: + } + cancel() + return nil + }) + + for { + _, _, err := conn.ReadMessage() + if err != nil { + if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { + log.Printf("[OpsWS] read failed: %v", err) + } + return + } + } + }() + + // Push QPS data every 2 seconds (values are globally cached and refreshed at most once per qpsWSRefreshInterval). + pushTicker := time.NewTicker(qpsWSPushInterval) + defer pushTicker.Stop() + + // Heartbeat ping every 30 seconds. 
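+	// If no pong arrives within qpsWSPongWait, the read deadline in the read
+	// goroutine expires and the connection is torn down.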
+ pingTicker := time.NewTicker(qpsWSPingInterval) + defer pingTicker.Stop() + + writeWithTimeout := func(messageType int, data []byte) error { + if err := conn.SetWriteDeadline(time.Now().Add(qpsWSWriteTimeout)); err != nil { + return err + } + return conn.WriteMessage(messageType, data) + } + + sendClose := func(closeFrame []byte) { + if closeFrame == nil { + closeFrame = websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + } + _ = writeWithTimeout(websocket.CloseMessage, closeFrame) + } + + for { + select { + case <-pushTicker.C: + msg := qpsWSCache.getPayload() + if msg == nil { + continue + } + if err := writeWithTimeout(websocket.TextMessage, msg); err != nil { + log.Printf("[OpsWS] write failed: %v", err) + cancel() + closeConn() + wg.Wait() + return + } + + case <-pingTicker.C: + if err := writeWithTimeout(websocket.PingMessage, nil); err != nil { + log.Printf("[OpsWS] ping failed: %v", err) + cancel() + closeConn() + wg.Wait() + return + } + + case closeFrame := <-closeFrameCh: + sendClose(closeFrame) + closeConn() + wg.Wait() + return + + case <-ctx.Done(): + var closeFrame []byte + select { + case closeFrame = <-closeFrameCh: + default: + } + sendClose(closeFrame) + + closeConn() + wg.Wait() + return + } + } +} + +func isAllowedOpsWSOrigin(r *http.Request) bool { + if r == nil { + return false + } + origin := strings.TrimSpace(r.Header.Get("Origin")) + if origin == "" { + switch strings.ToLower(strings.TrimSpace(opsWSProxyConfig.OriginPolicy)) { + case OriginPolicyStrict: + return false + case OriginPolicyPermissive, "": + return true + default: + return true + } + } + parsed, err := url.Parse(origin) + if err != nil || parsed.Hostname() == "" { + return false + } + originHost := strings.ToLower(parsed.Hostname()) + + trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r) + reqHost := hostWithoutPort(r.Host) + if trustProxyHeaders { + xfHost := strings.TrimSpace(r.Header.Get("X-Forwarded-Host")) + if xfHost != "" { + xfHost = strings.TrimSpace(strings.Split(xfHost, ",")[0]) + if xfHost != "" { + reqHost = hostWithoutPort(xfHost) + } + } + } + reqHost = strings.ToLower(reqHost) + if reqHost == "" { + return false + } + return originHost == reqHost +} + +func shouldTrustOpsWSProxyHeaders(r *http.Request) bool { + if r == nil { + return false + } + if !opsWSProxyConfig.TrustProxy { + return false + } + peerIP, ok := requestPeerIP(r) + if !ok { + return false + } + return isAddrInTrustedProxies(peerIP, opsWSProxyConfig.TrustedProxies) +} + +func requestPeerIP(r *http.Request) (netip.Addr, bool) { + if r == nil { + return netip.Addr{}, false + } + host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)) + if err != nil { + host = strings.TrimSpace(r.RemoteAddr) + } + host = strings.TrimPrefix(host, "[") + host = strings.TrimSuffix(host, "]") + if host == "" { + return netip.Addr{}, false + } + addr, err := netip.ParseAddr(host) + if err != nil { + return netip.Addr{}, false + } + return addr.Unmap(), true +} + +func requestClientIP(r *http.Request) string { + if r == nil { + return "" + } + + trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r) + if trustProxyHeaders { + xff := strings.TrimSpace(r.Header.Get("X-Forwarded-For")) + if xff != "" { + // Use the left-most entry (original client). If multiple proxies add values, they are comma-separated. 
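+			// e.g. "X-Forwarded-For: 203.0.113.7, 198.51.100.2" resolves to 203.0.113.7.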
+ xff = strings.TrimSpace(strings.Split(xff, ",")[0]) + xff = strings.TrimPrefix(xff, "[") + xff = strings.TrimSuffix(xff, "]") + if addr, err := netip.ParseAddr(xff); err == nil && addr.IsValid() { + return addr.Unmap().String() + } + } + } + + if peer, ok := requestPeerIP(r); ok && peer.IsValid() { + return peer.String() + } + return "" +} + +func isAddrInTrustedProxies(addr netip.Addr, trusted []netip.Prefix) bool { + if !addr.IsValid() { + return false + } + for _, p := range trusted { + if p.Contains(addr) { + return true + } + } + return false +} + +func loadOpsWSProxyConfigFromEnv() OpsWSProxyConfig { + cfg := OpsWSProxyConfig{ + TrustProxy: true, + TrustedProxies: defaultTrustedProxies(), + OriginPolicy: OriginPolicyPermissive, + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSTrustProxy)); v != "" { + if parsed, err := strconv.ParseBool(v); err == nil { + cfg.TrustProxy = parsed + } else { + log.Printf("[OpsWS] invalid %s=%q (expected bool); using default=%v", envOpsWSTrustProxy, v, cfg.TrustProxy) + } + } + + if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustedProxies)); raw != "" { + prefixes, invalid := parseTrustedProxyList(raw) + if len(invalid) > 0 { + log.Printf("[OpsWS] invalid %s entries ignored: %s", envOpsWSTrustedProxies, strings.Join(invalid, ", ")) + } + cfg.TrustedProxies = prefixes + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSOriginPolicy)); v != "" { + normalized := strings.ToLower(v) + switch normalized { + case OriginPolicyStrict, OriginPolicyPermissive: + cfg.OriginPolicy = normalized + default: + log.Printf("[OpsWS] invalid %s=%q (expected %q or %q); using default=%q", envOpsWSOriginPolicy, v, OriginPolicyStrict, OriginPolicyPermissive, cfg.OriginPolicy) + } + } + + return cfg +} + +func loadOpsWSRuntimeLimitsFromEnv() opsWSRuntimeLimits { + cfg := opsWSRuntimeLimits{ + MaxConns: defaultMaxWSConns, + MaxConnsPerIP: defaultMaxWSConnsPerIP, + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConns)); v != "" { + if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 { + cfg.MaxConns = int32(parsed) + } else { + log.Printf("[OpsWS] invalid %s=%q (expected int>0); using default=%d", envOpsWSMaxConns, v, cfg.MaxConns) + } + } + if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConnsPerIP)); v != "" { + if parsed, err := strconv.Atoi(v); err == nil && parsed >= 0 { + cfg.MaxConnsPerIP = int32(parsed) + } else { + log.Printf("[OpsWS] invalid %s=%q (expected int>=0); using default=%d", envOpsWSMaxConnsPerIP, v, cfg.MaxConnsPerIP) + } + } + return cfg +} + +func defaultTrustedProxies() []netip.Prefix { + prefixes, _ := parseTrustedProxyList("127.0.0.0/8,::1/128") + return prefixes +} + +func parseTrustedProxyList(raw string) (prefixes []netip.Prefix, invalid []string) { + for _, token := range strings.Split(raw, ",") { + item := strings.TrimSpace(token) + if item == "" { + continue + } + + var ( + p netip.Prefix + err error + ) + if strings.Contains(item, "/") { + p, err = netip.ParsePrefix(item) + } else { + var addr netip.Addr + addr, err = netip.ParseAddr(item) + if err == nil { + addr = addr.Unmap() + bits := 128 + if addr.Is4() { + bits = 32 + } + p = netip.PrefixFrom(addr, bits) + } + } + + if err != nil || !p.IsValid() { + invalid = append(invalid, item) + continue + } + + prefixes = append(prefixes, p.Masked()) + } + return prefixes, invalid +} + +func hostWithoutPort(hostport string) string { + hostport = strings.TrimSpace(hostport) + if hostport == "" { + return "" + } + if host, _, err := net.SplitHostPort(hostport); err == nil { + return host 
+	}
+	if strings.HasPrefix(hostport, "[") && strings.HasSuffix(hostport, "]") {
+		return strings.Trim(hostport, "[]")
+	}
+	parts := strings.Split(hostport, ":")
+	return parts[0]
+}
diff --git a/backend/internal/handler/admin/promo_handler.go b/backend/internal/handler/admin/promo_handler.go
new file mode 100644
index 00000000..3eafa380
--- /dev/null
+++ b/backend/internal/handler/admin/promo_handler.go
@@ -0,0 +1,209 @@
+package admin
+
+import (
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// PromoHandler handles admin promo code management
+type PromoHandler struct {
+	promoService *service.PromoService
+}
+
+// NewPromoHandler creates a new admin promo handler
+func NewPromoHandler(promoService *service.PromoService) *PromoHandler {
+	return &PromoHandler{
+		promoService: promoService,
+	}
+}
+
+// CreatePromoCodeRequest represents create promo code request
+type CreatePromoCodeRequest struct {
+	Code        string  `json:"code"`                                  // Optional; auto-generated when empty
+	BonusAmount float64 `json:"bonus_amount" binding:"required,min=0"` // Bonus balance to grant
+	MaxUses     int     `json:"max_uses" binding:"min=0"`              // Maximum number of uses; 0 = unlimited
+	ExpiresAt   *int64  `json:"expires_at"`                            // Expiration timestamp (seconds)
+	Notes       string  `json:"notes"`                                 // Notes
+}
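+
+// A minimal create payload, assuming the JSON tags above (illustrative values):
+//   {"bonus_amount": 10, "max_uses": 100, "expires_at": 1767225600, "notes": "launch promo"}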
+
+// UpdatePromoCodeRequest represents update promo code request
+type UpdatePromoCodeRequest struct {
+	Code        *string  `json:"code"`
+	BonusAmount *float64 `json:"bonus_amount" binding:"omitempty,min=0"`
+	MaxUses     *int     `json:"max_uses" binding:"omitempty,min=0"`
+	Status      *string  `json:"status" binding:"omitempty,oneof=active disabled"`
+	ExpiresAt   *int64   `json:"expires_at"`
+	Notes       *string  `json:"notes"`
+}
+
+// List handles listing all promo codes with pagination
+// GET /api/v1/admin/promo-codes
+func (h *PromoHandler) List(c *gin.Context) {
+	page, pageSize := response.ParsePagination(c)
+	status := c.Query("status")
+	search := strings.TrimSpace(c.Query("search"))
+	if len(search) > 100 {
+		search = search[:100]
+	}
+
+	params := pagination.PaginationParams{
+		Page:     page,
+		PageSize: pageSize,
+	}
+
+	codes, paginationResult, err := h.promoService.List(c.Request.Context(), params, status, search)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.PromoCode, 0, len(codes))
+	for i := range codes {
+		out = append(out, *dto.PromoCodeFromService(&codes[i]))
+	}
+	response.Paginated(c, out, paginationResult.Total, page, pageSize)
+}
+
+// GetByID handles getting a promo code by ID
+// GET /api/v1/admin/promo-codes/:id
+func (h *PromoHandler) GetByID(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid promo code ID")
+		return
+	}
+
+	code, err := h.promoService.GetByID(c.Request.Context(), codeID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.PromoCodeFromService(code))
+}
+
+// Create handles creating a new promo code
+// POST /api/v1/admin/promo-codes
+func (h *PromoHandler) Create(c *gin.Context) {
+	var req CreatePromoCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	input := &service.CreatePromoCodeInput{
+		Code:        req.Code,
+		BonusAmount: req.BonusAmount,
+		MaxUses:     req.MaxUses,
+		Notes:       req.Notes,
+	}
+
+	if req.ExpiresAt != nil {
+		t := time.Unix(*req.ExpiresAt, 0)
+		input.ExpiresAt = &t
+	}
+
+	code, err := h.promoService.Create(c.Request.Context(), input)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.PromoCodeFromService(code))
+}
+
+// Update handles updating a promo code
+// PUT /api/v1/admin/promo-codes/:id
+func (h *PromoHandler) Update(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid promo code ID")
+		return
+	}
+
+	var req UpdatePromoCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	input := &service.UpdatePromoCodeInput{
+		Code:        req.Code,
+		BonusAmount: req.BonusAmount,
+		MaxUses:     req.MaxUses,
+		Status:      req.Status,
+		Notes:       req.Notes,
+	}
+
+	if req.ExpiresAt != nil {
+		if *req.ExpiresAt == 0 {
+			// 0 clears the expiration time
+			input.ExpiresAt = nil
+		} else {
+			t := time.Unix(*req.ExpiresAt, 0)
+			input.ExpiresAt = &t
+		}
+	}
+
+	code, err := h.promoService.Update(c.Request.Context(), codeID, input)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.PromoCodeFromService(code))
+}
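+
+// Example: {"expires_at": 0} in the PUT body clears the expiration (see the branch above).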
+
+// Delete handles deleting a promo code
+// DELETE /api/v1/admin/promo-codes/:id
+func (h *PromoHandler) Delete(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid promo code ID")
+		return
+	}
+
+	err = h.promoService.Delete(c.Request.Context(), codeID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "Promo code deleted successfully"})
+}
+
+// GetUsages handles getting usage records for a promo code
+// GET /api/v1/admin/promo-codes/:id/usages
+func (h *PromoHandler) GetUsages(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid promo code ID")
+		return
+	}
+
+	page, pageSize := response.ParsePagination(c)
+	params := pagination.PaginationParams{
+		Page:     page,
+		PageSize: pageSize,
+	}
+
+	usages, paginationResult, err := h.promoService.ListUsages(c.Request.Context(), codeID, params)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.PromoCodeUsage, 0, len(usages))
+	for i := range usages {
+		out = append(out, *dto.PromoCodeUsageFromService(&usages[i]))
+	}
+	response.Paginated(c, out, paginationResult.Total, page, pageSize)
+}
diff --git a/backend/internal/handler/admin/proxy_handler.go b/backend/internal/handler/admin/proxy_handler.go
new file mode 100644
index 00000000..a6758f69
--- /dev/null
+++ b/backend/internal/handler/admin/proxy_handler.go
@@ -0,0 +1,348 @@
+package admin
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// ProxyHandler handles admin proxy management
+type ProxyHandler struct {
+	adminService service.AdminService
+}
+
+// NewProxyHandler creates a new admin proxy handler
+func NewProxyHandler(adminService service.AdminService) *ProxyHandler {
+	return &ProxyHandler{
+		adminService: adminService,
+	}
+}
+
+// CreateProxyRequest represents create proxy request
+type CreateProxyRequest struct {
+	Name     string `json:"name" binding:"required"`
+	Protocol string `json:"protocol" binding:"required,oneof=http https socks5 socks5h"`
+	Host     string `json:"host" binding:"required"`
+	Port     int    `json:"port" binding:"required,min=1,max=65535"`
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
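+
+// A minimal create payload, assuming the JSON tags above (illustrative values):
+//   {"name": "us-west-1", "protocol": "socks5", "host": "203.0.113.10", "port": 1080}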
+
+// UpdateProxyRequest represents update proxy request
+type UpdateProxyRequest struct {
+	Name     string `json:"name"`
+	Protocol string `json:"protocol" binding:"omitempty,oneof=http https socks5 socks5h"`
+	Host     string `json:"host"`
+	Port     int    `json:"port" binding:"omitempty,min=1,max=65535"`
+	Username string `json:"username"`
+	Password string `json:"password"`
+	Status   string `json:"status" binding:"omitempty,oneof=active inactive"`
+}
+
+// List handles listing all proxies with pagination
+// GET /api/v1/admin/proxies
+func (h *ProxyHandler) List(c *gin.Context) {
+	page, pageSize := response.ParsePagination(c)
+	protocol := c.Query("protocol")
+	status := c.Query("status")
+	search := c.Query("search")
+	// Normalize and validate the search parameter
+	search = strings.TrimSpace(search)
+	if len(search) > 100 {
+		search = search[:100]
+	}
+
+	proxies, total, err := h.adminService.ListProxiesWithAccountCount(c.Request.Context(), page, pageSize, protocol, status, search)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.ProxyWithAccountCount, 0, len(proxies))
+	for i := range proxies {
+		out = append(out, *dto.ProxyWithAccountCountFromService(&proxies[i]))
+	}
+	response.Paginated(c, out, total, page, pageSize)
+}
+
+// GetAll handles getting all active proxies without pagination
+// GET /api/v1/admin/proxies/all
+// Optional query param: with_count=true to include account count per proxy
+func (h *ProxyHandler) GetAll(c *gin.Context) {
+	withCount := c.Query("with_count") == "true"
+
+	if withCount {
+		proxies, err := h.adminService.GetAllProxiesWithAccountCount(c.Request.Context())
+		if err != nil {
+			response.ErrorFrom(c, err)
+			return
+		}
+		out := make([]dto.ProxyWithAccountCount, 0, len(proxies))
+		for i := range proxies {
+			out = append(out, *dto.ProxyWithAccountCountFromService(&proxies[i]))
+		}
+		response.Success(c, out)
+		return
+	}
+
+	proxies, err := h.adminService.GetAllProxies(c.Request.Context())
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.Proxy, 0, len(proxies))
+	for i := range proxies {
+		out = append(out, *dto.ProxyFromService(&proxies[i]))
+	}
+	response.Success(c, out)
+}
+
+// GetByID handles getting a proxy by ID
+// GET /api/v1/admin/proxies/:id
+func (h *ProxyHandler) GetByID(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	proxy, err := h.adminService.GetProxy(c.Request.Context(), proxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.ProxyFromService(proxy))
+}
+
+// Create handles creating a new proxy
+// POST /api/v1/admin/proxies
+func (h *ProxyHandler) Create(c *gin.Context) {
+	var req CreateProxyRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	proxy, err := h.adminService.CreateProxy(c.Request.Context(), &service.CreateProxyInput{
+		Name:     strings.TrimSpace(req.Name),
+		Protocol: strings.TrimSpace(req.Protocol),
+		Host:     strings.TrimSpace(req.Host),
+		Port:     req.Port,
+		Username: strings.TrimSpace(req.Username),
+		Password: strings.TrimSpace(req.Password),
+	})
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.ProxyFromService(proxy))
+}
+
+// Update handles updating a proxy
+// PUT /api/v1/admin/proxies/:id
+func (h *ProxyHandler) Update(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	var req UpdateProxyRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	proxy, err := h.adminService.UpdateProxy(c.Request.Context(), proxyID, &service.UpdateProxyInput{
+		Name:     strings.TrimSpace(req.Name),
+		Protocol: strings.TrimSpace(req.Protocol),
+		Host:     strings.TrimSpace(req.Host),
+		Port:     req.Port,
+		Username: strings.TrimSpace(req.Username),
+		Password: strings.TrimSpace(req.Password),
+		Status:   strings.TrimSpace(req.Status),
+	})
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.ProxyFromService(proxy))
+}
+
+// Delete handles deleting a proxy
+// DELETE /api/v1/admin/proxies/:id
+func (h *ProxyHandler) Delete(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	err = h.adminService.DeleteProxy(c.Request.Context(), proxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "Proxy deleted successfully"})
+}
+
+// BatchDelete handles batch deleting proxies
+// POST /api/v1/admin/proxies/batch-delete
+func (h *ProxyHandler) BatchDelete(c *gin.Context) {
+	type BatchDeleteRequest struct {
+		IDs []int64 `json:"ids" binding:"required,min=1"`
+	}
+
+	var req BatchDeleteRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	result, err := h.adminService.BatchDeleteProxies(c.Request.Context(), req.IDs)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// Test handles testing proxy connectivity
+// POST /api/v1/admin/proxies/:id/test
+func (h *ProxyHandler) Test(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	result, err := h.adminService.TestProxy(c.Request.Context(), proxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, result)
+}
+
+// GetStats handles getting proxy statistics
+// GET /api/v1/admin/proxies/:id/stats
+func (h *ProxyHandler) GetStats(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	// Return mock data for now
+	_ = proxyID
+	response.Success(c, gin.H{
+		"total_accounts":  0,
+		"active_accounts": 0,
+		"total_requests":  0,
+		"success_rate":    100.0,
+		"average_latency": 0,
+	})
+}
+
+// GetProxyAccounts handles getting accounts using a proxy
+// GET /api/v1/admin/proxies/:id/accounts
+func (h *ProxyHandler) GetProxyAccounts(c *gin.Context) {
+	proxyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid proxy ID")
+		return
+	}
+
+	accounts, err := h.adminService.GetProxyAccounts(c.Request.Context(), proxyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.ProxyAccountSummary, 0, len(accounts))
+	for i := range accounts {
+		out = append(out, *dto.ProxyAccountSummaryFromService(&accounts[i]))
+	}
+	response.Success(c, out)
+}
+
+// BatchCreateProxyItem represents a single proxy in batch create request
+type BatchCreateProxyItem struct {
+	Protocol string `json:"protocol" binding:"required,oneof=http https socks5 socks5h"`
+	Host     string `json:"host" binding:"required"`
+	Port     int    `json:"port" binding:"required,min=1,max=65535"`
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+// BatchCreateRequest represents batch create proxies request
+type BatchCreateRequest struct {
+	Proxies []BatchCreateProxyItem `json:"proxies" binding:"required,min=1"`
+}
+
+// BatchCreate handles batch creating proxies
+// POST /api/v1/admin/proxies/batch
+func (h *ProxyHandler) BatchCreate(c *gin.Context) {
+	var req BatchCreateRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	created := 0
+	skipped := 0
+
+	for _, item := range req.Proxies {
+		// Trim all string fields
+		host := strings.TrimSpace(item.Host)
+		protocol := strings.TrimSpace(item.Protocol)
+		username := strings.TrimSpace(item.Username)
+		password := strings.TrimSpace(item.Password)
+
+		// Check for duplicates (same host, port, username, password)
+		exists, err := h.adminService.CheckProxyExists(c.Request.Context(), host, item.Port, username, password)
+		if err != nil {
+			response.ErrorFrom(c, err)
+			return
+		}
+
+		if exists {
+			skipped++
+			continue
+		}
+
+		// Create proxy with default name
+		_, err = h.adminService.CreateProxy(c.Request.Context(), &service.CreateProxyInput{
+			Name:     "default",
+			Protocol: protocol,
+			Host:     host,
+			Port:     item.Port,
+			Username: username,
+			Password: password,
+		})
+		if err != nil {
+			// If creation fails due to duplicate, count as skipped
+			skipped++
+			continue
+		}
+
+		created++
+	}
+
+	response.Success(c, gin.H{
+		"created": created,
+		"skipped": skipped,
+	})
+}
diff --git a/backend/internal/handler/admin/redeem_handler.go b/backend/internal/handler/admin/redeem_handler.go
new file mode 100644
index 00000000..5b3229b6
--- /dev/null
+++ b/backend/internal/handler/admin/redeem_handler.go
@@ -0,0 +1,244 @@
+package admin
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RedeemHandler handles admin redeem code management
+type RedeemHandler struct {
+	adminService service.AdminService
+}
+
+// NewRedeemHandler creates a new admin redeem handler
+func NewRedeemHandler(adminService service.AdminService) *RedeemHandler {
+	return &RedeemHandler{
+		adminService: adminService,
+	}
+}
+
+// GenerateRedeemCodesRequest represents generate redeem codes request
+type GenerateRedeemCodesRequest struct {
+	Count        int     `json:"count" binding:"required,min=1,max=100"`
+	Type         string  `json:"type" binding:"required,oneof=balance concurrency subscription"`
+	Value        float64 `json:"value" binding:"min=0"`
+	GroupID      *int64  `json:"group_id"`                                    // Required for the subscription type
+	ValidityDays int     `json:"validity_days" binding:"omitempty,max=36500"` // Subscription type only; defaults to 30 days, capped at 100 years
+}
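+
+// A minimal generate payload, assuming the JSON tags above (illustrative values):
+//   {"count": 10, "type": "balance", "value": 5.0}
+// For subscription codes, group_id is required and validity_days defaults to 30.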
+
+// List handles listing all redeem codes with pagination
+// GET /api/v1/admin/redeem-codes
+func (h *RedeemHandler) List(c *gin.Context) {
+	page, pageSize := response.ParsePagination(c)
+	codeType := c.Query("type")
+	status := c.Query("status")
+	search := c.Query("search")
+	// Normalize and validate the search parameter
+	search = strings.TrimSpace(search)
+	if len(search) > 100 {
+		search = search[:100]
+	}
+
+	codes, total, err := h.adminService.ListRedeemCodes(c.Request.Context(), page, pageSize, codeType, status, search)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.RedeemCode, 0, len(codes))
+	for i := range codes {
+		out = append(out, *dto.RedeemCodeFromService(&codes[i]))
+	}
+	response.Paginated(c, out, total, page, pageSize)
+}
+
+// GetByID handles getting a redeem code by ID
+// GET /api/v1/admin/redeem-codes/:id
+func (h *RedeemHandler) GetByID(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid redeem code ID")
+		return
+	}
+
+	code, err := h.adminService.GetRedeemCode(c.Request.Context(), codeID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.RedeemCodeFromService(code))
+}
+
+// Generate handles generating new redeem codes
+// POST /api/v1/admin/redeem-codes/generate
+func (h *RedeemHandler) Generate(c *gin.Context) {
+	var req GenerateRedeemCodesRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	codes, err := h.adminService.GenerateRedeemCodes(c.Request.Context(), &service.GenerateRedeemCodesInput{
+		Count:        req.Count,
+		Type:         req.Type,
+		Value:        req.Value,
+		GroupID:      req.GroupID,
+		ValidityDays: req.ValidityDays,
+	})
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.RedeemCode, 0, len(codes))
+	for i := range codes {
+		out = append(out, *dto.RedeemCodeFromService(&codes[i]))
+	}
+	response.Success(c, out)
+}
+
+// Delete handles deleting a redeem code
+// DELETE /api/v1/admin/redeem-codes/:id
+func (h *RedeemHandler) Delete(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid redeem code ID")
+		return
+	}
+
+	err = h.adminService.DeleteRedeemCode(c.Request.Context(), codeID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "Redeem code deleted successfully"})
+}
+
+// BatchDelete handles batch deleting redeem codes
+// POST /api/v1/admin/redeem-codes/batch-delete
+func (h *RedeemHandler) BatchDelete(c *gin.Context) {
+	var req struct {
+		IDs []int64 `json:"ids" binding:"required,min=1"`
+	}
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	deleted, err := h.adminService.BatchDeleteRedeemCodes(c.Request.Context(), req.IDs)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{
+		"deleted": deleted,
+		"message": "Redeem codes deleted successfully",
+	})
+}
+
+// Expire handles expiring a redeem code
+// POST /api/v1/admin/redeem-codes/:id/expire
+func (h *RedeemHandler) Expire(c *gin.Context) {
+	codeID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid redeem code ID")
+		return
+	}
+
+	code, err := h.adminService.ExpireRedeemCode(c.Request.Context(), codeID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.RedeemCodeFromService(code))
+}
+
+// GetStats handles getting redeem code statistics
+// GET /api/v1/admin/redeem-codes/stats
+func (h *RedeemHandler) GetStats(c *gin.Context) {
+	// Return mock data for now; keys mirror the supported code types
+	response.Success(c, gin.H{
+		"total_codes":             0,
+		"active_codes":            0,
+		"used_codes":              0,
+		"expired_codes":           0,
+		"total_value_distributed": 0.0,
+		"by_type": gin.H{
+			"balance":      0,
+			"concurrency":  0,
+			"subscription": 0,
+		},
+	})
+}
+
+// Export handles exporting redeem codes to CSV
+// GET /api/v1/admin/redeem-codes/export
+func (h *RedeemHandler) Export(c *gin.Context) {
+	codeType := c.Query("type")
+	status := c.Query("status")
+
+	// Get all codes without pagination (use large page size)
+	codes, _, err := h.adminService.ListRedeemCodes(c.Request.Context(), 1, 10000, codeType, status, "")
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Create CSV buffer
+	var buf bytes.Buffer
+	writer := csv.NewWriter(&buf)
+
+	// Write header
+	if err := writer.Write([]string{"id", "code", "type", "value", "status", "used_by", "used_at", "created_at"}); err != nil {
+		response.InternalError(c, "Failed to export redeem codes: "+err.Error())
+		return
+	}
+
+	// Write data rows
+	for _, code := range codes {
+		usedBy := ""
+		if code.UsedBy != nil {
+			usedBy = fmt.Sprintf("%d", *code.UsedBy)
+		}
+		usedAt := ""
+		if code.UsedAt != nil {
+			usedAt = code.UsedAt.Format("2006-01-02 15:04:05")
+		}
+		if err := writer.Write([]string{
+			fmt.Sprintf("%d", code.ID),
+			code.Code,
+			code.Type,
+			fmt.Sprintf("%.2f", code.Value),
+			code.Status,
+			usedBy,
+			usedAt,
+			code.CreatedAt.Format("2006-01-02 15:04:05"),
+		}); err != nil {
+			response.InternalError(c, "Failed to export redeem codes: "+err.Error())
+			return
+		}
+	}
+
+	writer.Flush()
+	if err := writer.Error(); err != nil {
+		response.InternalError(c, "Failed to export redeem codes: "+err.Error())
+		return
+	}
+
+	c.Header("Content-Type", "text/csv")
+	c.Header("Content-Disposition", "attachment; filename=redeem_codes.csv")
+	c.Data(200, "text/csv", buf.Bytes())
+}
diff --git a/backend/internal/handler/admin/setting_handler.go b/backend/internal/handler/admin/setting_handler.go
new file mode 100644
index 00000000..6666ce4e
--- /dev/null
+++ b/backend/internal/handler/admin/setting_handler.go
@@ -0,0 +1,721 @@
+package admin
+
+import (
+	"log"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// SettingHandler handles system settings
+type SettingHandler struct {
+	settingService   *service.SettingService
+	emailService     *service.EmailService
+	turnstileService *service.TurnstileService
+	opsService       *service.OpsService
+}
+
+// NewSettingHandler creates a new system setting handler
+func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService, opsService *service.OpsService) *SettingHandler {
+	return &SettingHandler{
+		settingService:   settingService,
+		emailService:     emailService,
+		turnstileService: turnstileService,
+		opsService:       opsService,
+	}
+}
+
+// GetSettings returns all system settings
+// GET /api/v1/admin/settings
+func (h *SettingHandler) GetSettings(c *gin.Context) {
+	settings, err := h.settingService.GetAllSettings(c.Request.Context())
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Check if ops monitoring is enabled (respects config.ops.enabled)
+	opsEnabled := h.opsService != nil && h.opsService.IsMonitoringEnabled(c.Request.Context())
+
+	response.Success(c, dto.SystemSettings{
+		RegistrationEnabled:                  settings.RegistrationEnabled,
+		EmailVerifyEnabled:                   settings.EmailVerifyEnabled,
+		SMTPHost:                             settings.SMTPHost,
+		SMTPPort:                             settings.SMTPPort,
+		SMTPUsername:                         settings.SMTPUsername,
+		SMTPPasswordConfigured:               settings.SMTPPasswordConfigured,
+		SMTPFrom:                             settings.SMTPFrom,
+		SMTPFromName:                         settings.SMTPFromName,
+		SMTPUseTLS:                           settings.SMTPUseTLS,
+		TurnstileEnabled:                     settings.TurnstileEnabled,
+		TurnstileSiteKey:                     settings.TurnstileSiteKey,
+		TurnstileSecretKeyConfigured:         settings.TurnstileSecretKeyConfigured,
+		LinuxDoConnectEnabled:                settings.LinuxDoConnectEnabled,
+		LinuxDoConnectClientID:               settings.LinuxDoConnectClientID,
+		LinuxDoConnectClientSecretConfigured: settings.LinuxDoConnectClientSecretConfigured,
+		LinuxDoConnectRedirectURL:            settings.LinuxDoConnectRedirectURL,
+		SiteName:                             settings.SiteName,
+		SiteLogo:                             settings.SiteLogo,
+		SiteSubtitle:                         settings.SiteSubtitle,
+		APIBaseURL:                           settings.APIBaseURL,
+		ContactInfo:                          settings.ContactInfo,
+		DocURL:                               settings.DocURL,
+		HomeContent:                          settings.HomeContent,
+		DefaultConcurrency:                   settings.DefaultConcurrency,
+		DefaultBalance:                       settings.DefaultBalance,
+		EnableModelFallback:                  settings.EnableModelFallback,
+		FallbackModelAnthropic:               settings.FallbackModelAnthropic,
+		FallbackModelOpenAI:                  settings.FallbackModelOpenAI,
+		FallbackModelGemini:                  settings.FallbackModelGemini,
+		FallbackModelAntigravity:             settings.FallbackModelAntigravity,
+		EnableIdentityPatch:                  settings.EnableIdentityPatch,
+		IdentityPatchPrompt:                  settings.IdentityPatchPrompt,
+		OpsMonitoringEnabled:                 opsEnabled && settings.OpsMonitoringEnabled,
+		OpsRealtimeMonitoringEnabled:         settings.OpsRealtimeMonitoringEnabled,
+		OpsQueryModeDefault:                  settings.OpsQueryModeDefault,
+		OpsMetricsIntervalSeconds:            settings.OpsMetricsIntervalSeconds,
+	})
+}
+
+// UpdateSettingsRequest represents an update settings request
+type UpdateSettingsRequest struct {
+	// Registration settings
+	RegistrationEnabled bool `json:"registration_enabled"`
+	EmailVerifyEnabled  bool `json:"email_verify_enabled"`
+
+	// Email (SMTP) settings
+	SMTPHost     string `json:"smtp_host"`
+	SMTPPort     int    `json:"smtp_port"`
+	SMTPUsername string `json:"smtp_username"`
+	SMTPPassword string `json:"smtp_password"`
+	SMTPFrom     string `json:"smtp_from_email"`
+	SMTPFromName string `json:"smtp_from_name"`
+	SMTPUseTLS   bool   `json:"smtp_use_tls"`
+
+	// Cloudflare Turnstile settings
+	TurnstileEnabled   bool   `json:"turnstile_enabled"`
+	TurnstileSiteKey   string `json:"turnstile_site_key"`
+	TurnstileSecretKey string `json:"turnstile_secret_key"`
+
+	// LinuxDo Connect OAuth login
+	LinuxDoConnectEnabled      bool   `json:"linuxdo_connect_enabled"`
+	LinuxDoConnectClientID     string `json:"linuxdo_connect_client_id"`
+	LinuxDoConnectClientSecret string `json:"linuxdo_connect_client_secret"`
+	LinuxDoConnectRedirectURL  string `json:"linuxdo_connect_redirect_url"`
+
+	// OEM settings
+	SiteName     string `json:"site_name"`
+	SiteLogo     string `json:"site_logo"`
+	SiteSubtitle string `json:"site_subtitle"`
+	APIBaseURL   string `json:"api_base_url"`
+	ContactInfo  string `json:"contact_info"`
+	DocURL       string `json:"doc_url"`
+	HomeContent  string `json:"home_content"`
+
+	// Default configuration
+	DefaultConcurrency int     `json:"default_concurrency"`
+	DefaultBalance     float64 `json:"default_balance"`
+
+	// Model fallback configuration
+	EnableModelFallback      bool   `json:"enable_model_fallback"`
+	FallbackModelAnthropic   string `json:"fallback_model_anthropic"`
+	FallbackModelOpenAI      string `json:"fallback_model_openai"`
+	FallbackModelGemini      string `json:"fallback_model_gemini"`
+	FallbackModelAntigravity string `json:"fallback_model_antigravity"`
+
+	// Identity patch configuration (Claude -> Gemini)
+	EnableIdentityPatch bool   `json:"enable_identity_patch"`
+	IdentityPatchPrompt string `json:"identity_patch_prompt"`
+
+	// Ops monitoring (vNext)
+	OpsMonitoringEnabled         *bool   `json:"ops_monitoring_enabled"`
+	OpsRealtimeMonitoringEnabled *bool   `json:"ops_realtime_monitoring_enabled"`
+	OpsQueryModeDefault          *string `json:"ops_query_mode_default"`
+	OpsMetricsIntervalSeconds    *int    `json:"ops_metrics_interval_seconds"`
+}
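+
+// The ops fields are pointers so omitted keys keep their stored values; e.g.
+//   {"ops_metrics_interval_seconds": 120}
+// updates only the collector interval.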
`json:"ops_query_mode_default"` + OpsMetricsIntervalSeconds *int `json:"ops_metrics_interval_seconds"` +} + +// UpdateSettings 更新系统设置 +// PUT /api/v1/admin/settings +func (h *SettingHandler) UpdateSettings(c *gin.Context) { + var req UpdateSettingsRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + previousSettings, err := h.settingService.GetAllSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // 验证参数 + if req.DefaultConcurrency < 1 { + req.DefaultConcurrency = 1 + } + if req.DefaultBalance < 0 { + req.DefaultBalance = 0 + } + if req.SMTPPort <= 0 { + req.SMTPPort = 587 + } + + // Turnstile 参数验证 + if req.TurnstileEnabled { + // 检查必填字段 + if req.TurnstileSiteKey == "" { + response.BadRequest(c, "Turnstile Site Key is required when enabled") + return + } + // 如果未提供 secret key,使用已保存的值(留空保留当前值) + if req.TurnstileSecretKey == "" { + if previousSettings.TurnstileSecretKey == "" { + response.BadRequest(c, "Turnstile Secret Key is required when enabled") + return + } + req.TurnstileSecretKey = previousSettings.TurnstileSecretKey + } + + // 当 site_key 或 secret_key 任一变化时验证(避免配置错误导致无法登录) + siteKeyChanged := previousSettings.TurnstileSiteKey != req.TurnstileSiteKey + secretKeyChanged := previousSettings.TurnstileSecretKey != req.TurnstileSecretKey + if siteKeyChanged || secretKeyChanged { + if err := h.turnstileService.ValidateSecretKey(c.Request.Context(), req.TurnstileSecretKey); err != nil { + response.ErrorFrom(c, err) + return + } + } + } + + // LinuxDo Connect 参数验证 + if req.LinuxDoConnectEnabled { + req.LinuxDoConnectClientID = strings.TrimSpace(req.LinuxDoConnectClientID) + req.LinuxDoConnectClientSecret = strings.TrimSpace(req.LinuxDoConnectClientSecret) + req.LinuxDoConnectRedirectURL = strings.TrimSpace(req.LinuxDoConnectRedirectURL) + + if req.LinuxDoConnectClientID == "" { + response.BadRequest(c, "LinuxDo Client ID is required when enabled") + return + } + if req.LinuxDoConnectRedirectURL == "" { + response.BadRequest(c, "LinuxDo Redirect URL is required when enabled") + return + } + if err := config.ValidateAbsoluteHTTPURL(req.LinuxDoConnectRedirectURL); err != nil { + response.BadRequest(c, "LinuxDo Redirect URL must be an absolute http(s) URL") + return + } + + // 如果未提供 client_secret,则保留现有值(如有)。 + if req.LinuxDoConnectClientSecret == "" { + if previousSettings.LinuxDoConnectClientSecret == "" { + response.BadRequest(c, "LinuxDo Client Secret is required when enabled") + return + } + req.LinuxDoConnectClientSecret = previousSettings.LinuxDoConnectClientSecret + } + } + + // Ops metrics collector interval validation (seconds). 
+ if req.OpsMetricsIntervalSeconds != nil { + v := *req.OpsMetricsIntervalSeconds + if v < 60 { + v = 60 + } + if v > 3600 { + v = 3600 + } + req.OpsMetricsIntervalSeconds = &v + } + + settings := &service.SystemSettings{ + RegistrationEnabled: req.RegistrationEnabled, + EmailVerifyEnabled: req.EmailVerifyEnabled, + SMTPHost: req.SMTPHost, + SMTPPort: req.SMTPPort, + SMTPUsername: req.SMTPUsername, + SMTPPassword: req.SMTPPassword, + SMTPFrom: req.SMTPFrom, + SMTPFromName: req.SMTPFromName, + SMTPUseTLS: req.SMTPUseTLS, + TurnstileEnabled: req.TurnstileEnabled, + TurnstileSiteKey: req.TurnstileSiteKey, + TurnstileSecretKey: req.TurnstileSecretKey, + LinuxDoConnectEnabled: req.LinuxDoConnectEnabled, + LinuxDoConnectClientID: req.LinuxDoConnectClientID, + LinuxDoConnectClientSecret: req.LinuxDoConnectClientSecret, + LinuxDoConnectRedirectURL: req.LinuxDoConnectRedirectURL, + SiteName: req.SiteName, + SiteLogo: req.SiteLogo, + SiteSubtitle: req.SiteSubtitle, + APIBaseURL: req.APIBaseURL, + ContactInfo: req.ContactInfo, + DocURL: req.DocURL, + HomeContent: req.HomeContent, + DefaultConcurrency: req.DefaultConcurrency, + DefaultBalance: req.DefaultBalance, + EnableModelFallback: req.EnableModelFallback, + FallbackModelAnthropic: req.FallbackModelAnthropic, + FallbackModelOpenAI: req.FallbackModelOpenAI, + FallbackModelGemini: req.FallbackModelGemini, + FallbackModelAntigravity: req.FallbackModelAntigravity, + EnableIdentityPatch: req.EnableIdentityPatch, + IdentityPatchPrompt: req.IdentityPatchPrompt, + OpsMonitoringEnabled: func() bool { + if req.OpsMonitoringEnabled != nil { + return *req.OpsMonitoringEnabled + } + return previousSettings.OpsMonitoringEnabled + }(), + OpsRealtimeMonitoringEnabled: func() bool { + if req.OpsRealtimeMonitoringEnabled != nil { + return *req.OpsRealtimeMonitoringEnabled + } + return previousSettings.OpsRealtimeMonitoringEnabled + }(), + OpsQueryModeDefault: func() string { + if req.OpsQueryModeDefault != nil { + return *req.OpsQueryModeDefault + } + return previousSettings.OpsQueryModeDefault + }(), + OpsMetricsIntervalSeconds: func() int { + if req.OpsMetricsIntervalSeconds != nil { + return *req.OpsMetricsIntervalSeconds + } + return previousSettings.OpsMetricsIntervalSeconds + }(), + } + + if err := h.settingService.UpdateSettings(c.Request.Context(), settings); err != nil { + response.ErrorFrom(c, err) + return + } + + h.auditSettingsUpdate(c, previousSettings, settings, req) + + // Re-fetch the settings to return the persisted values + updatedSettings, err := h.settingService.GetAllSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.SystemSettings{ + RegistrationEnabled: updatedSettings.RegistrationEnabled, + EmailVerifyEnabled: updatedSettings.EmailVerifyEnabled, + SMTPHost: updatedSettings.SMTPHost, + SMTPPort: updatedSettings.SMTPPort, + SMTPUsername: updatedSettings.SMTPUsername, + SMTPPasswordConfigured: updatedSettings.SMTPPasswordConfigured, + SMTPFrom: updatedSettings.SMTPFrom, + SMTPFromName: updatedSettings.SMTPFromName, + SMTPUseTLS: updatedSettings.SMTPUseTLS, + TurnstileEnabled: updatedSettings.TurnstileEnabled, + TurnstileSiteKey: updatedSettings.TurnstileSiteKey, + TurnstileSecretKeyConfigured: updatedSettings.TurnstileSecretKeyConfigured, + LinuxDoConnectEnabled: updatedSettings.LinuxDoConnectEnabled, + LinuxDoConnectClientID: updatedSettings.LinuxDoConnectClientID, + LinuxDoConnectClientSecretConfigured: updatedSettings.LinuxDoConnectClientSecretConfigured, + LinuxDoConnectRedirectURL:
updatedSettings.LinuxDoConnectRedirectURL, + SiteName: updatedSettings.SiteName, + SiteLogo: updatedSettings.SiteLogo, + SiteSubtitle: updatedSettings.SiteSubtitle, + APIBaseURL: updatedSettings.APIBaseURL, + ContactInfo: updatedSettings.ContactInfo, + DocURL: updatedSettings.DocURL, + HomeContent: updatedSettings.HomeContent, + DefaultConcurrency: updatedSettings.DefaultConcurrency, + DefaultBalance: updatedSettings.DefaultBalance, + EnableModelFallback: updatedSettings.EnableModelFallback, + FallbackModelAnthropic: updatedSettings.FallbackModelAnthropic, + FallbackModelOpenAI: updatedSettings.FallbackModelOpenAI, + FallbackModelGemini: updatedSettings.FallbackModelGemini, + FallbackModelAntigravity: updatedSettings.FallbackModelAntigravity, + EnableIdentityPatch: updatedSettings.EnableIdentityPatch, + IdentityPatchPrompt: updatedSettings.IdentityPatchPrompt, + OpsMonitoringEnabled: updatedSettings.OpsMonitoringEnabled, + OpsRealtimeMonitoringEnabled: updatedSettings.OpsRealtimeMonitoringEnabled, + OpsQueryModeDefault: updatedSettings.OpsQueryModeDefault, + OpsMetricsIntervalSeconds: updatedSettings.OpsMetricsIntervalSeconds, + }) +} + +func (h *SettingHandler) auditSettingsUpdate(c *gin.Context, before *service.SystemSettings, after *service.SystemSettings, req UpdateSettingsRequest) { + if before == nil || after == nil { + return + } + + changed := diffSettings(before, after, req) + if len(changed) == 0 { + return + } + + subject, _ := middleware.GetAuthSubjectFromContext(c) + role, _ := middleware.GetUserRoleFromContext(c) + log.Printf("AUDIT: settings updated at=%s user_id=%d role=%s changed=%v", + time.Now().UTC().Format(time.RFC3339), + subject.UserID, + role, + changed, + ) +} + +func diffSettings(before *service.SystemSettings, after *service.SystemSettings, req UpdateSettingsRequest) []string { + changed := make([]string, 0, 20) + if before.RegistrationEnabled != after.RegistrationEnabled { + changed = append(changed, "registration_enabled") + } + if before.EmailVerifyEnabled != after.EmailVerifyEnabled { + changed = append(changed, "email_verify_enabled") + } + if before.SMTPHost != after.SMTPHost { + changed = append(changed, "smtp_host") + } + if before.SMTPPort != after.SMTPPort { + changed = append(changed, "smtp_port") + } + if before.SMTPUsername != after.SMTPUsername { + changed = append(changed, "smtp_username") + } + if req.SMTPPassword != "" { + changed = append(changed, "smtp_password") + } + if before.SMTPFrom != after.SMTPFrom { + changed = append(changed, "smtp_from_email") + } + if before.SMTPFromName != after.SMTPFromName { + changed = append(changed, "smtp_from_name") + } + if before.SMTPUseTLS != after.SMTPUseTLS { + changed = append(changed, "smtp_use_tls") + } + if before.TurnstileEnabled != after.TurnstileEnabled { + changed = append(changed, "turnstile_enabled") + } + if before.TurnstileSiteKey != after.TurnstileSiteKey { + changed = append(changed, "turnstile_site_key") + } + if req.TurnstileSecretKey != "" { + changed = append(changed, "turnstile_secret_key") + } + if before.LinuxDoConnectEnabled != after.LinuxDoConnectEnabled { + changed = append(changed, "linuxdo_connect_enabled") + } + if before.LinuxDoConnectClientID != after.LinuxDoConnectClientID { + changed = append(changed, "linuxdo_connect_client_id") + } + if req.LinuxDoConnectClientSecret != "" { + changed = append(changed, "linuxdo_connect_client_secret") + } + if before.LinuxDoConnectRedirectURL != after.LinuxDoConnectRedirectURL { + changed = append(changed, "linuxdo_connect_redirect_url") + 
} + if before.SiteName != after.SiteName { + changed = append(changed, "site_name") + } + if before.SiteLogo != after.SiteLogo { + changed = append(changed, "site_logo") + } + if before.SiteSubtitle != after.SiteSubtitle { + changed = append(changed, "site_subtitle") + } + if before.APIBaseURL != after.APIBaseURL { + changed = append(changed, "api_base_url") + } + if before.ContactInfo != after.ContactInfo { + changed = append(changed, "contact_info") + } + if before.DocURL != after.DocURL { + changed = append(changed, "doc_url") + } + if before.HomeContent != after.HomeContent { + changed = append(changed, "home_content") + } + if before.DefaultConcurrency != after.DefaultConcurrency { + changed = append(changed, "default_concurrency") + } + if before.DefaultBalance != after.DefaultBalance { + changed = append(changed, "default_balance") + } + if before.EnableModelFallback != after.EnableModelFallback { + changed = append(changed, "enable_model_fallback") + } + if before.FallbackModelAnthropic != after.FallbackModelAnthropic { + changed = append(changed, "fallback_model_anthropic") + } + if before.FallbackModelOpenAI != after.FallbackModelOpenAI { + changed = append(changed, "fallback_model_openai") + } + if before.FallbackModelGemini != after.FallbackModelGemini { + changed = append(changed, "fallback_model_gemini") + } + if before.FallbackModelAntigravity != after.FallbackModelAntigravity { + changed = append(changed, "fallback_model_antigravity") + } + if before.EnableIdentityPatch != after.EnableIdentityPatch { + changed = append(changed, "enable_identity_patch") + } + if before.IdentityPatchPrompt != after.IdentityPatchPrompt { + changed = append(changed, "identity_patch_prompt") + } + if before.OpsMonitoringEnabled != after.OpsMonitoringEnabled { + changed = append(changed, "ops_monitoring_enabled") + } + if before.OpsRealtimeMonitoringEnabled != after.OpsRealtimeMonitoringEnabled { + changed = append(changed, "ops_realtime_monitoring_enabled") + } + if before.OpsQueryModeDefault != after.OpsQueryModeDefault { + changed = append(changed, "ops_query_mode_default") + } + if before.OpsMetricsIntervalSeconds != after.OpsMetricsIntervalSeconds { + changed = append(changed, "ops_metrics_interval_seconds") + } + return changed +} + +// TestSMTPRequest is the SMTP connection test request payload +type TestSMTPRequest struct { + SMTPHost string `json:"smtp_host" binding:"required"` + SMTPPort int `json:"smtp_port"` + SMTPUsername string `json:"smtp_username"` + SMTPPassword string `json:"smtp_password"` + SMTPUseTLS bool `json:"smtp_use_tls"` +} + +// TestSMTPConnection tests the SMTP connection +// POST /api/v1/admin/settings/test-smtp +func (h *SettingHandler) TestSMTPConnection(c *gin.Context) { + var req TestSMTPRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if req.SMTPPort <= 0 { + req.SMTPPort = 587 + } + + // If no password is provided, use the saved password from the database + password := req.SMTPPassword + if password == "" { + savedConfig, err := h.emailService.GetSMTPConfig(c.Request.Context()) + if err == nil && savedConfig != nil { + password = savedConfig.Password + } + } + + config := &service.SMTPConfig{ + Host: req.SMTPHost, + Port: req.SMTPPort, + Username: req.SMTPUsername, + Password: password, + UseTLS: req.SMTPUseTLS, + } + + err := h.emailService.TestSMTPConnectionWithConfig(config) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "SMTP connection successful"}) +} + +// SendTestEmailRequest is the send-test-email request payload +type SendTestEmailRequest
struct { + Email string `json:"email" binding:"required,email"` + SMTPHost string `json:"smtp_host" binding:"required"` + SMTPPort int `json:"smtp_port"` + SMTPUsername string `json:"smtp_username"` + SMTPPassword string `json:"smtp_password"` + SMTPFrom string `json:"smtp_from_email"` + SMTPFromName string `json:"smtp_from_name"` + SMTPUseTLS bool `json:"smtp_use_tls"` +} + +// SendTestEmail sends a test email +// POST /api/v1/admin/settings/send-test-email
func (h *SettingHandler) SendTestEmail(c *gin.Context) { + var req SendTestEmailRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if req.SMTPPort <= 0 { + req.SMTPPort = 587 + } + + // If no password is provided, use the saved password from the database + password := req.SMTPPassword + if password == "" { + savedConfig, err := h.emailService.GetSMTPConfig(c.Request.Context()) + if err == nil && savedConfig != nil { + password = savedConfig.Password + } + } + + config := &service.SMTPConfig{ + Host: req.SMTPHost, + Port: req.SMTPPort, + Username: req.SMTPUsername, + Password: password, + From: req.SMTPFrom, + FromName: req.SMTPFromName, + UseTLS: req.SMTPUseTLS, + } + + siteName := h.settingService.GetSiteName(c.Request.Context()) + subject := "[" + siteName + "] Test Email" + body := `
+<html>
+<body>
+<h2>` + siteName + `</h2>
+<p><strong>Email Configuration Successful!</strong></p>
+<p>This is a test email to verify your SMTP settings are working correctly.</p>
+</body>
+</html>
+ + +` + + if err := h.emailService.SendEmailWithConfig(config, req.Email, subject, body); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Test email sent successfully"}) +} + +// GetAdminAPIKey returns the admin API key status +// GET /api/v1/admin/settings/admin-api-key +func (h *SettingHandler) GetAdminAPIKey(c *gin.Context) { + maskedKey, exists, err := h.settingService.GetAdminAPIKeyStatus(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{ + "exists": exists, + "masked_key": maskedKey, + }) +} + +// RegenerateAdminAPIKey generates or regenerates the admin API key +// POST /api/v1/admin/settings/admin-api-key/regenerate +func (h *SettingHandler) RegenerateAdminAPIKey(c *gin.Context) { + key, err := h.settingService.GenerateAdminAPIKey(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{ + "key": key, // the full key is returned only once, at generation time + }) +} + +// DeleteAdminAPIKey deletes the admin API key +// DELETE /api/v1/admin/settings/admin-api-key +func (h *SettingHandler) DeleteAdminAPIKey(c *gin.Context) { + if err := h.settingService.DeleteAdminAPIKey(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Admin API key deleted"}) +} + +// GetStreamTimeoutSettings returns the stream timeout handling configuration +// GET /api/v1/admin/settings/stream-timeout +func (h *SettingHandler) GetStreamTimeoutSettings(c *gin.Context) { + settings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.StreamTimeoutSettings{ + Enabled: settings.Enabled, + Action: settings.Action, + TempUnschedMinutes: settings.TempUnschedMinutes, + ThresholdCount: settings.ThresholdCount, + ThresholdWindowMinutes: settings.ThresholdWindowMinutes, + }) +} + +// UpdateStreamTimeoutSettingsRequest is the update stream timeout settings request +type UpdateStreamTimeoutSettingsRequest struct { + Enabled bool `json:"enabled"` + Action string `json:"action"` + TempUnschedMinutes int `json:"temp_unsched_minutes"` + ThresholdCount int `json:"threshold_count"` + ThresholdWindowMinutes int `json:"threshold_window_minutes"` +} + +// UpdateStreamTimeoutSettings updates the stream timeout handling configuration +// PUT /api/v1/admin/settings/stream-timeout +func (h *SettingHandler) UpdateStreamTimeoutSettings(c *gin.Context) { + var req UpdateStreamTimeoutSettingsRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + settings := &service.StreamTimeoutSettings{ + Enabled: req.Enabled, + Action: req.Action, + TempUnschedMinutes: req.TempUnschedMinutes, + ThresholdCount: req.ThresholdCount, + ThresholdWindowMinutes: req.ThresholdWindowMinutes, + } + + if err := h.settingService.SetStreamTimeoutSettings(c.Request.Context(), settings); err != nil { + response.BadRequest(c, err.Error()) + return + } + + // Re-fetch the settings to return the persisted values + updatedSettings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.StreamTimeoutSettings{ + Enabled: updatedSettings.Enabled, + Action: updatedSettings.Action, + TempUnschedMinutes: updatedSettings.TempUnschedMinutes, + ThresholdCount: updatedSettings.ThresholdCount, + ThresholdWindowMinutes: updatedSettings.ThresholdWindowMinutes, + }) +} diff --git a/backend/internal/handler/admin/subscription_handler.go b/backend/internal/handler/admin/subscription_handler.go new file mode 100644 index
00000000..08db999a --- /dev/null +++ b/backend/internal/handler/admin/subscription_handler.go @@ -0,0 +1,278 @@ +package admin + +import ( + "strconv" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// toResponsePagination converts pagination.PaginationResult to response.PaginationResult +func toResponsePagination(p *pagination.PaginationResult) *response.PaginationResult { + if p == nil { + return nil + } + return &response.PaginationResult{ + Total: p.Total, + Page: p.Page, + PageSize: p.PageSize, + Pages: p.Pages, + } +} + +// SubscriptionHandler handles admin subscription management +type SubscriptionHandler struct { + subscriptionService *service.SubscriptionService +} + +// NewSubscriptionHandler creates a new admin subscription handler +func NewSubscriptionHandler(subscriptionService *service.SubscriptionService) *SubscriptionHandler { + return &SubscriptionHandler{ + subscriptionService: subscriptionService, + } +} + +// AssignSubscriptionRequest represents assign subscription request +type AssignSubscriptionRequest struct { + UserID int64 `json:"user_id" binding:"required"` + GroupID int64 `json:"group_id" binding:"required"` + ValidityDays int `json:"validity_days" binding:"omitempty,max=36500"` // max 100 years + Notes string `json:"notes"` +} + +// BulkAssignSubscriptionRequest represents bulk assign subscription request +type BulkAssignSubscriptionRequest struct { + UserIDs []int64 `json:"user_ids" binding:"required,min=1"` + GroupID int64 `json:"group_id" binding:"required"` + ValidityDays int `json:"validity_days" binding:"omitempty,max=36500"` // max 100 years + Notes string `json:"notes"` +} + +// ExtendSubscriptionRequest represents extend subscription request +type ExtendSubscriptionRequest struct { + Days int `json:"days" binding:"required,min=1,max=36500"` // max 100 years +} + +// List handles listing all subscriptions with pagination and filters +// GET /api/v1/admin/subscriptions +func (h *SubscriptionHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + + // Parse optional filters + var userID, groupID *int64 + if userIDStr := c.Query("user_id"); userIDStr != "" { + if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { + userID = &id + } + } + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil { + groupID = &id + } + } + status := c.Query("status") + + subscriptions, pagination, err := h.subscriptionService.List(c.Request.Context(), page, pageSize, userID, groupID, status) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserSubscription, 0, len(subscriptions)) + for i := range subscriptions { + out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + } + response.PaginatedWithResult(c, out, toResponsePagination(pagination)) +} + +// GetByID handles getting a subscription by ID +// GET /api/v1/admin/subscriptions/:id +func (h *SubscriptionHandler) GetByID(c *gin.Context) { + subscriptionID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid subscription ID") + return + } + + subscription, err := h.subscriptionService.GetByID(c.Request.Context(), subscriptionID) + if err != nil { + 
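// ErrorFrom translates the service-layer error into the appropriate API error response. +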
response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserSubscriptionFromService(subscription)) +} + +// GetProgress handles getting subscription usage progress +// GET /api/v1/admin/subscriptions/:id/progress +func (h *SubscriptionHandler) GetProgress(c *gin.Context) { + subscriptionID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid subscription ID") + return + } + + progress, err := h.subscriptionService.GetSubscriptionProgress(c.Request.Context(), subscriptionID) + if err != nil { + response.NotFound(c, "Subscription not found") + return + } + + response.Success(c, progress) +} + +// Assign handles assigning a subscription to a user +// POST /api/v1/admin/subscriptions/assign +func (h *SubscriptionHandler) Assign(c *gin.Context) { + var req AssignSubscriptionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Get admin user ID from context + adminID := getAdminIDFromContext(c) + + subscription, err := h.subscriptionService.AssignSubscription(c.Request.Context(), &service.AssignSubscriptionInput{ + UserID: req.UserID, + GroupID: req.GroupID, + ValidityDays: req.ValidityDays, + AssignedBy: adminID, + Notes: req.Notes, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserSubscriptionFromService(subscription)) +} + +// BulkAssign handles bulk assigning subscriptions to multiple users +// POST /api/v1/admin/subscriptions/bulk-assign +func (h *SubscriptionHandler) BulkAssign(c *gin.Context) { + var req BulkAssignSubscriptionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Get admin user ID from context + adminID := getAdminIDFromContext(c) + + result, err := h.subscriptionService.BulkAssignSubscription(c.Request.Context(), &service.BulkAssignSubscriptionInput{ + UserIDs: req.UserIDs, + GroupID: req.GroupID, + ValidityDays: req.ValidityDays, + AssignedBy: adminID, + Notes: req.Notes, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.BulkAssignResultFromService(result)) +} + +// Extend handles extending a subscription +// POST /api/v1/admin/subscriptions/:id/extend +func (h *SubscriptionHandler) Extend(c *gin.Context) { + subscriptionID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid subscription ID") + return + } + + var req ExtendSubscriptionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + subscription, err := h.subscriptionService.ExtendSubscription(c.Request.Context(), subscriptionID, req.Days) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserSubscriptionFromService(subscription)) +} + +// Revoke handles revoking a subscription +// DELETE /api/v1/admin/subscriptions/:id +func (h *SubscriptionHandler) Revoke(c *gin.Context) { + subscriptionID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid subscription ID") + return + } + + err = h.subscriptionService.RevokeSubscription(c.Request.Context(), subscriptionID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Subscription revoked successfully"}) +} + +// ListByGroup handles listing subscriptions for a specific group +// GET 
/api/v1/admin/groups/:id/subscriptions +func (h *SubscriptionHandler) ListByGroup(c *gin.Context) { + groupID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group ID") + return + } + + page, pageSize := response.ParsePagination(c) + + subscriptions, pagination, err := h.subscriptionService.ListGroupSubscriptions(c.Request.Context(), groupID, page, pageSize) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserSubscription, 0, len(subscriptions)) + for i := range subscriptions { + out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + } + response.PaginatedWithResult(c, out, toResponsePagination(pagination)) +} + +// ListByUser handles listing subscriptions for a specific user +// GET /api/v1/admin/users/:id/subscriptions +func (h *SubscriptionHandler) ListByUser(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + subscriptions, err := h.subscriptionService.ListUserSubscriptions(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserSubscription, 0, len(subscriptions)) + for i := range subscriptions { + out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + } + response.Success(c, out) +} + +// Helper function to get admin ID from context +func getAdminIDFromContext(c *gin.Context) int64 { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + return 0 + } + return subject.UserID +} diff --git a/backend/internal/handler/admin/system_handler.go b/backend/internal/handler/admin/system_handler.go new file mode 100644 index 00000000..28c075aa --- /dev/null +++ b/backend/internal/handler/admin/system_handler.go @@ -0,0 +1,87 @@ +package admin + +import ( + "net/http" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/pkg/sysutil" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// SystemHandler handles system-related operations +type SystemHandler struct { + updateSvc *service.UpdateService +} + +// NewSystemHandler creates a new SystemHandler +func NewSystemHandler(updateSvc *service.UpdateService) *SystemHandler { + return &SystemHandler{ + updateSvc: updateSvc, + } +} + +// GetVersion returns the current version +// GET /api/v1/admin/system/version +func (h *SystemHandler) GetVersion(c *gin.Context) { + // The update-check error is deliberately ignored; guard against a nil info so a failed check cannot panic. + info, _ := h.updateSvc.CheckUpdate(c.Request.Context(), false) + version := "" + if info != nil { + version = info.CurrentVersion + } + response.Success(c, gin.H{ + "version": version, + }) +} + +// CheckUpdates checks for available updates +// GET /api/v1/admin/system/check-updates +func (h *SystemHandler) CheckUpdates(c *gin.Context) { + force := c.Query("force") == "true" + info, err := h.updateSvc.CheckUpdate(c.Request.Context(), force) + if err != nil { + response.Error(c, http.StatusInternalServerError, err.Error()) + return + } + response.Success(c, info) +} + +// PerformUpdate downloads and applies the update +// POST /api/v1/admin/system/update +func (h *SystemHandler) PerformUpdate(c *gin.Context) { + if err := h.updateSvc.PerformUpdate(c.Request.Context()); err != nil { + response.Error(c, http.StatusInternalServerError, err.Error()) + return + } + response.Success(c, gin.H{ + "message": "Update completed.
Please restart the service.", + "need_restart": true, + }) +} + +// Rollback restores the previous version +// POST /api/v1/admin/system/rollback +func (h *SystemHandler) Rollback(c *gin.Context) { + if err := h.updateSvc.Rollback(); err != nil { + response.Error(c, http.StatusInternalServerError, err.Error()) + return + } + response.Success(c, gin.H{ + "message": "Rollback completed. Please restart the service.", + "need_restart": true, + }) +} + +// RestartService restarts the systemd service +// POST /api/v1/admin/system/restart +func (h *SystemHandler) RestartService(c *gin.Context) { + // Schedule service restart in background after sending response + // This ensures the client receives the success response before the service restarts + go func() { + // Wait a moment to ensure the response is sent + time.Sleep(500 * time.Millisecond) + sysutil.RestartServiceAsync() + }() + + response.Success(c, gin.H{ + "message": "Service restart initiated", + }) +} diff --git a/backend/internal/handler/admin/usage_handler.go b/backend/internal/handler/admin/usage_handler.go new file mode 100644 index 00000000..c7b983f1 --- /dev/null +++ b/backend/internal/handler/admin/usage_handler.go @@ -0,0 +1,346 @@ +package admin + +import ( + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// UsageHandler handles admin usage-related requests +type UsageHandler struct { + usageService *service.UsageService + apiKeyService *service.APIKeyService + adminService service.AdminService +} + +// NewUsageHandler creates a new admin usage handler +func NewUsageHandler( + usageService *service.UsageService, + apiKeyService *service.APIKeyService, + adminService service.AdminService, +) *UsageHandler { + return &UsageHandler{ + usageService: usageService, + apiKeyService: apiKeyService, + adminService: adminService, + } +} + +// List handles listing all usage records with filters +// GET /api/v1/admin/usage +func (h *UsageHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + + // Parse filters + var userID, apiKeyID, accountID, groupID int64 + if userIDStr := c.Query("user_id"); userIDStr != "" { + id, err := strconv.ParseInt(userIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user_id") + return + } + userID = id + } + + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + id, err := strconv.ParseInt(apiKeyIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid api_key_id") + return + } + apiKeyID = id + } + + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + id, err := strconv.ParseInt(accountIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account_id") + return + } + accountID = id + } + + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + id, err := strconv.ParseInt(groupIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = id + } + + model := c.Query("model") + + var stream *bool + if streamStr := c.Query("stream"); streamStr != "" { + val, err := strconv.ParseBool(streamStr) + if err != nil { + response.BadRequest(c, "Invalid stream value, use true or false") + return + } + stream = &val + } + + var billingType *int8 + if billingTypeStr 
:= c.Query("billing_type"); billingTypeStr != "" { + val, err := strconv.ParseInt(billingTypeStr, 10, 8) + if err != nil { + response.BadRequest(c, "Invalid billing_type") + return + } + bt := int8(val) + billingType = &bt + } + + // Parse date range + var startTime, endTime *time.Time + userTZ := c.Query("timezone") // Get user's timezone from request + if startDateStr := c.Query("start_date"); startDateStr != "" { + t, err := timezone.ParseInUserLocation("2006-01-02", startDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + startTime = &t + } + + if endDateStr := c.Query("end_date"); endDateStr != "" { + t, err := timezone.ParseInUserLocation("2006-01-02", endDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + // Set end time to end of day + t = t.Add(24*time.Hour - time.Nanosecond) + endTime = &t + } + + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + filters := usagestats.UsageLogFilters{ + UserID: userID, + APIKeyID: apiKeyID, + AccountID: accountID, + GroupID: groupID, + Model: model, + Stream: stream, + BillingType: billingType, + StartTime: startTime, + EndTime: endTime, + } + + records, result, err := h.usageService.ListWithFilters(c.Request.Context(), params, filters) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UsageLog, 0, len(records)) + for i := range records { + out = append(out, *dto.UsageLogFromServiceAdmin(&records[i])) + } + response.Paginated(c, out, result.Total, page, pageSize) +} + +// Stats handles getting usage statistics with filters +// GET /api/v1/admin/usage/stats +func (h *UsageHandler) Stats(c *gin.Context) { + // Parse filters - same as List endpoint + var userID, apiKeyID, accountID, groupID int64 + if userIDStr := c.Query("user_id"); userIDStr != "" { + id, err := strconv.ParseInt(userIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user_id") + return + } + userID = id + } + + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + id, err := strconv.ParseInt(apiKeyIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid api_key_id") + return + } + apiKeyID = id + } + + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + id, err := strconv.ParseInt(accountIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid account_id") + return + } + accountID = id + } + + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + id, err := strconv.ParseInt(groupIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = id + } + + model := c.Query("model") + + var stream *bool + if streamStr := c.Query("stream"); streamStr != "" { + val, err := strconv.ParseBool(streamStr) + if err != nil { + response.BadRequest(c, "Invalid stream value, use true or false") + return + } + stream = &val + } + + var billingType *int8 + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + val, err := strconv.ParseInt(billingTypeStr, 10, 8) + if err != nil { + response.BadRequest(c, "Invalid billing_type") + return + } + bt := int8(val) + billingType = &bt + } + + // Parse date range + userTZ := c.Query("timezone") + now := timezone.NowInUserLocation(userTZ) + var startTime, endTime time.Time + + startDateStr := c.Query("start_date") + endDateStr := c.Query("end_date") + + if startDateStr != "" && endDateStr != "" { + var err error + startTime, err = 
timezone.ParseInUserLocation("2006-01-02", startDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + endTime, err = timezone.ParseInUserLocation("2006-01-02", endDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + endTime = endTime.Add(24*time.Hour - time.Nanosecond) + } else { + period := c.DefaultQuery("period", "today") + switch period { + case "today": + startTime = timezone.StartOfDayInUserLocation(now, userTZ) + case "week": + startTime = now.AddDate(0, 0, -7) + case "month": + startTime = now.AddDate(0, -1, 0) + default: + startTime = timezone.StartOfDayInUserLocation(now, userTZ) + } + endTime = now + } + + // Build filters and call GetStatsWithFilters + filters := usagestats.UsageLogFilters{ + UserID: userID, + APIKeyID: apiKeyID, + AccountID: accountID, + GroupID: groupID, + Model: model, + Stream: stream, + BillingType: billingType, + StartTime: &startTime, + EndTime: &endTime, + } + + stats, err := h.usageService.GetStatsWithFilters(c.Request.Context(), filters) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, stats) +} + +// SearchUsers handles searching users by email keyword +// GET /api/v1/admin/usage/search-users +func (h *UsageHandler) SearchUsers(c *gin.Context) { + keyword := c.Query("q") + if keyword == "" { + response.Success(c, []any{}) + return + } + + // Limit to 30 results + users, _, err := h.adminService.ListUsers(c.Request.Context(), 1, 30, service.UserListFilters{Search: keyword}) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Return simplified user list (only id and email) + type SimpleUser struct { + ID int64 `json:"id"` + Email string `json:"email"` + } + + result := make([]SimpleUser, len(users)) + for i, u := range users { + result[i] = SimpleUser{ + ID: u.ID, + Email: u.Email, + } + } + + response.Success(c, result) +} + +// SearchAPIKeys handles searching API keys by user +// GET /api/v1/admin/usage/search-api-keys +func (h *UsageHandler) SearchAPIKeys(c *gin.Context) { + userIDStr := c.Query("user_id") + keyword := c.Query("q") + + var userID int64 + if userIDStr != "" { + id, err := strconv.ParseInt(userIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user_id") + return + } + userID = id + } + + keys, err := h.apiKeyService.SearchAPIKeys(c.Request.Context(), userID, keyword, 30) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Return simplified API key list (only id and name) + type SimpleAPIKey struct { + ID int64 `json:"id"` + Name string `json:"name"` + UserID int64 `json:"user_id"` + } + + result := make([]SimpleAPIKey, len(keys)) + for i, k := range keys { + result[i] = SimpleAPIKey{ + ID: k.ID, + Name: k.Name, + UserID: k.UserID, + } + } + + response.Success(c, result) +} diff --git a/backend/internal/handler/admin/user_attribute_handler.go b/backend/internal/handler/admin/user_attribute_handler.go new file mode 100644 index 00000000..2f326279 --- /dev/null +++ b/backend/internal/handler/admin/user_attribute_handler.go @@ -0,0 +1,342 @@ +package admin + +import ( + "strconv" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// UserAttributeHandler handles user attribute management +type UserAttributeHandler struct { + attrService *service.UserAttributeService +} + +// NewUserAttributeHandler creates a new handler +func 
NewUserAttributeHandler(attrService *service.UserAttributeService) *UserAttributeHandler { + return &UserAttributeHandler{attrService: attrService} +} + +// --- Request/Response DTOs --- + +// CreateAttributeDefinitionRequest represents create attribute definition request +type CreateAttributeDefinitionRequest struct { + Key string `json:"key" binding:"required,min=1,max=100"` + Name string `json:"name" binding:"required,min=1,max=255"` + Description string `json:"description"` + Type string `json:"type" binding:"required"` + Options []service.UserAttributeOption `json:"options"` + Required bool `json:"required"` + Validation service.UserAttributeValidation `json:"validation"` + Placeholder string `json:"placeholder"` + Enabled bool `json:"enabled"` +} + +// UpdateAttributeDefinitionRequest represents update attribute definition request +type UpdateAttributeDefinitionRequest struct { + Name *string `json:"name"` + Description *string `json:"description"` + Type *string `json:"type"` + Options *[]service.UserAttributeOption `json:"options"` + Required *bool `json:"required"` + Validation *service.UserAttributeValidation `json:"validation"` + Placeholder *string `json:"placeholder"` + Enabled *bool `json:"enabled"` +} + +// ReorderRequest represents reorder attribute definitions request +type ReorderRequest struct { + IDs []int64 `json:"ids" binding:"required"` +} + +// UpdateUserAttributesRequest represents update user attributes request +type UpdateUserAttributesRequest struct { + Values map[int64]string `json:"values" binding:"required"` +} + +// BatchGetUserAttributesRequest represents batch get user attributes request +type BatchGetUserAttributesRequest struct { + UserIDs []int64 `json:"user_ids" binding:"required"` +} + +// BatchUserAttributesResponse represents batch user attributes response +type BatchUserAttributesResponse struct { + // Map of userID -> map of attributeID -> value + Attributes map[int64]map[int64]string `json:"attributes"` +} + +// AttributeDefinitionResponse represents attribute definition response +type AttributeDefinitionResponse struct { + ID int64 `json:"id"` + Key string `json:"key"` + Name string `json:"name"` + Description string `json:"description"` + Type string `json:"type"` + Options []service.UserAttributeOption `json:"options"` + Required bool `json:"required"` + Validation service.UserAttributeValidation `json:"validation"` + Placeholder string `json:"placeholder"` + DisplayOrder int `json:"display_order"` + Enabled bool `json:"enabled"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// AttributeValueResponse represents attribute value response +type AttributeValueResponse struct { + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + AttributeID int64 `json:"attribute_id"` + Value string `json:"value"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` +} + +// --- Helpers --- + +func defToResponse(def *service.UserAttributeDefinition) *AttributeDefinitionResponse { + return &AttributeDefinitionResponse{ + ID: def.ID, + Key: def.Key, + Name: def.Name, + Description: def.Description, + Type: string(def.Type), + Options: def.Options, + Required: def.Required, + Validation: def.Validation, + Placeholder: def.Placeholder, + DisplayOrder: def.DisplayOrder, + Enabled: def.Enabled, + CreatedAt: def.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + UpdatedAt: def.UpdatedAt.Format("2006-01-02T15:04:05Z07:00"), + } +} + +func valueToResponse(val *service.UserAttributeValue) 
*AttributeValueResponse { + return &AttributeValueResponse{ + ID: val.ID, + UserID: val.UserID, + AttributeID: val.AttributeID, + Value: val.Value, + CreatedAt: val.CreatedAt.Format("2006-01-02T15:04:05Z07:00"), + UpdatedAt: val.UpdatedAt.Format("2006-01-02T15:04:05Z07:00"), + } +} + +// --- Handlers --- + +// ListDefinitions lists all attribute definitions +// GET /admin/user-attributes +func (h *UserAttributeHandler) ListDefinitions(c *gin.Context) { + enabledOnly := c.Query("enabled") == "true" + + defs, err := h.attrService.ListDefinitions(c.Request.Context(), enabledOnly) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeDefinitionResponse, 0, len(defs)) + for i := range defs { + out = append(out, defToResponse(&defs[i])) + } + + response.Success(c, out) +} + +// CreateDefinition creates a new attribute definition +// POST /admin/user-attributes +func (h *UserAttributeHandler) CreateDefinition(c *gin.Context) { + var req CreateAttributeDefinitionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + def, err := h.attrService.CreateDefinition(c.Request.Context(), service.CreateAttributeDefinitionInput{ + Key: req.Key, + Name: req.Name, + Description: req.Description, + Type: service.UserAttributeType(req.Type), + Options: req.Options, + Required: req.Required, + Validation: req.Validation, + Placeholder: req.Placeholder, + Enabled: req.Enabled, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, defToResponse(def)) +} + +// UpdateDefinition updates an attribute definition +// PUT /admin/user-attributes/:id +func (h *UserAttributeHandler) UpdateDefinition(c *gin.Context) { + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid attribute ID") + return + } + + var req UpdateAttributeDefinitionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + input := service.UpdateAttributeDefinitionInput{ + Name: req.Name, + Description: req.Description, + Options: req.Options, + Required: req.Required, + Validation: req.Validation, + Placeholder: req.Placeholder, + Enabled: req.Enabled, + } + if req.Type != nil { + t := service.UserAttributeType(*req.Type) + input.Type = &t + } + + def, err := h.attrService.UpdateDefinition(c.Request.Context(), id, input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, defToResponse(def)) +} + +// DeleteDefinition deletes an attribute definition +// DELETE /admin/user-attributes/:id +func (h *UserAttributeHandler) DeleteDefinition(c *gin.Context) { + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid attribute ID") + return + } + + if err := h.attrService.DeleteDefinition(c.Request.Context(), id); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Attribute definition deleted successfully"}) +} + +// ReorderDefinitions reorders attribute definitions +// PUT /admin/user-attributes/reorder +func (h *UserAttributeHandler) ReorderDefinitions(c *gin.Context) { + var req ReorderRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Convert IDs array to orders map (position in array = display_order) + orders := make(map[int64]int, len(req.IDs)) + for i, id := range req.IDs { + orders[id] 
= i + } + + if err := h.attrService.ReorderDefinitions(c.Request.Context(), orders); err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Reorder successful"}) +} + +// GetUserAttributes gets a user's attribute values +// GET /admin/users/:id/attributes +func (h *UserAttributeHandler) GetUserAttributes(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + values, err := h.attrService.GetUserAttributes(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeValueResponse, 0, len(values)) + for i := range values { + out = append(out, valueToResponse(&values[i])) + } + + response.Success(c, out) +} + +// UpdateUserAttributes updates a user's attribute values +// PUT /admin/users/:id/attributes +func (h *UserAttributeHandler) UpdateUserAttributes(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + var req UpdateUserAttributesRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + inputs := make([]service.UpdateUserAttributeInput, 0, len(req.Values)) + for attrID, value := range req.Values { + inputs = append(inputs, service.UpdateUserAttributeInput{ + AttributeID: attrID, + Value: value, + }) + } + + if err := h.attrService.UpdateUserAttributes(c.Request.Context(), userID, inputs); err != nil { + response.ErrorFrom(c, err) + return + } + + // Return updated values + values, err := h.attrService.GetUserAttributes(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]*AttributeValueResponse, 0, len(values)) + for i := range values { + out = append(out, valueToResponse(&values[i])) + } + + response.Success(c, out) +} + +// GetBatchUserAttributes gets attribute values for multiple users +// POST /admin/user-attributes/batch +func (h *UserAttributeHandler) GetBatchUserAttributes(c *gin.Context) { + var req BatchGetUserAttributesRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.UserIDs) == 0 { + response.Success(c, BatchUserAttributesResponse{Attributes: map[int64]map[int64]string{}}) + return + } + + attrs, err := h.attrService.GetBatchUserAttributes(c.Request.Context(), req.UserIDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, BatchUserAttributesResponse{Attributes: attrs}) +} diff --git a/backend/internal/handler/admin/user_handler.go b/backend/internal/handler/admin/user_handler.go new file mode 100644 index 00000000..38cc8acd --- /dev/null +++ b/backend/internal/handler/admin/user_handler.go @@ -0,0 +1,279 @@ +package admin + +import ( + "strconv" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// UserHandler handles admin user management +type UserHandler struct { + adminService service.AdminService +} + +// NewUserHandler creates a new admin user handler +func NewUserHandler(adminService service.AdminService) *UserHandler { + return &UserHandler{ + adminService: adminService, + } +} + +// CreateUserRequest represents admin create user request +type CreateUserRequest struct { + 
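// Email and Password are required; all other fields are optional. +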
Email string `json:"email" binding:"required,email"` + Password string `json:"password" binding:"required,min=6"` + Username string `json:"username"` + Notes string `json:"notes"` + Balance float64 `json:"balance"` + Concurrency int `json:"concurrency"` + AllowedGroups []int64 `json:"allowed_groups"` +} + +// UpdateUserRequest represents admin update user request +// Pointer types distinguish "not provided" from "set to zero" +type UpdateUserRequest struct { + Email string `json:"email" binding:"omitempty,email"` + Password string `json:"password" binding:"omitempty,min=6"` + Username *string `json:"username"` + Notes *string `json:"notes"` + Balance *float64 `json:"balance"` + Concurrency *int `json:"concurrency"` + Status string `json:"status" binding:"omitempty,oneof=active disabled"` + AllowedGroups *[]int64 `json:"allowed_groups"` +} + +// UpdateBalanceRequest represents balance update request +type UpdateBalanceRequest struct { + Balance float64 `json:"balance" binding:"required,gt=0"` + Operation string `json:"operation" binding:"required,oneof=set add subtract"` + Notes string `json:"notes"` +} + +// List handles listing all users with pagination +// GET /api/v1/admin/users +// Query params: +// - status: filter by user status +// - role: filter by user role +// - search: search in email, username +// - attr[{id}]: filter by custom attribute value, e.g. attr[1]=company +func (h *UserHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + + search := c.Query("search") + // Normalize the search parameter and cap its length at 100 bytes + search = strings.TrimSpace(search) + if len(search) > 100 { + search = search[:100] + } + + filters := service.UserListFilters{ + Status: c.Query("status"), + Role: c.Query("role"), + Search: search, + Attributes: parseAttributeFilters(c), + } + + users, total, err := h.adminService.ListUsers(c.Request.Context(), page, pageSize, filters) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.User, 0, len(users)) + for i := range users { + out = append(out, *dto.UserFromService(&users[i])) + } + response.Paginated(c, out, total, page, pageSize) +} + +// parseAttributeFilters extracts attribute filters from query params +// Format: attr[{attributeID}]=value, e.g.
attr[1]=company&attr[2]=developer +func parseAttributeFilters(c *gin.Context) map[int64]string { + result := make(map[int64]string) + + // Get all query params and look for attr[*] pattern + for key, values := range c.Request.URL.Query() { + if len(values) == 0 || values[0] == "" { + continue + } + // Check if key matches pattern attr[{id}] + if len(key) > 5 && key[:5] == "attr[" && key[len(key)-1] == ']' { + idStr := key[5 : len(key)-1] + id, err := strconv.ParseInt(idStr, 10, 64) + if err == nil && id > 0 { + result[id] = values[0] + } + } + } + + return result +} + +// GetByID handles getting a user by ID +// GET /api/v1/admin/users/:id +func (h *UserHandler) GetByID(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + user, err := h.adminService.GetUser(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserFromService(user)) +} + +// Create handles creating a new user +// POST /api/v1/admin/users +func (h *UserHandler) Create(c *gin.Context) { + var req CreateUserRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + user, err := h.adminService.CreateUser(c.Request.Context(), &service.CreateUserInput{ + Email: req.Email, + Password: req.Password, + Username: req.Username, + Notes: req.Notes, + Balance: req.Balance, + Concurrency: req.Concurrency, + AllowedGroups: req.AllowedGroups, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserFromService(user)) +} + +// Update handles updating a user +// PUT /api/v1/admin/users/:id +func (h *UserHandler) Update(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + var req UpdateUserRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + // Pass the pointer fields through directly; nil means the field was not provided + user, err := h.adminService.UpdateUser(c.Request.Context(), userID, &service.UpdateUserInput{ + Email: req.Email, + Password: req.Password, + Username: req.Username, + Notes: req.Notes, + Balance: req.Balance, + Concurrency: req.Concurrency, + Status: req.Status, + AllowedGroups: req.AllowedGroups, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserFromService(user)) +} + +// Delete handles deleting a user +// DELETE /api/v1/admin/users/:id +func (h *UserHandler) Delete(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + err = h.adminService.DeleteUser(c.Request.Context(), userID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "User deleted successfully"}) +} + +// UpdateBalance handles updating user balance +// POST /api/v1/admin/users/:id/balance +func (h *UserHandler) UpdateBalance(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + var req UpdateBalanceRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + user, err := h.adminService.UpdateUserBalance(c.Request.Context(), userID, req.Balance, req.Operation, req.Notes) + if err != nil { +
response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.UserFromService(user)) +} + +// GetUserAPIKeys handles getting user's API keys +// GET /api/v1/admin/users/:id/api-keys +func (h *UserHandler) GetUserAPIKeys(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + page, pageSize := response.ParsePagination(c) + + keys, total, err := h.adminService.GetUserAPIKeys(c.Request.Context(), userID, page, pageSize) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.APIKey, 0, len(keys)) + for i := range keys { + out = append(out, *dto.APIKeyFromService(&keys[i])) + } + response.Paginated(c, out, total, page, pageSize) +} + +// GetUserUsage handles getting user's usage statistics +// GET /api/v1/admin/users/:id/usage +func (h *UserHandler) GetUserUsage(c *gin.Context) { + userID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid user ID") + return + } + + period := c.DefaultQuery("period", "month") + + stats, err := h.adminService.GetUserUsageStats(c.Request.Context(), userID, period) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, stats) +} diff --git a/backend/internal/handler/api_key_handler.go b/backend/internal/handler/api_key_handler.go new file mode 100644 index 00000000..52dc6911 --- /dev/null +++ b/backend/internal/handler/api_key_handler.go @@ -0,0 +1,218 @@ +// Package handler provides HTTP request handlers for the application. +package handler + +import ( + "strconv" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// APIKeyHandler handles API key-related requests +type APIKeyHandler struct { + apiKeyService *service.APIKeyService +} + +// NewAPIKeyHandler creates a new APIKeyHandler +func NewAPIKeyHandler(apiKeyService *service.APIKeyService) *APIKeyHandler { + return &APIKeyHandler{ + apiKeyService: apiKeyService, + } +} + +// CreateAPIKeyRequest represents the create API key request payload +type CreateAPIKeyRequest struct { + Name string `json:"name" binding:"required"` + GroupID *int64 `json:"group_id"` // nullable + CustomKey *string `json:"custom_key"` // optional custom key + IPWhitelist []string `json:"ip_whitelist"` // IP whitelist + IPBlacklist []string `json:"ip_blacklist"` // IP blacklist +} + +// UpdateAPIKeyRequest represents the update API key request payload +type UpdateAPIKeyRequest struct { + Name string `json:"name"` + GroupID *int64 `json:"group_id"` + Status string `json:"status" binding:"omitempty,oneof=active inactive"` + IPWhitelist []string `json:"ip_whitelist"` // IP whitelist + IPBlacklist []string `json:"ip_blacklist"` // IP blacklist +} + +// List handles listing user's API keys with pagination +// GET /api/v1/api-keys +func (h *APIKeyHandler) List(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + page, pageSize := response.ParsePagination(c) + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + + keys, result, err := h.apiKeyService.List(c.Request.Context(), subject.UserID, params) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.APIKey,
+	for i := range keys {
+		out = append(out, *dto.APIKeyFromService(&keys[i]))
+	}
+	response.Paginated(c, out, result.Total, page, pageSize)
+}
+
+// GetByID handles getting a single API key
+// GET /api/v1/api-keys/:id
+func (h *APIKeyHandler) GetByID(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	keyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid key ID")
+		return
+	}
+
+	key, err := h.apiKeyService.GetByID(c.Request.Context(), keyID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Verify ownership
+	if key.UserID != subject.UserID {
+		response.Forbidden(c, "Not authorized to access this key")
+		return
+	}
+
+	response.Success(c, dto.APIKeyFromService(key))
+}
+
+// Create handles creating a new API key
+// POST /api/v1/api-keys
+func (h *APIKeyHandler) Create(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	var req CreateAPIKeyRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	svcReq := service.CreateAPIKeyRequest{
+		Name: req.Name,
+		GroupID: req.GroupID,
+		CustomKey: req.CustomKey,
+		IPWhitelist: req.IPWhitelist,
+		IPBlacklist: req.IPBlacklist,
+	}
+	key, err := h.apiKeyService.Create(c.Request.Context(), subject.UserID, svcReq)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.APIKeyFromService(key))
+}
+
+// Update handles updating an API key
+// PUT /api/v1/api-keys/:id
+func (h *APIKeyHandler) Update(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	keyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid key ID")
+		return
+	}
+
+	var req UpdateAPIKeyRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	svcReq := service.UpdateAPIKeyRequest{
+		IPWhitelist: req.IPWhitelist,
+		IPBlacklist: req.IPBlacklist,
+	}
+	if req.Name != "" {
+		svcReq.Name = &req.Name
+	}
+	svcReq.GroupID = req.GroupID
+	if req.Status != "" {
+		svcReq.Status = &req.Status
+	}
+
+	key, err := h.apiKeyService.Update(c.Request.Context(), keyID, subject.UserID, svcReq)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, dto.APIKeyFromService(key))
+}
+
+// Delete handles deleting an API key
+// DELETE /api/v1/api-keys/:id
+func (h *APIKeyHandler) Delete(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	keyID, err := strconv.ParseInt(c.Param("id"), 10, 64)
+	if err != nil {
+		response.BadRequest(c, "Invalid key ID")
+		return
+	}
+
+	err = h.apiKeyService.Delete(c.Request.Context(), keyID, subject.UserID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "API key deleted successfully"})
+}
+
+// GetAvailableGroups returns the list of groups the user can bind API keys to
+// GET /api/v1/groups/available
+func (h *APIKeyHandler) GetAvailableGroups(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	groups, err := h.apiKeyService.GetAvailableGroups(c.Request.Context(), subject.UserID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	out := make([]dto.Group, 0, len(groups))
+	for i := range groups {
+		out = append(out, *dto.GroupFromService(&groups[i]))
+	}
+	response.Success(c, out)
+}
diff --git a/backend/internal/handler/auth_handler.go b/backend/internal/handler/auth_handler.go
new file mode 100644
index 00000000..882e4cf2
--- /dev/null
+++ b/backend/internal/handler/auth_handler.go
@@ -0,0 +1,240 @@
+package handler
+
+import (
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// AuthHandler handles authentication-related requests
+type AuthHandler struct {
+	cfg *config.Config
+	authService *service.AuthService
+	userService *service.UserService
+	settingSvc *service.SettingService
+	promoService *service.PromoService
+}
+
+// NewAuthHandler creates a new AuthHandler
+func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService) *AuthHandler {
+	return &AuthHandler{
+		cfg: cfg,
+		authService: authService,
+		userService: userService,
+		settingSvc: settingService,
+		promoService: promoService,
+	}
+}
+
+// RegisterRequest represents the registration request payload
+type RegisterRequest struct {
+	Email string `json:"email" binding:"required,email"`
+	Password string `json:"password" binding:"required,min=6"`
+	VerifyCode string `json:"verify_code"`
+	TurnstileToken string `json:"turnstile_token"`
+	PromoCode string `json:"promo_code"` // registration promo code
+}
+
+// SendVerifyCodeRequest represents the send-verification-code request payload
+type SendVerifyCodeRequest struct {
+	Email string `json:"email" binding:"required,email"`
+	TurnstileToken string `json:"turnstile_token"`
+}
+
+// SendVerifyCodeResponse represents the send-verification-code response payload
+type SendVerifyCodeResponse struct {
+	Message string `json:"message"`
+	Countdown int `json:"countdown"` // countdown in seconds
+}
+
+// LoginRequest represents the login request payload
+type LoginRequest struct {
+	Email string `json:"email" binding:"required,email"`
+	Password string `json:"password" binding:"required"`
+	TurnstileToken string `json:"turnstile_token"`
+}
+
+// AuthResponse is the authentication response format (matches what the frontend expects)
+type AuthResponse struct {
+	AccessToken string `json:"access_token"`
+	TokenType string `json:"token_type"`
+	User *dto.User `json:"user"`
+}
+
+// Register handles user registration
+// POST /api/v1/auth/register
+func (h *AuthHandler) Register(c *gin.Context) {
+	var req RegisterRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Turnstile verification (skipped when an email verify code is provided,
+	// because the token was already verified when the code was sent)
+	if req.VerifyCode == "" {
+		if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil {
+			response.ErrorFrom(c, err)
+			return
+		}
+	}
+
+	token, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode, req.PromoCode)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, AuthResponse{
+		AccessToken: token,
+		TokenType: "Bearer",
+		User: dto.UserFromService(user),
+	})
+}
+
+// SendVerifyCode sends an email verification code
+// POST /api/v1/auth/send-verify-code
+func (h *AuthHandler) SendVerifyCode(c *gin.Context) {
+	var req SendVerifyCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Turnstile verification
+	if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	result, err := h.authService.SendVerifyCodeAsync(c.Request.Context(), req.Email)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, SendVerifyCodeResponse{
+		Message: "Verification code sent successfully",
+		Countdown: result.Countdown,
+	})
+}
+
+// Login handles user login
+// POST /api/v1/auth/login
+func (h *AuthHandler) Login(c *gin.Context) {
+	var req LoginRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	// Turnstile verification
+	if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	token, user, err := h.authService.Login(c.Request.Context(), req.Email, req.Password)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, AuthResponse{
+		AccessToken: token,
+		TokenType: "Bearer",
+		User: dto.UserFromService(user),
+	})
+}
+
+// GetCurrentUser handles getting current authenticated user
+// GET /api/v1/auth/me
+func (h *AuthHandler) GetCurrentUser(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	user, err := h.userService.GetByID(c.Request.Context(), subject.UserID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	type UserResponse struct {
+		*dto.User
+		RunMode string `json:"run_mode"`
+	}
+
+	runMode := config.RunModeStandard
+	if h.cfg != nil {
+		runMode = h.cfg.RunMode
+	}
+
+	response.Success(c, UserResponse{User: dto.UserFromService(user), RunMode: runMode})
+}
+
+// ValidatePromoCodeRequest represents the promo code validation request
+type ValidatePromoCodeRequest struct {
+	Code string `json:"code" binding:"required"`
+}
+
+// ValidatePromoCodeResponse represents the promo code validation response
+type ValidatePromoCodeResponse struct {
+	Valid bool `json:"valid"`
+	BonusAmount float64 `json:"bonus_amount,omitempty"`
+	ErrorCode string `json:"error_code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+// ValidatePromoCode validates a promo code (public endpoint, called before registration)
+// POST /api/v1/auth/validate-promo-code
+func (h *AuthHandler) ValidatePromoCode(c *gin.Context) {
+	var req ValidatePromoCodeRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	promoCode, err := h.promoService.ValidatePromoCode(c.Request.Context(), req.Code)
+	if err != nil {
+		// Map the error type to the corresponding error code
+		errorCode := "PROMO_CODE_INVALID"
+		switch err {
+		case service.ErrPromoCodeNotFound:
+			errorCode = "PROMO_CODE_NOT_FOUND"
+		case service.ErrPromoCodeExpired:
+			errorCode = "PROMO_CODE_EXPIRED"
+		case service.ErrPromoCodeDisabled:
+			errorCode = "PROMO_CODE_DISABLED"
+		case service.ErrPromoCodeMaxUsed:
+			errorCode = "PROMO_CODE_MAX_USED"
+		case service.ErrPromoCodeAlreadyUsed:
+			errorCode = "PROMO_CODE_ALREADY_USED"
+		}
+
+		response.Success(c, ValidatePromoCodeResponse{
+			Valid: false,
+			ErrorCode: errorCode,
+		})
+		return
+	}
+
+	if promoCode == nil {
+		response.Success(c, ValidatePromoCodeResponse{
+			Valid: false,
+			ErrorCode: "PROMO_CODE_INVALID",
+		})
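+		// Defensive branch: ValidatePromoCode is not expected to return (nil, nil),
+		// but fail closed with a generic invalid code rather than panic below.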
+		return
+	}
+
+	response.Success(c, ValidatePromoCodeResponse{
+		Valid: true,
+		BonusAmount: promoCode.BonusAmount,
+	})
+}
diff --git a/backend/internal/handler/auth_linuxdo_oauth.go b/backend/internal/handler/auth_linuxdo_oauth.go
new file mode 100644
index 00000000..a16c4cc7
--- /dev/null
+++ b/backend/internal/handler/auth_linuxdo_oauth.go
@@ -0,0 +1,679 @@
+package handler
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"log"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/oauth"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+	"github.com/imroc/req/v3"
+	"github.com/tidwall/gjson"
+)
+
+const (
+	linuxDoOAuthCookiePath = "/api/v1/auth/oauth/linuxdo"
+	linuxDoOAuthStateCookieName = "linuxdo_oauth_state"
+	linuxDoOAuthVerifierCookie = "linuxdo_oauth_verifier"
+	linuxDoOAuthRedirectCookie = "linuxdo_oauth_redirect"
+	linuxDoOAuthCookieMaxAgeSec = 10 * 60 // 10 minutes
+	linuxDoOAuthDefaultRedirectTo = "/dashboard"
+	linuxDoOAuthDefaultFrontendCB = "/auth/linuxdo/callback"
+
+	linuxDoOAuthMaxRedirectLen = 2048
+	linuxDoOAuthMaxFragmentValueLen = 512
+	linuxDoOAuthMaxSubjectLen = 64 - len("linuxdo-")
+)
+
+type linuxDoTokenResponse struct {
+	AccessToken string `json:"access_token"`
+	TokenType string `json:"token_type"`
+	ExpiresIn int64 `json:"expires_in"`
+	RefreshToken string `json:"refresh_token,omitempty"`
+	Scope string `json:"scope,omitempty"`
+}
+
+type linuxDoTokenExchangeError struct {
+	StatusCode int
+	ProviderError string
+	ProviderDescription string
+	Body string
+}
+
+func (e *linuxDoTokenExchangeError) Error() string {
+	if e == nil {
+		return ""
+	}
+	parts := []string{fmt.Sprintf("token exchange status=%d", e.StatusCode)}
+	if strings.TrimSpace(e.ProviderError) != "" {
+		parts = append(parts, "error="+strings.TrimSpace(e.ProviderError))
+	}
+	if strings.TrimSpace(e.ProviderDescription) != "" {
+		parts = append(parts, "error_description="+strings.TrimSpace(e.ProviderDescription))
+	}
+	return strings.Join(parts, " ")
+}
+
+// LinuxDoOAuthStart starts the LinuxDo Connect OAuth login flow.
+// GET /api/v1/auth/oauth/linuxdo/start?redirect=/dashboard
+func (h *AuthHandler) LinuxDoOAuthStart(c *gin.Context) {
+	cfg, err := h.getLinuxDoOAuthConfig(c.Request.Context())
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	state, err := oauth.GenerateState()
+	if err != nil {
+		response.ErrorFrom(c, infraerrors.InternalServer("OAUTH_STATE_GEN_FAILED", "failed to generate oauth state").WithCause(err))
+		return
+	}
+
+	redirectTo := sanitizeFrontendRedirectPath(c.Query("redirect"))
+	if redirectTo == "" {
+		redirectTo = linuxDoOAuthDefaultRedirectTo
+	}
+
+	secureCookie := isRequestHTTPS(c)
+	setCookie(c, linuxDoOAuthStateCookieName, encodeCookieValue(state), linuxDoOAuthCookieMaxAgeSec, secureCookie)
+	setCookie(c, linuxDoOAuthRedirectCookie, encodeCookieValue(redirectTo), linuxDoOAuthCookieMaxAgeSec, secureCookie)
+
+	codeChallenge := ""
+	if cfg.UsePKCE {
+		verifier, err := oauth.GenerateCodeVerifier()
+		if err != nil {
+			response.ErrorFrom(c, infraerrors.InternalServer("OAUTH_PKCE_GEN_FAILED", "failed to generate pkce verifier").WithCause(err))
+			return
+		}
+		codeChallenge = oauth.GenerateCodeChallenge(verifier)
+		setCookie(c, linuxDoOAuthVerifierCookie, encodeCookieValue(verifier), linuxDoOAuthCookieMaxAgeSec, secureCookie)
+	}
+
+	redirectURI := strings.TrimSpace(cfg.RedirectURL)
+	if redirectURI == "" {
+		response.ErrorFrom(c, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url not configured"))
+		return
+	}
+
+	authURL, err := buildLinuxDoAuthorizeURL(cfg, state, codeChallenge, redirectURI)
+	if err != nil {
+		response.ErrorFrom(c, infraerrors.InternalServer("OAUTH_BUILD_URL_FAILED", "failed to build oauth authorization url").WithCause(err))
+		return
+	}
+
+	c.Redirect(http.StatusFound, authURL)
+}
+
+// LinuxDoOAuthCallback handles the OAuth callback: it creates or logs in the user, then redirects to the frontend.
+// GET /api/v1/auth/oauth/linuxdo/callback?code=...&state=...
+func (h *AuthHandler) LinuxDoOAuthCallback(c *gin.Context) {
+	cfg, cfgErr := h.getLinuxDoOAuthConfig(c.Request.Context())
+	if cfgErr != nil {
+		response.ErrorFrom(c, cfgErr)
+		return
+	}
+
+	frontendCallback := strings.TrimSpace(cfg.FrontendRedirectURL)
+	if frontendCallback == "" {
+		frontendCallback = linuxDoOAuthDefaultFrontendCB
+	}
+
+	if providerErr := strings.TrimSpace(c.Query("error")); providerErr != "" {
+		redirectOAuthError(c, frontendCallback, "provider_error", providerErr, c.Query("error_description"))
+		return
+	}
+
+	code := strings.TrimSpace(c.Query("code"))
+	state := strings.TrimSpace(c.Query("state"))
+	if code == "" || state == "" {
+		redirectOAuthError(c, frontendCallback, "missing_params", "missing code/state", "")
+		return
+	}
+
+	secureCookie := isRequestHTTPS(c)
+	defer func() {
+		clearCookie(c, linuxDoOAuthStateCookieName, secureCookie)
+		clearCookie(c, linuxDoOAuthVerifierCookie, secureCookie)
+		clearCookie(c, linuxDoOAuthRedirectCookie, secureCookie)
+	}()
+
+	expectedState, err := readCookieDecoded(c, linuxDoOAuthStateCookieName)
+	if err != nil || expectedState == "" || state != expectedState {
+		redirectOAuthError(c, frontendCallback, "invalid_state", "invalid oauth state", "")
+		return
+	}
+
+	redirectTo, _ := readCookieDecoded(c, linuxDoOAuthRedirectCookie)
+	redirectTo = sanitizeFrontendRedirectPath(redirectTo)
+	if redirectTo == "" {
+		redirectTo = linuxDoOAuthDefaultRedirectTo
+	}
+
+	codeVerifier := ""
+	if cfg.UsePKCE {
+		codeVerifier, _ = readCookieDecoded(c, linuxDoOAuthVerifierCookie)
+		if codeVerifier == "" {
+			redirectOAuthError(c, frontendCallback, "missing_verifier", "missing pkce verifier", "")
+			return
+		}
+	}
+
+	redirectURI := strings.TrimSpace(cfg.RedirectURL)
+	if redirectURI == "" {
+		redirectOAuthError(c, frontendCallback, "config_error", "oauth redirect url not configured", "")
+		return
+	}
+
+	tokenResp, err := linuxDoExchangeCode(c.Request.Context(), cfg, code, redirectURI, codeVerifier)
+	if err != nil {
+		description := ""
+		var exchangeErr *linuxDoTokenExchangeError
+		if errors.As(err, &exchangeErr) && exchangeErr != nil {
+			log.Printf(
+				"[LinuxDo OAuth] token exchange failed: status=%d provider_error=%q provider_description=%q body=%s",
+				exchangeErr.StatusCode,
+				exchangeErr.ProviderError,
+				exchangeErr.ProviderDescription,
+				truncateLogValue(exchangeErr.Body, 2048),
+			)
+			description = exchangeErr.Error()
+		} else {
+			log.Printf("[LinuxDo OAuth] token exchange failed: %v", err)
+			description = err.Error()
+		}
+		redirectOAuthError(c, frontendCallback, "token_exchange_failed", "failed to exchange oauth code", singleLine(description))
+		return
+	}
+
+	email, username, subject, err := linuxDoFetchUserInfo(c.Request.Context(), cfg, tokenResp)
+	if err != nil {
+		log.Printf("[LinuxDo OAuth] userinfo fetch failed: %v", err)
+		redirectOAuthError(c, frontendCallback, "userinfo_failed", "failed to fetch user info", "")
+		return
+	}
+
+	// Security: never map a provider-returned email directly onto a local account
+	// (it may collide with an existing local email user and allow account takeover).
+	// Always bind accounts through a stable synthetic email derived from the subject.
+	if subject != "" {
+		email = linuxDoSyntheticEmail(subject)
+	}
+
+	jwtToken, _, err := h.authService.LoginOrRegisterOAuth(c.Request.Context(), email, username)
+	if err != nil {
+		// Avoid leaking internal details to the client; a structured reason plus a hint message is enough for the frontend.
+		redirectOAuthError(c, frontendCallback, "login_failed", infraerrors.Reason(err), infraerrors.Message(err))
+		return
+	}
+
+	fragment := url.Values{}
+	fragment.Set("access_token", jwtToken)
+	fragment.Set("token_type", "Bearer")
+	fragment.Set("redirect", redirectTo)
+	redirectWithFragment(c, frontendCallback, fragment)
+}
+
+func (h *AuthHandler) getLinuxDoOAuthConfig(ctx context.Context) (config.LinuxDoConnectConfig, error) {
+	if h != nil && h.settingSvc != nil {
+		return h.settingSvc.GetLinuxDoConnectOAuthConfig(ctx)
+	}
+	if h == nil || h.cfg == nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.ServiceUnavailable("CONFIG_NOT_READY", "config not loaded")
+	}
+	if !h.cfg.LinuxDo.Enabled {
+		return config.LinuxDoConnectConfig{}, infraerrors.NotFound("OAUTH_DISABLED", "oauth login is disabled")
+	}
+	return h.cfg.LinuxDo, nil
+}
+
+func linuxDoExchangeCode(
+	ctx context.Context,
+	cfg config.LinuxDoConnectConfig,
+	code string,
+	redirectURI string,
+	codeVerifier string,
+) (*linuxDoTokenResponse, error) {
+	client := req.C().SetTimeout(30 * time.Second)
+
+	form := url.Values{}
+	form.Set("grant_type", "authorization_code")
+	form.Set("client_id", cfg.ClientID)
+	form.Set("code", code)
+	form.Set("redirect_uri", redirectURI)
+	if cfg.UsePKCE {
+		form.Set("code_verifier", codeVerifier)
+	}
+
+	r := client.R().
+		SetContext(ctx).
+		SetHeader("Accept", "application/json")
+
+	switch strings.ToLower(strings.TrimSpace(cfg.TokenAuthMethod)) {
+	case "", "client_secret_post":
+		form.Set("client_secret", cfg.ClientSecret)
+	case "client_secret_basic":
+		r.SetBasicAuth(cfg.ClientID, cfg.ClientSecret)
+	case "none":
+	default:
+		return nil, fmt.Errorf("unsupported token_auth_method: %s", cfg.TokenAuthMethod)
+	}
+
+	resp, err := r.SetFormDataFromValues(form).Post(cfg.TokenURL)
+	if err != nil {
+		return nil, fmt.Errorf("request token: %w", err)
+	}
+	body := strings.TrimSpace(resp.String())
+	if !resp.IsSuccessState() {
+		providerErr, providerDesc := parseOAuthProviderError(body)
+		return nil, &linuxDoTokenExchangeError{
+			StatusCode: resp.StatusCode,
+			ProviderError: providerErr,
+			ProviderDescription: providerDesc,
+			Body: body,
+		}
+	}
+
+	tokenResp, ok := parseLinuxDoTokenResponse(body)
+	if !ok || strings.TrimSpace(tokenResp.AccessToken) == "" {
+		return nil, &linuxDoTokenExchangeError{
+			StatusCode: resp.StatusCode,
+			Body: body,
+		}
+	}
+	if strings.TrimSpace(tokenResp.TokenType) == "" {
+		tokenResp.TokenType = "Bearer"
+	}
+	return tokenResp, nil
+}
+
+func linuxDoFetchUserInfo(
+	ctx context.Context,
+	cfg config.LinuxDoConnectConfig,
+	token *linuxDoTokenResponse,
+) (email string, username string, subject string, err error) {
+	client := req.C().SetTimeout(30 * time.Second)
+	authorization, err := buildBearerAuthorization(token.TokenType, token.AccessToken)
+	if err != nil {
+		return "", "", "", fmt.Errorf("invalid token for userinfo request: %w", err)
+	}
+
+	resp, err := client.R().
+		SetContext(ctx).
+		SetHeader("Accept", "application/json").
+		SetHeader("Authorization", authorization).
+		Get(cfg.UserInfoURL)
+	if err != nil {
+		return "", "", "", fmt.Errorf("request userinfo: %w", err)
+	}
+	if !resp.IsSuccessState() {
+		return "", "", "", fmt.Errorf("userinfo status=%d", resp.StatusCode)
+	}
+
+	return linuxDoParseUserInfo(resp.String(), cfg)
+}
+
+func linuxDoParseUserInfo(body string, cfg config.LinuxDoConnectConfig) (email string, username string, subject string, err error) {
+	email = firstNonEmpty(
+		getGJSON(body, cfg.UserInfoEmailPath),
+		getGJSON(body, "email"),
+		getGJSON(body, "user.email"),
+		getGJSON(body, "data.email"),
+		getGJSON(body, "attributes.email"),
+	)
+	username = firstNonEmpty(
+		getGJSON(body, cfg.UserInfoUsernamePath),
+		getGJSON(body, "username"),
+		getGJSON(body, "preferred_username"),
+		getGJSON(body, "name"),
+		getGJSON(body, "user.username"),
+		getGJSON(body, "user.name"),
+	)
+	subject = firstNonEmpty(
+		getGJSON(body, cfg.UserInfoIDPath),
+		getGJSON(body, "sub"),
+		getGJSON(body, "id"),
+		getGJSON(body, "user_id"),
+		getGJSON(body, "uid"),
+		getGJSON(body, "user.id"),
+	)
+
+	subject = strings.TrimSpace(subject)
+	if subject == "" {
+		return "", "", "", errors.New("userinfo missing id field")
+	}
+	if !isSafeLinuxDoSubject(subject) {
+		return "", "", "", errors.New("userinfo returned invalid id field")
+	}
+
+	email = strings.TrimSpace(email)
+	if email == "" {
+		// LinuxDo Connect userinfo may not provide an email. To stay compatible with the
+		// existing user model (email is required and unique), use a stable synthetic email.
+		email = linuxDoSyntheticEmail(subject)
+	}
+
+	username = strings.TrimSpace(username)
+	if username == "" {
+		username = "linuxdo_" + subject
+	}
+
+	return email, username, subject, nil
+}
+
+func buildLinuxDoAuthorizeURL(cfg config.LinuxDoConnectConfig, state string, codeChallenge string, redirectURI string) (string, error) {
+	u, err := url.Parse(cfg.AuthorizeURL)
+	if err != nil {
+		return "", fmt.Errorf("parse authorize_url: %w", err)
+	}
+
+	q := u.Query()
+	q.Set("response_type", "code")
+	q.Set("client_id", cfg.ClientID)
+	q.Set("redirect_uri", redirectURI)
+	if strings.TrimSpace(cfg.Scopes) != "" {
+		q.Set("scope", cfg.Scopes)
+	}
+	q.Set("state", state)
+	if cfg.UsePKCE {
+		q.Set("code_challenge", codeChallenge)
+		q.Set("code_challenge_method", "S256")
+	}
+
+	u.RawQuery = q.Encode()
+	return u.String(), nil
+}
+
+func redirectOAuthError(c *gin.Context, frontendCallback string, code string, message string, description string) {
+	fragment := url.Values{}
+	fragment.Set("error", truncateFragmentValue(code))
+	if strings.TrimSpace(message) != "" {
+		fragment.Set("error_message", truncateFragmentValue(message))
+	}
+	if strings.TrimSpace(description) != "" {
+		fragment.Set("error_description", truncateFragmentValue(description))
+	}
+	redirectWithFragment(c, frontendCallback, fragment)
+}
+
+func redirectWithFragment(c *gin.Context, frontendCallback string, fragment url.Values) {
+	u, err := url.Parse(frontendCallback)
+	if err != nil {
+		// Fallback: make a best-effort redirect to the default page so the user is not stuck on the callback page.
+		c.Redirect(http.StatusFound, linuxDoOAuthDefaultRedirectTo)
+		return
+	}
+	if u.Scheme != "" && !strings.EqualFold(u.Scheme, "http") && !strings.EqualFold(u.Scheme, "https") {
+		c.Redirect(http.StatusFound, linuxDoOAuthDefaultRedirectTo)
+		return
+	}
+	u.Fragment = fragment.Encode()
+	c.Header("Cache-Control", "no-store")
+	c.Header("Pragma", "no-cache")
+	c.Redirect(http.StatusFound, u.String())
+}
+
+func firstNonEmpty(values ...string) string {
+	for _, v := range values {
+		v = strings.TrimSpace(v)
+		if v != "" {
+			return v
+		}
+	}
+	return ""
+}
+
+func parseOAuthProviderError(body string) (providerErr string, providerDesc string) {
+	body = strings.TrimSpace(body)
+	if body == "" {
+		return "", ""
+	}
+
+	providerErr = firstNonEmpty(
+		getGJSON(body, "error"),
+		getGJSON(body, "code"),
+		getGJSON(body, "error.code"),
+	)
+	providerDesc = firstNonEmpty(
+		getGJSON(body, "error_description"),
+		getGJSON(body, "error.message"),
+		getGJSON(body, "message"),
+		getGJSON(body, "detail"),
+	)
+
+	if providerErr != "" || providerDesc != "" {
+		return providerErr, providerDesc
+	}
+
+	values, err := url.ParseQuery(body)
+	if err != nil {
+		return "", ""
+	}
+	providerErr = firstNonEmpty(values.Get("error"), values.Get("code"))
+	providerDesc = firstNonEmpty(values.Get("error_description"), values.Get("error_message"), values.Get("message"))
+	return providerErr, providerDesc
+}
+
+func parseLinuxDoTokenResponse(body string) (*linuxDoTokenResponse, bool) {
+	body = strings.TrimSpace(body)
+	if body == "" {
+		return nil, false
+	}
+
+	accessToken := strings.TrimSpace(getGJSON(body, "access_token"))
+	if accessToken != "" {
+		tokenType := strings.TrimSpace(getGJSON(body, "token_type"))
+		refreshToken := strings.TrimSpace(getGJSON(body, "refresh_token"))
+		scope := strings.TrimSpace(getGJSON(body, "scope"))
+		expiresIn := gjson.Get(body, "expires_in").Int()
+		return &linuxDoTokenResponse{
+			AccessToken: accessToken,
+			TokenType: tokenType,
+			ExpiresIn: expiresIn,
+			RefreshToken: refreshToken,
+			Scope: scope,
+		}, true
+	}
+
+	values, err := url.ParseQuery(body)
+	if err != nil {
+		return nil, false
+	}
+	accessToken = strings.TrimSpace(values.Get("access_token"))
+	if accessToken == "" {
+		return nil, false
+	}
+	expiresIn := int64(0)
+	if raw := strings.TrimSpace(values.Get("expires_in")); raw != "" {
+		if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
+			expiresIn = v
+		}
+	}
+	return &linuxDoTokenResponse{
+		AccessToken: accessToken,
+		TokenType: strings.TrimSpace(values.Get("token_type")),
+		ExpiresIn: expiresIn,
+		RefreshToken: strings.TrimSpace(values.Get("refresh_token")),
+		Scope: strings.TrimSpace(values.Get("scope")),
+	}, true
+}
+
+func getGJSON(body string, path string) string {
+	path = strings.TrimSpace(path)
+	if path == "" {
+		return ""
+	}
+	res := gjson.Get(body, path)
+	if !res.Exists() {
+		return ""
+	}
+	return res.String()
+}
+
+func truncateLogValue(value string, maxLen int) string {
+	value = strings.TrimSpace(value)
+	if value == "" || maxLen <= 0 {
+		return ""
+	}
+	if len(value) <= maxLen {
+		return value
+	}
+	value = value[:maxLen]
+	for !utf8.ValidString(value) {
+		value = value[:len(value)-1]
+	}
+	return value
+}
+
+func singleLine(value string) string {
+	value = strings.TrimSpace(value)
+	if value == "" {
+		return ""
+	}
+	return strings.Join(strings.Fields(value), " ")
+}
+
+func sanitizeFrontendRedirectPath(path string) string {
+	path = strings.TrimSpace(path)
+	if path == "" {
+		return ""
+	}
+	if len(path) > linuxDoOAuthMaxRedirectLen {
+		return ""
+	}
+	// Only allow same-origin relative paths (prevents open redirects).
+	if !strings.HasPrefix(path, "/") {
+		return ""
+	}
+	if strings.HasPrefix(path, "//") {
+		return ""
+	}
+	if strings.Contains(path, "://") {
+		return ""
+	}
+	if strings.ContainsAny(path, "\r\n") {
+		return ""
+	}
+	return path
+}
+
+func isRequestHTTPS(c *gin.Context) bool {
+	if c.Request.TLS != nil {
+		return true
+	}
+	proto := strings.ToLower(strings.TrimSpace(c.GetHeader("X-Forwarded-Proto")))
+	return proto == "https"
+}
+
+func encodeCookieValue(value string) string {
+	return base64.RawURLEncoding.EncodeToString([]byte(value))
+}
+
+func decodeCookieValue(value string) (string, error) {
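+	// Mirrors encodeCookieValue: unpadded URL-safe base64, so stored cookie
+	// values never contain characters that need quoting.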
+	raw, err := base64.RawURLEncoding.DecodeString(value)
+	if err != nil {
+		return "", err
+	}
+	return string(raw), nil
+}
+
+func readCookieDecoded(c *gin.Context, name string) (string, error) {
+	ck, err := c.Request.Cookie(name)
+	if err != nil {
+		return "", err
+	}
+	return decodeCookieValue(ck.Value)
+}
+
+func setCookie(c *gin.Context, name string, value string, maxAgeSec int, secure bool) {
+	http.SetCookie(c.Writer, &http.Cookie{
+		Name: name,
+		Value: value,
+		Path: linuxDoOAuthCookiePath,
+		MaxAge: maxAgeSec,
+		HttpOnly: true,
+		Secure: secure,
+		SameSite: http.SameSiteLaxMode,
+	})
+}
+
+func clearCookie(c *gin.Context, name string, secure bool) {
+	http.SetCookie(c.Writer, &http.Cookie{
+		Name: name,
+		Value: "",
+		Path: linuxDoOAuthCookiePath,
+		MaxAge: -1,
+		HttpOnly: true,
+		Secure: secure,
+		SameSite: http.SameSiteLaxMode,
+	})
+}
+
+func truncateFragmentValue(value string) string {
+	value = strings.TrimSpace(value)
+	if value == "" {
+		return ""
+	}
+	if len(value) > linuxDoOAuthMaxFragmentValueLen {
+		value = value[:linuxDoOAuthMaxFragmentValueLen]
+		for !utf8.ValidString(value) {
+			value = value[:len(value)-1]
+		}
+	}
+	return value
+}
+
+func buildBearerAuthorization(tokenType, accessToken string) (string, error) {
+	tokenType = strings.TrimSpace(tokenType)
+	if tokenType == "" {
+		tokenType = "Bearer"
+	}
+	if !strings.EqualFold(tokenType, "Bearer") {
+		return "", fmt.Errorf("unsupported token_type: %s", tokenType)
+	}
+
+	accessToken = strings.TrimSpace(accessToken)
+	if accessToken == "" {
+		return "", errors.New("missing access_token")
+	}
+	if strings.ContainsAny(accessToken, " \t\r\n") {
+		return "", errors.New("access_token contains whitespace")
+	}
+	return "Bearer " + accessToken, nil
+}
+
+func isSafeLinuxDoSubject(subject string) bool {
+	subject = strings.TrimSpace(subject)
+	if subject == "" || len(subject) > linuxDoOAuthMaxSubjectLen {
+		return false
+	}
+	for _, r := range subject {
+		switch {
+		case r >= '0' && r <= '9':
+		case r >= 'a' && r <= 'z':
+		case r >= 'A' && r <= 'Z':
+		case r == '_' || r == '-':
+		default:
+			return false
+		}
+	}
+	return true
+}
+
+func linuxDoSyntheticEmail(subject string) string {
+	subject = strings.TrimSpace(subject)
+	if subject == "" {
+		return ""
+	}
+	return "linuxdo-" + subject + service.LinuxDoConnectSyntheticEmailDomain
+}
diff --git a/backend/internal/handler/auth_linuxdo_oauth_test.go b/backend/internal/handler/auth_linuxdo_oauth_test.go
new file mode 100644
index 00000000..ff169c52
--- /dev/null
+++ b/backend/internal/handler/auth_linuxdo_oauth_test.go
@@ -0,0 +1,108 @@
+package handler
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+func TestSanitizeFrontendRedirectPath(t *testing.T) {
+	require.Equal(t, "/dashboard", sanitizeFrontendRedirectPath("/dashboard"))
+	require.Equal(t, "/dashboard", sanitizeFrontendRedirectPath(" /dashboard "))
+	require.Equal(t, "", sanitizeFrontendRedirectPath("dashboard"))
+	require.Equal(t, "", sanitizeFrontendRedirectPath("//evil.com"))
+	require.Equal(t, "", sanitizeFrontendRedirectPath("https://evil.com"))
+	require.Equal(t, "", sanitizeFrontendRedirectPath("/\nfoo"))
+
+	long := "/" + strings.Repeat("a", linuxDoOAuthMaxRedirectLen)
+	require.Equal(t, "", sanitizeFrontendRedirectPath(long))
+}
+
+func TestBuildBearerAuthorization(t *testing.T) {
+	auth, err := buildBearerAuthorization("", "token123")
+	require.NoError(t, err)
+	require.Equal(t, "Bearer token123", auth)
+
+	auth, err = buildBearerAuthorization("bearer", "token123")
+	require.NoError(t, err)
+	require.Equal(t, "Bearer token123", auth)
+
+	_, err = buildBearerAuthorization("MAC", "token123")
+	require.Error(t, err)
+
+	_, err = buildBearerAuthorization("Bearer", "token 123")
+	require.Error(t, err)
+}
+
+func TestLinuxDoParseUserInfoParsesIDAndUsername(t *testing.T) {
+	cfg := config.LinuxDoConnectConfig{
+		UserInfoURL: "https://connect.linux.do/api/user",
+	}
+
+	email, username, subject, err := linuxDoParseUserInfo(`{"id":123,"username":"alice"}`, cfg)
+	require.NoError(t, err)
+	require.Equal(t, "123", subject)
+	require.Equal(t, "alice", username)
+	require.Equal(t, "linuxdo-123@linuxdo-connect.invalid", email)
+}
+
+func TestLinuxDoParseUserInfoDefaultsUsername(t *testing.T) {
+	cfg := config.LinuxDoConnectConfig{
+		UserInfoURL: "https://connect.linux.do/api/user",
+	}
+
+	email, username, subject, err := linuxDoParseUserInfo(`{"id":"123"}`, cfg)
+	require.NoError(t, err)
+	require.Equal(t, "123", subject)
+	require.Equal(t, "linuxdo_123", username)
+	require.Equal(t, "linuxdo-123@linuxdo-connect.invalid", email)
+}
+
+func TestLinuxDoParseUserInfoRejectsUnsafeSubject(t *testing.T) {
+	cfg := config.LinuxDoConnectConfig{
+		UserInfoURL: "https://connect.linux.do/api/user",
+	}
+
+	_, _, _, err := linuxDoParseUserInfo(`{"id":"123@456"}`, cfg)
+	require.Error(t, err)
+
+	tooLong := strings.Repeat("a", linuxDoOAuthMaxSubjectLen+1)
+	_, _, _, err = linuxDoParseUserInfo(`{"id":"`+tooLong+`"}`, cfg)
+	require.Error(t, err)
+}
+
+func TestParseOAuthProviderErrorJSON(t *testing.T) {
+	code, desc := parseOAuthProviderError(`{"error":"invalid_client","error_description":"bad secret"}`)
+	require.Equal(t, "invalid_client", code)
+	require.Equal(t, "bad secret", desc)
+}
+
+func TestParseOAuthProviderErrorForm(t *testing.T) {
+	code, desc := parseOAuthProviderError("error=invalid_request&error_description=Missing+code_verifier")
+	require.Equal(t, "invalid_request", code)
+	require.Equal(t, "Missing code_verifier", desc)
+}
+
+func TestParseLinuxDoTokenResponseJSON(t *testing.T) {
+	token, ok := parseLinuxDoTokenResponse(`{"access_token":"t1","token_type":"Bearer","expires_in":3600,"scope":"user"}`)
+	require.True(t, ok)
+	require.Equal(t, "t1", token.AccessToken)
+	require.Equal(t, "Bearer", token.TokenType)
+	require.Equal(t, int64(3600), token.ExpiresIn)
+	require.Equal(t, "user", token.Scope)
+}
+
+func TestParseLinuxDoTokenResponseForm(t *testing.T) {
+	token, ok := parseLinuxDoTokenResponse("access_token=t2&token_type=bearer&expires_in=60")
+	require.True(t, ok)
+	require.Equal(t, "t2", token.AccessToken)
+	require.Equal(t, "bearer", token.TokenType)
+	require.Equal(t, int64(60), token.ExpiresIn)
+}
+
+func TestSingleLineStripsWhitespace(t *testing.T) {
+	require.Equal(t, "hello world", singleLine("hello\r\nworld"))
+	require.Equal(t, "", singleLine("\n\t\r"))
+}
diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go
new file mode 100644
index 00000000..371f4f52
--- /dev/null
+++ b/backend/internal/handler/dto/mappers.go
@@ -0,0 +1,427 @@
+// Package dto provides data transfer objects for HTTP handlers.
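+// All *FromService mappers below are nil-safe: they return nil when given a
+// nil input, so callers can chain them without extra guards.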
+package dto
+
+import (
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+func UserFromServiceShallow(u *service.User) *User {
+	if u == nil {
+		return nil
+	}
+	return &User{
+		ID: u.ID,
+		Email: u.Email,
+		Username: u.Username,
+		Notes: u.Notes,
+		Role: u.Role,
+		Balance: u.Balance,
+		Concurrency: u.Concurrency,
+		Status: u.Status,
+		AllowedGroups: u.AllowedGroups,
+		CreatedAt: u.CreatedAt,
+		UpdatedAt: u.UpdatedAt,
+	}
+}
+
+func UserFromService(u *service.User) *User {
+	if u == nil {
+		return nil
+	}
+	out := UserFromServiceShallow(u)
+	if len(u.APIKeys) > 0 {
+		out.APIKeys = make([]APIKey, 0, len(u.APIKeys))
+		for i := range u.APIKeys {
+			k := u.APIKeys[i]
+			out.APIKeys = append(out.APIKeys, *APIKeyFromService(&k))
+		}
+	}
+	if len(u.Subscriptions) > 0 {
+		out.Subscriptions = make([]UserSubscription, 0, len(u.Subscriptions))
+		for i := range u.Subscriptions {
+			s := u.Subscriptions[i]
+			out.Subscriptions = append(out.Subscriptions, *UserSubscriptionFromService(&s))
+		}
+	}
+	return out
+}
+
+func APIKeyFromService(k *service.APIKey) *APIKey {
+	if k == nil {
+		return nil
+	}
+	return &APIKey{
+		ID: k.ID,
+		UserID: k.UserID,
+		Key: k.Key,
+		Name: k.Name,
+		GroupID: k.GroupID,
+		Status: k.Status,
+		IPWhitelist: k.IPWhitelist,
+		IPBlacklist: k.IPBlacklist,
+		CreatedAt: k.CreatedAt,
+		UpdatedAt: k.UpdatedAt,
+		User: UserFromServiceShallow(k.User),
+		Group: GroupFromServiceShallow(k.Group),
+	}
+}
+
+func GroupFromServiceShallow(g *service.Group) *Group {
+	if g == nil {
+		return nil
+	}
+	return &Group{
+		ID: g.ID,
+		Name: g.Name,
+		Description: g.Description,
+		Platform: g.Platform,
+		RateMultiplier: g.RateMultiplier,
+		IsExclusive: g.IsExclusive,
+		Status: g.Status,
+		SubscriptionType: g.SubscriptionType,
+		DailyLimitUSD: g.DailyLimitUSD,
+		WeeklyLimitUSD: g.WeeklyLimitUSD,
+		MonthlyLimitUSD: g.MonthlyLimitUSD,
+		ImagePrice1K: g.ImagePrice1K,
+		ImagePrice2K: g.ImagePrice2K,
+		ImagePrice4K: g.ImagePrice4K,
+		ClaudeCodeOnly: g.ClaudeCodeOnly,
+		FallbackGroupID: g.FallbackGroupID,
+		CreatedAt: g.CreatedAt,
+		UpdatedAt: g.UpdatedAt,
+		AccountCount: g.AccountCount,
+	}
+}
+
+func GroupFromService(g *service.Group) *Group {
+	if g == nil {
+		return nil
+	}
+	out := GroupFromServiceShallow(g)
+	if len(g.AccountGroups) > 0 {
+		out.AccountGroups = make([]AccountGroup, 0, len(g.AccountGroups))
+		for i := range g.AccountGroups {
+			ag := g.AccountGroups[i]
+			out.AccountGroups = append(out.AccountGroups, *AccountGroupFromService(&ag))
+		}
+	}
+	return out
+}
+
+func AccountFromServiceShallow(a *service.Account) *Account {
+	if a == nil {
+		return nil
+	}
+	return &Account{
+		ID: a.ID,
+		Name: a.Name,
+		Notes: a.Notes,
+		Platform: a.Platform,
+		Type: a.Type,
+		Credentials: a.Credentials,
+		Extra: a.Extra,
+		ProxyID: a.ProxyID,
+		Concurrency: a.Concurrency,
+		Priority: a.Priority,
+		RateMultiplier: a.BillingRateMultiplier(),
+		Status: a.Status,
+		ErrorMessage: a.ErrorMessage,
+		LastUsedAt: a.LastUsedAt,
+		ExpiresAt: timeToUnixSeconds(a.ExpiresAt),
+		AutoPauseOnExpired: a.AutoPauseOnExpired,
+		CreatedAt: a.CreatedAt,
+		UpdatedAt: a.UpdatedAt,
+		Schedulable: a.Schedulable,
+		RateLimitedAt: a.RateLimitedAt,
+		RateLimitResetAt: a.RateLimitResetAt,
+		OverloadUntil: a.OverloadUntil,
+		TempUnschedulableUntil: a.TempUnschedulableUntil,
+		TempUnschedulableReason: a.TempUnschedulableReason,
+		SessionWindowStart: a.SessionWindowStart,
+		SessionWindowEnd: a.SessionWindowEnd,
+		SessionWindowStatus: a.SessionWindowStatus,
+		GroupIDs: a.GroupIDs,
+	}
+}
+
+func AccountFromService(a *service.Account) *Account {
+	if a == nil {
+		return nil
+	}
+	out := AccountFromServiceShallow(a)
+	out.Proxy = ProxyFromService(a.Proxy)
+	if len(a.AccountGroups) > 0 {
+		out.AccountGroups = make([]AccountGroup, 0, len(a.AccountGroups))
+		for i := range a.AccountGroups {
+			ag := a.AccountGroups[i]
+			out.AccountGroups = append(out.AccountGroups, *AccountGroupFromService(&ag))
+		}
+	}
+	if len(a.Groups) > 0 {
+		out.Groups = make([]*Group, 0, len(a.Groups))
+		for _, g := range a.Groups {
+			out.Groups = append(out.Groups, GroupFromServiceShallow(g))
+		}
+	}
+	return out
+}
+
+func timeToUnixSeconds(value *time.Time) *int64 {
+	if value == nil {
+		return nil
+	}
+	ts := value.Unix()
+	return &ts
+}
+
+func AccountGroupFromService(ag *service.AccountGroup) *AccountGroup {
+	if ag == nil {
+		return nil
+	}
+	return &AccountGroup{
+		AccountID: ag.AccountID,
+		GroupID: ag.GroupID,
+		Priority: ag.Priority,
+		CreatedAt: ag.CreatedAt,
+		Account: AccountFromServiceShallow(ag.Account),
+		Group: GroupFromServiceShallow(ag.Group),
+	}
+}
+
+func ProxyFromService(p *service.Proxy) *Proxy {
+	if p == nil {
+		return nil
+	}
+	return &Proxy{
+		ID: p.ID,
+		Name: p.Name,
+		Protocol: p.Protocol,
+		Host: p.Host,
+		Port: p.Port,
+		Username: p.Username,
+		Password: p.Password,
+		Status: p.Status,
+		CreatedAt: p.CreatedAt,
+		UpdatedAt: p.UpdatedAt,
+	}
+}
+
+func ProxyWithAccountCountFromService(p *service.ProxyWithAccountCount) *ProxyWithAccountCount {
+	if p == nil {
+		return nil
+	}
+	return &ProxyWithAccountCount{
+		Proxy: *ProxyFromService(&p.Proxy),
+		AccountCount: p.AccountCount,
+		LatencyMs: p.LatencyMs,
+		LatencyStatus: p.LatencyStatus,
+		LatencyMessage: p.LatencyMessage,
+		IPAddress: p.IPAddress,
+		Country: p.Country,
+		CountryCode: p.CountryCode,
+		Region: p.Region,
+		City: p.City,
+	}
+}
+
+func ProxyAccountSummaryFromService(a *service.ProxyAccountSummary) *ProxyAccountSummary {
+	if a == nil {
+		return nil
+	}
+	return &ProxyAccountSummary{
+		ID: a.ID,
+		Name: a.Name,
+		Platform: a.Platform,
+		Type: a.Type,
+		Notes: a.Notes,
+	}
+}
+
+func RedeemCodeFromService(rc *service.RedeemCode) *RedeemCode {
+	if rc == nil {
+		return nil
+	}
+	return &RedeemCode{
+		ID: rc.ID,
+		Code: rc.Code,
+		Type: rc.Type,
+		Value: rc.Value,
+		Status: rc.Status,
+		UsedBy: rc.UsedBy,
+		UsedAt: rc.UsedAt,
+		Notes: rc.Notes,
+		CreatedAt: rc.CreatedAt,
+		GroupID: rc.GroupID,
+		ValidityDays: rc.ValidityDays,
+		User: UserFromServiceShallow(rc.User),
+		Group: GroupFromServiceShallow(rc.Group),
+	}
+}
+
+// AccountSummaryFromService returns a minimal AccountSummary for usage log display.
+// Only includes ID and Name - no sensitive fields like Credentials, Proxy, etc.
+func AccountSummaryFromService(a *service.Account) *AccountSummary {
+	if a == nil {
+		return nil
+	}
+	return &AccountSummary{
+		ID: a.ID,
+		Name: a.Name,
+	}
+}
+
+// usageLogFromServiceBase is a helper that converts a service UsageLog to its DTO.
+// The account parameter allows the caller to control what Account info is included.
+// The includeIPAddress parameter controls whether to include the IP address (admin-only).
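+// For example (illustrative): the user-facing wrapper calls
+// usageLogFromServiceBase(l, nil, false), while the admin wrapper passes a
+// populated AccountSummary and includeIPAddress=true.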
+func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, includeIPAddress bool) *UsageLog {
+	if l == nil {
+		return nil
+	}
+	result := &UsageLog{
+		ID: l.ID,
+		UserID: l.UserID,
+		APIKeyID: l.APIKeyID,
+		AccountID: l.AccountID,
+		RequestID: l.RequestID,
+		Model: l.Model,
+		GroupID: l.GroupID,
+		SubscriptionID: l.SubscriptionID,
+		InputTokens: l.InputTokens,
+		OutputTokens: l.OutputTokens,
+		CacheCreationTokens: l.CacheCreationTokens,
+		CacheReadTokens: l.CacheReadTokens,
+		CacheCreation5mTokens: l.CacheCreation5mTokens,
+		CacheCreation1hTokens: l.CacheCreation1hTokens,
+		InputCost: l.InputCost,
+		OutputCost: l.OutputCost,
+		CacheCreationCost: l.CacheCreationCost,
+		CacheReadCost: l.CacheReadCost,
+		TotalCost: l.TotalCost,
+		ActualCost: l.ActualCost,
+		RateMultiplier: l.RateMultiplier,
+		AccountRateMultiplier: l.AccountRateMultiplier,
+		BillingType: l.BillingType,
+		Stream: l.Stream,
+		DurationMs: l.DurationMs,
+		FirstTokenMs: l.FirstTokenMs,
+		ImageCount: l.ImageCount,
+		ImageSize: l.ImageSize,
+		UserAgent: l.UserAgent,
+		CreatedAt: l.CreatedAt,
+		User: UserFromServiceShallow(l.User),
+		APIKey: APIKeyFromService(l.APIKey),
+		Account: account,
+		Group: GroupFromServiceShallow(l.Group),
+		Subscription: UserSubscriptionFromService(l.Subscription),
+	}
+	// The IP address is only visible to admins
+	if includeIPAddress {
+		result.IPAddress = l.IPAddress
+	}
+	return result
+}
+
+// UsageLogFromService converts a service UsageLog to DTO for regular users.
+// It excludes Account details and IP address - users should not see these.
+func UsageLogFromService(l *service.UsageLog) *UsageLog {
+	return usageLogFromServiceBase(l, nil, false)
+}
+
+// UsageLogFromServiceAdmin converts a service UsageLog to DTO for admin users.
+// It includes minimal Account info (ID, Name only) and IP address.
+func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog {
+	if l == nil {
+		return nil
+	}
+	return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account), true)
+}
+
+func SettingFromService(s *service.Setting) *Setting {
+	if s == nil {
+		return nil
+	}
+	return &Setting{
+		ID: s.ID,
+		Key: s.Key,
+		Value: s.Value,
+		UpdatedAt: s.UpdatedAt,
+	}
+}
+
+func UserSubscriptionFromService(sub *service.UserSubscription) *UserSubscription {
+	if sub == nil {
+		return nil
+	}
+	return &UserSubscription{
+		ID: sub.ID,
+		UserID: sub.UserID,
+		GroupID: sub.GroupID,
+		StartsAt: sub.StartsAt,
+		ExpiresAt: sub.ExpiresAt,
+		Status: sub.Status,
+		DailyWindowStart: sub.DailyWindowStart,
+		WeeklyWindowStart: sub.WeeklyWindowStart,
+		MonthlyWindowStart: sub.MonthlyWindowStart,
+		DailyUsageUSD: sub.DailyUsageUSD,
+		WeeklyUsageUSD: sub.WeeklyUsageUSD,
+		MonthlyUsageUSD: sub.MonthlyUsageUSD,
+		AssignedBy: sub.AssignedBy,
+		AssignedAt: sub.AssignedAt,
+		Notes: sub.Notes,
+		CreatedAt: sub.CreatedAt,
+		UpdatedAt: sub.UpdatedAt,
+		User: UserFromServiceShallow(sub.User),
+		Group: GroupFromServiceShallow(sub.Group),
+		AssignedByUser: UserFromServiceShallow(sub.AssignedByUser),
+	}
+}
+
+func BulkAssignResultFromService(r *service.BulkAssignResult) *BulkAssignResult {
+	if r == nil {
+		return nil
+	}
+	subs := make([]UserSubscription, 0, len(r.Subscriptions))
+	for i := range r.Subscriptions {
+		subs = append(subs, *UserSubscriptionFromService(&r.Subscriptions[i]))
+	}
+	return &BulkAssignResult{
+		SuccessCount: r.SuccessCount,
+		FailedCount: r.FailedCount,
+		Subscriptions: subs,
+		Errors: r.Errors,
+	}
+}
+
+func PromoCodeFromService(pc *service.PromoCode) *PromoCode {
+	if pc == nil {
+		return nil
+	}
+	return &PromoCode{
+		ID: pc.ID,
+		Code: pc.Code,
+		BonusAmount: pc.BonusAmount,
+		MaxUses: pc.MaxUses,
+		UsedCount: pc.UsedCount,
+		Status: pc.Status,
+		ExpiresAt: pc.ExpiresAt,
+		Notes: pc.Notes,
+		CreatedAt: pc.CreatedAt,
+		UpdatedAt: pc.UpdatedAt,
+	}
+}
+
+func PromoCodeUsageFromService(u *service.PromoCodeUsage) *PromoCodeUsage {
+	if u == nil {
+		return nil
+	}
+	return &PromoCodeUsage{
+		ID: u.ID,
+		PromoCodeID: u.PromoCodeID,
+		UserID: u.UserID,
+		BonusAmount: u.BonusAmount,
+		UsedAt: u.UsedAt,
+		User: UserFromServiceShallow(u.User),
+	}
+}
diff --git a/backend/internal/handler/dto/settings.go b/backend/internal/handler/dto/settings.go
new file mode 100644
index 00000000..81206def
--- /dev/null
+++ b/backend/internal/handler/dto/settings.go
@@ -0,0 +1,77 @@
+package dto
+
+// SystemSettings represents the admin settings API response payload.
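+// Secret values (SMTP password, Turnstile secret key, OAuth client secret)
+// are never echoed back; only the *_configured booleans report whether each
+// secret has been set.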
+type SystemSettings struct {
+	RegistrationEnabled bool `json:"registration_enabled"`
+	EmailVerifyEnabled bool `json:"email_verify_enabled"`
+
+	SMTPHost string `json:"smtp_host"`
+	SMTPPort int `json:"smtp_port"`
+	SMTPUsername string `json:"smtp_username"`
+	SMTPPasswordConfigured bool `json:"smtp_password_configured"`
+	SMTPFrom string `json:"smtp_from_email"`
+	SMTPFromName string `json:"smtp_from_name"`
+	SMTPUseTLS bool `json:"smtp_use_tls"`
+
+	TurnstileEnabled bool `json:"turnstile_enabled"`
+	TurnstileSiteKey string `json:"turnstile_site_key"`
+	TurnstileSecretKeyConfigured bool `json:"turnstile_secret_key_configured"`
+
+	LinuxDoConnectEnabled bool `json:"linuxdo_connect_enabled"`
+	LinuxDoConnectClientID string `json:"linuxdo_connect_client_id"`
+	LinuxDoConnectClientSecretConfigured bool `json:"linuxdo_connect_client_secret_configured"`
+	LinuxDoConnectRedirectURL string `json:"linuxdo_connect_redirect_url"`
+
+	SiteName string `json:"site_name"`
+	SiteLogo string `json:"site_logo"`
+	SiteSubtitle string `json:"site_subtitle"`
+	APIBaseURL string `json:"api_base_url"`
+	ContactInfo string `json:"contact_info"`
+	DocURL string `json:"doc_url"`
+	HomeContent string `json:"home_content"`
+
+	DefaultConcurrency int `json:"default_concurrency"`
+	DefaultBalance float64 `json:"default_balance"`
+
+	// Model fallback configuration
+	EnableModelFallback bool `json:"enable_model_fallback"`
+	FallbackModelAnthropic string `json:"fallback_model_anthropic"`
+	FallbackModelOpenAI string `json:"fallback_model_openai"`
+	FallbackModelGemini string `json:"fallback_model_gemini"`
+	FallbackModelAntigravity string `json:"fallback_model_antigravity"`
+
+	// Identity patch configuration (Claude -> Gemini)
+	EnableIdentityPatch bool `json:"enable_identity_patch"`
+	IdentityPatchPrompt string `json:"identity_patch_prompt"`
+
+	// Ops monitoring (vNext)
+	OpsMonitoringEnabled bool `json:"ops_monitoring_enabled"`
+	OpsRealtimeMonitoringEnabled bool `json:"ops_realtime_monitoring_enabled"`
+	OpsQueryModeDefault string `json:"ops_query_mode_default"`
+	OpsMetricsIntervalSeconds int `json:"ops_metrics_interval_seconds"`
+}
+
+type PublicSettings struct {
+	RegistrationEnabled bool `json:"registration_enabled"`
+	EmailVerifyEnabled bool `json:"email_verify_enabled"`
+	TurnstileEnabled bool `json:"turnstile_enabled"`
+	TurnstileSiteKey string `json:"turnstile_site_key"`
+	SiteName string `json:"site_name"`
+	SiteLogo string `json:"site_logo"`
+	SiteSubtitle string `json:"site_subtitle"`
+	APIBaseURL string `json:"api_base_url"`
+	ContactInfo string `json:"contact_info"`
+	DocURL string `json:"doc_url"`
+	HomeContent string `json:"home_content"`
+	LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"`
+	Version string `json:"version"`
+}
+
+// StreamTimeoutSettings is the stream-timeout handling configuration DTO
+type StreamTimeoutSettings struct {
+	Enabled bool `json:"enabled"`
+	Action string `json:"action"`
+	TempUnschedMinutes int `json:"temp_unsched_minutes"`
+	ThresholdCount int `json:"threshold_count"`
+	ThresholdWindowMinutes int `json:"threshold_window_minutes"`
+}
diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go
new file mode 100644
index 00000000..0cbc809b
--- /dev/null
+++ b/backend/internal/handler/dto/types.go
@@ -0,0 +1,295 @@
+package dto
+
+import "time"
+
+type User struct {
+	ID int64 `json:"id"`
+	Email string `json:"email"`
+	Username string `json:"username"`
+	Notes string `json:"notes"`
+	Role string `json:"role"`
+	Balance float64 `json:"balance"`
+	Concurrency int `json:"concurrency"`
+	Status string `json:"status"`
+	AllowedGroups []int64 `json:"allowed_groups"`
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	APIKeys []APIKey `json:"api_keys,omitempty"`
+	Subscriptions []UserSubscription `json:"subscriptions,omitempty"`
+}
+
+type APIKey struct {
+	ID int64 `json:"id"`
+	UserID int64 `json:"user_id"`
+	Key string `json:"key"`
+	Name string `json:"name"`
+	GroupID *int64 `json:"group_id"`
+	Status string `json:"status"`
+	IPWhitelist []string `json:"ip_whitelist"`
+	IPBlacklist []string `json:"ip_blacklist"`
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	User *User `json:"user,omitempty"`
+	Group *Group `json:"group,omitempty"`
+}
+
+type Group struct {
+	ID int64 `json:"id"`
+	Name string `json:"name"`
+	Description string `json:"description"`
+	Platform string `json:"platform"`
+	RateMultiplier float64 `json:"rate_multiplier"`
+	IsExclusive bool `json:"is_exclusive"`
+	Status string `json:"status"`
+
+	SubscriptionType string `json:"subscription_type"`
+	DailyLimitUSD *float64 `json:"daily_limit_usd"`
+	WeeklyLimitUSD *float64 `json:"weekly_limit_usd"`
+	MonthlyLimitUSD *float64 `json:"monthly_limit_usd"`
+
+	// Image generation pricing (used only by the antigravity platform)
+	ImagePrice1K *float64 `json:"image_price_1k"`
+	ImagePrice2K *float64 `json:"image_price_2k"`
+	ImagePrice4K *float64 `json:"image_price_4k"`
+
+	// Claude Code client restriction
+	ClaudeCodeOnly bool `json:"claude_code_only"`
+	FallbackGroupID *int64 `json:"fallback_group_id"`
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	AccountGroups []AccountGroup `json:"account_groups,omitempty"`
+	AccountCount int64 `json:"account_count,omitempty"`
+}
+
+type Account struct {
+	ID int64 `json:"id"`
+	Name string `json:"name"`
+	Notes *string `json:"notes"`
+	Platform string `json:"platform"`
+	Type string `json:"type"`
+	Credentials map[string]any `json:"credentials"`
+	Extra map[string]any `json:"extra"`
+	ProxyID *int64 `json:"proxy_id"`
+	Concurrency int `json:"concurrency"`
+	Priority int `json:"priority"`
+	RateMultiplier float64 `json:"rate_multiplier"`
+	Status string `json:"status"`
+	ErrorMessage string `json:"error_message"`
+	LastUsedAt *time.Time `json:"last_used_at"`
+	ExpiresAt *int64 `json:"expires_at"`
+	AutoPauseOnExpired bool `json:"auto_pause_on_expired"`
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	Schedulable bool `json:"schedulable"`
+
+	RateLimitedAt *time.Time `json:"rate_limited_at"`
+	RateLimitResetAt *time.Time `json:"rate_limit_reset_at"`
+	OverloadUntil *time.Time `json:"overload_until"`
+
+	TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until"`
+	TempUnschedulableReason string `json:"temp_unschedulable_reason"`
+
+	SessionWindowStart *time.Time `json:"session_window_start"`
+	SessionWindowEnd *time.Time `json:"session_window_end"`
+	SessionWindowStatus string `json:"session_window_status"`
+
+	Proxy *Proxy `json:"proxy,omitempty"`
+	AccountGroups []AccountGroup `json:"account_groups,omitempty"`
+
+	GroupIDs []int64 `json:"group_ids,omitempty"`
+	Groups []*Group `json:"groups,omitempty"`
+}
+
+type AccountGroup struct {
+	AccountID int64 `json:"account_id"`
+	GroupID int64 `json:"group_id"`
+	Priority int `json:"priority"`
+	CreatedAt time.Time `json:"created_at"`
+
+	Account *Account `json:"account,omitempty"`
+	Group *Group `json:"group,omitempty"`
+}
+
+type Proxy struct {
+	ID int64 `json:"id"`
+	Name string `json:"name"`
+	Protocol string `json:"protocol"`
+	Host string `json:"host"`
+	Port int `json:"port"`
+	Username string `json:"username"`
+	Password string `json:"-"`
+	Status string `json:"status"`
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+type ProxyWithAccountCount struct {
+	Proxy
+	AccountCount int64 `json:"account_count"`
+	LatencyMs *int64 `json:"latency_ms,omitempty"`
+	LatencyStatus string `json:"latency_status,omitempty"`
+	LatencyMessage string `json:"latency_message,omitempty"`
+	IPAddress string `json:"ip_address,omitempty"`
+	Country string `json:"country,omitempty"`
+	CountryCode string `json:"country_code,omitempty"`
+	Region string `json:"region,omitempty"`
+	City string `json:"city,omitempty"`
+}
+
+type ProxyAccountSummary struct {
+	ID int64 `json:"id"`
+	Name string `json:"name"`
+	Platform string `json:"platform"`
+	Type string `json:"type"`
+	Notes *string `json:"notes,omitempty"`
+}
+
+type RedeemCode struct {
+	ID int64 `json:"id"`
+	Code string `json:"code"`
+	Type string `json:"type"`
+	Value float64 `json:"value"`
+	Status string `json:"status"`
+	UsedBy *int64 `json:"used_by"`
+	UsedAt *time.Time `json:"used_at"`
+	Notes string `json:"notes"`
+	CreatedAt time.Time `json:"created_at"`
+
+	GroupID *int64 `json:"group_id"`
+	ValidityDays int `json:"validity_days"`
+
+	User *User `json:"user,omitempty"`
+	Group *Group `json:"group,omitempty"`
+}
+
+type UsageLog struct {
+	ID int64 `json:"id"`
+	UserID int64 `json:"user_id"`
+	APIKeyID int64 `json:"api_key_id"`
+	AccountID int64 `json:"account_id"`
+	RequestID string `json:"request_id"`
+	Model string `json:"model"`
+
+	GroupID *int64 `json:"group_id"`
+	SubscriptionID *int64 `json:"subscription_id"`
+
+	InputTokens int `json:"input_tokens"`
+	OutputTokens int `json:"output_tokens"`
+	CacheCreationTokens int `json:"cache_creation_tokens"`
+	CacheReadTokens int `json:"cache_read_tokens"`
+
+	CacheCreation5mTokens int `json:"cache_creation_5m_tokens"`
+	CacheCreation1hTokens int `json:"cache_creation_1h_tokens"`
+
+	InputCost float64 `json:"input_cost"`
+	OutputCost float64 `json:"output_cost"`
+	CacheCreationCost float64 `json:"cache_creation_cost"`
+	CacheReadCost float64 `json:"cache_read_cost"`
+	TotalCost float64 `json:"total_cost"`
+	ActualCost float64 `json:"actual_cost"`
+	RateMultiplier float64 `json:"rate_multiplier"`
+	AccountRateMultiplier *float64 `json:"account_rate_multiplier"`
+
+	BillingType int8 `json:"billing_type"`
+	Stream bool `json:"stream"`
+	DurationMs *int `json:"duration_ms"`
+	FirstTokenMs *int `json:"first_token_ms"`
+
+	// Image generation fields
+	ImageCount int `json:"image_count"`
+	ImageSize *string `json:"image_size"`
+
+	// User-Agent
+	UserAgent *string `json:"user_agent"`
+
+	// IP address (visible to admins only)
+	IPAddress *string `json:"ip_address,omitempty"`
+
+	CreatedAt time.Time `json:"created_at"`
+
+	User *User `json:"user,omitempty"`
+	APIKey *APIKey `json:"api_key,omitempty"`
+	Account *AccountSummary `json:"account,omitempty"` // Use minimal AccountSummary to prevent data leakage
+	Group *Group `json:"group,omitempty"`
+	Subscription *UserSubscription `json:"subscription,omitempty"`
+}
+
+// AccountSummary carries minimal account info for usage log display.
+// It intentionally excludes sensitive fields like Credentials, Proxy, etc.
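+// Example serialized form (illustrative): {"id":42,"name":"primary-account"}.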
+type AccountSummary struct {
+	ID int64 `json:"id"`
+	Name string `json:"name"`
+}
+
+type Setting struct {
+	ID int64 `json:"id"`
+	Key string `json:"key"`
+	Value string `json:"value"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+type UserSubscription struct {
+	ID int64 `json:"id"`
+	UserID int64 `json:"user_id"`
+	GroupID int64 `json:"group_id"`
+
+	StartsAt time.Time `json:"starts_at"`
+	ExpiresAt time.Time `json:"expires_at"`
+	Status string `json:"status"`
+
+	DailyWindowStart *time.Time `json:"daily_window_start"`
+	WeeklyWindowStart *time.Time `json:"weekly_window_start"`
+	MonthlyWindowStart *time.Time `json:"monthly_window_start"`
+
+	DailyUsageUSD float64 `json:"daily_usage_usd"`
+	WeeklyUsageUSD float64 `json:"weekly_usage_usd"`
+	MonthlyUsageUSD float64 `json:"monthly_usage_usd"`
+
+	AssignedBy *int64 `json:"assigned_by"`
+	AssignedAt time.Time `json:"assigned_at"`
+	Notes string `json:"notes"`
+
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+
+	User *User `json:"user,omitempty"`
+	Group *Group `json:"group,omitempty"`
+	AssignedByUser *User `json:"assigned_by_user,omitempty"`
+}
+
+type BulkAssignResult struct {
+	SuccessCount int `json:"success_count"`
+	FailedCount int `json:"failed_count"`
+	Subscriptions []UserSubscription `json:"subscriptions"`
+	Errors []string `json:"errors"`
+}
+
+// PromoCode is a registration promo code
+type PromoCode struct {
+	ID int64 `json:"id"`
+	Code string `json:"code"`
+	BonusAmount float64 `json:"bonus_amount"`
+	MaxUses int `json:"max_uses"`
+	UsedCount int `json:"used_count"`
+	Status string `json:"status"`
+	ExpiresAt *time.Time `json:"expires_at"`
+	Notes string `json:"notes"`
+	CreatedAt time.Time `json:"created_at"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+// PromoCodeUsage records a single redemption of a promo code
+type PromoCodeUsage struct {
+	ID int64 `json:"id"`
+	PromoCodeID int64 `json:"promo_code_id"`
+	UserID int64 `json:"user_id"`
+	BonusAmount float64 `json:"bonus_amount"`
+	UsedAt time.Time `json:"used_at"`
+
+	User *User `json:"user,omitempty"`
+}
diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go
new file mode 100644
index 00000000..b60618a8
--- /dev/null
+++ b/backend/internal/handler/gateway_handler.go
@@ -0,0 +1,874 @@
+package handler
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/claude"
+	pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/openai"
+	middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// GatewayHandler handles API gateway requests
+type GatewayHandler struct {
+	gatewayService *service.GatewayService
+	geminiCompatService *service.GeminiMessagesCompatService
+	antigravityGatewayService *service.AntigravityGatewayService
+	userService *service.UserService
+	billingCacheService *service.BillingCacheService
+	concurrencyHelper *ConcurrencyHelper
+}
+
+// NewGatewayHandler creates a new GatewayHandler
+func NewGatewayHandler(
+	gatewayService *service.GatewayService,
+	geminiCompatService *service.GeminiMessagesCompatService,
+	antigravityGatewayService *service.AntigravityGatewayService,
+	userService *service.UserService,
+	concurrencyService *service.ConcurrencyService,
*service.ConcurrencyService,
+	billingCacheService *service.BillingCacheService,
+	cfg *config.Config,
+) *GatewayHandler {
+	pingInterval := time.Duration(0)
+	if cfg != nil {
+		pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second
+	}
+	return &GatewayHandler{
+		gatewayService:            gatewayService,
+		geminiCompatService:       geminiCompatService,
+		antigravityGatewayService: antigravityGatewayService,
+		userService:               userService,
+		billingCacheService:       billingCacheService,
+		concurrencyHelper:         NewConcurrencyHelper(concurrencyService, SSEPingFormatClaude, pingInterval),
+	}
+}
+
+// Messages handles Claude API compatible messages endpoint
+// POST /v1/messages
+func (h *GatewayHandler) Messages(c *gin.Context) {
+	// Get apiKey and user from context (set by the ApiKeyAuth middleware)
+	apiKey, ok := middleware2.GetAPIKeyFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key")
+		return
+	}
+
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found")
+		return
+	}
+
+	// Read the request body
+	body, err := io.ReadAll(c.Request.Body)
+	if err != nil {
+		if maxErr, ok := extractMaxBytesError(err); ok {
+			h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
+			return
+		}
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to read request body")
+		return
+	}
+
+	if len(body) == 0 {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Request body is empty")
+		return
+	}
+
+	// Detect whether the client is Claude Code and record that in the context
+	SetClaudeCodeClientContext(c, body)
+
+	setOpsRequestContext(c, "", false, body)
+
+	parsedReq, err := service.ParseGatewayRequest(body)
+	if err != nil {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
+		return
+	}
+	reqModel := parsedReq.Model
+	reqStream := parsedReq.Stream
+
+	setOpsRequestContext(c, reqModel, reqStream, body)
+
+	// model is required
+	if reqModel == "" {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required")
+		return
+	}
+
+	// Track if we've started streaming (for error handling)
+	streamStarted := false
+
+	// Fetch the subscription early (may be nil); it is reused by later checks
+	subscription, _ := middleware2.GetSubscriptionFromContext(c)
+
+	// 0. Check whether the wait queue is already full
+	maxWait := service.CalculateMaxWait(subject.Concurrency)
+	canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait)
+	waitCounted := false
+	if err != nil {
+		log.Printf("Increment wait count failed: %v", err)
+		// On error, allow request to proceed
+	} else if !canWait {
+		h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later")
+		return
+	}
+	if err == nil && canWait {
+		waitCounted = true
+	}
+	// Ensure we decrement if we exit before acquiring the user slot.
+	defer func() {
+		if waitCounted {
+			h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
+		}
+	}()
+
+	// 1. Acquire the per-user concurrency slot first
+	userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted)
+	if err != nil {
+		log.Printf("User concurrency acquire failed: %v", err)
+		h.handleConcurrencyError(c, err, "user", streamStarted)
+		return
+	}
+	// User slot acquired: no longer waiting in the queue.
+	if waitCounted {
+		h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID)
+		waitCounted = false
+	}
+	// Release the slot when the request ends or the context is cancelled,
+	// so a client disconnect cannot leak the slot
+	userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc)
+	if userReleaseFunc != nil {
+		defer userReleaseFunc()
+	}
+
+	// 2. Re-check balance/subscription eligibility after the wait
+	if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), apiKey.User, apiKey, apiKey.Group, subscription); err != nil {
+		log.Printf("Billing eligibility check failed after wait: %v", err)
+		status, code, message := billingErrorDetails(err)
+		h.handleStreamingAwareError(c, status, code, message, streamStarted)
+		return
+	}
+
+	// Compute the sticky-session hash
+	sessionHash := h.gatewayService.GenerateSessionHash(parsedReq)
+
+	// Resolve the platform: prefer the forced platform (/antigravity route, already set on request.Context by middleware), otherwise use the group platform
+	platform := ""
+	if forcePlatform, ok := middleware2.GetForcePlatformFromContext(c); ok {
+		platform = forcePlatform
+	} else if apiKey.Group != nil {
+		platform = apiKey.Group.Platform
+	}
+	sessionKey := sessionHash
+	if platform == service.PlatformGemini && sessionHash != "" {
+		sessionKey = "gemini:" + sessionHash
+	}
+
+	if platform == service.PlatformGemini {
+		const maxAccountSwitches = 3
+		switchCount := 0
+		failedAccountIDs := make(map[int64]struct{})
+		lastFailoverStatus := 0
+
+		for {
+			selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs)
+			if err != nil {
+				if len(failedAccountIDs) == 0 {
+					h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted)
+					return
+				}
+				h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted)
+				return
+			}
+			account := selection.Account
+			setOpsSelectedAccount(c, account.ID)
+
+			// Warmup-request interception (checked after account selection, before forwarding)
+			if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
+				if selection.Acquired && selection.ReleaseFunc != nil {
+					selection.ReleaseFunc()
+				}
+				if reqStream {
+					sendMockWarmupStream(c, reqModel)
+				} else {
+					sendMockWarmupResponse(c, reqModel)
+				}
+				return
+			}
+
+			// 3. Acquire the account concurrency slot
+			accountReleaseFunc := selection.ReleaseFunc
+			if !selection.Acquired {
+				if selection.WaitPlan == nil {
+					h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
+					return
+				}
+				accountWaitCounted := false
+				canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
+				if err != nil {
+					log.Printf("Increment account wait count failed: %v", err)
+				} else if !canWait {
+					log.Printf("Account wait queue full: account=%d", account.ID)
+					h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
+					return
+				}
+				if err == nil && canWait {
+					accountWaitCounted = true
+				}
+				// Ensure the wait counter is decremented if we exit before acquiring the slot.
+				defer func() {
+					if accountWaitCounted {
+						h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
+					}
+				}()
+
+				accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
+					c,
+					account.ID,
+					selection.WaitPlan.MaxConcurrency,
+					selection.WaitPlan.Timeout,
+					reqStream,
+					&streamStarted,
+				)
+				if err != nil {
+					log.Printf("Account concurrency acquire failed: %v", err)
+					h.handleConcurrencyError(c, err, "account", streamStarted)
+					return
+				}
+				// Slot acquired: no longer waiting in queue.
+				if accountWaitCounted {
+					h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
+					accountWaitCounted = false
+				}
+				if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
+					log.Printf("Bind sticky session failed: %v", err)
+				}
+			}
+			// The account slot / wait counter must be reclaimed safely on timeout or disconnect
+			accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
+
+			// Forward the request, dispatching by account platform
+			var result *service.ForwardResult
+			if account.Platform == service.PlatformAntigravity {
+				result, err = h.antigravityGatewayService.ForwardGemini(c.Request.Context(), c, account, reqModel, "generateContent", reqStream, body)
+			} else {
+				result, err = h.geminiCompatService.Forward(c.Request.Context(), c, account, body)
+			}
+			if accountReleaseFunc != nil {
+				accountReleaseFunc()
+			}
+			if err != nil {
+				var failoverErr *service.UpstreamFailoverError
+				if errors.As(err, &failoverErr) {
+					failedAccountIDs[account.ID] = struct{}{}
+					lastFailoverStatus = failoverErr.StatusCode
+					if switchCount >= maxAccountSwitches {
+						h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted)
+						return
+					}
+					switchCount++
+					log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches)
+					continue
+				}
+				// The error response was already written inside Forward; only log here
+				log.Printf("Forward request failed: %v", err)
+				return
+			}
+
+			// Capture request info for async recording (avoid touching gin.Context inside a goroutine)
+			userAgent := c.GetHeader("User-Agent")
+			clientIP := ip.GetClientIP(c)
+
+			// Record usage asynchronously (subscription was fetched at the top of the function)
+			go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) {
+				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+				defer cancel()
+				if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
+					Result:       result,
+					APIKey:       apiKey,
+					User:         apiKey.User,
+					Account:      usedAccount,
+					Subscription: subscription,
+					UserAgent:    ua,
+					IPAddress:    clientIP,
+				}); err != nil {
+					log.Printf("Record usage failed: %v", err)
+				}
+			}(result, account, userAgent, clientIP)
+			return
+		}
+	}
+
+	const maxAccountSwitches = 10
+	switchCount := 0
+	failedAccountIDs := make(map[int64]struct{})
+	lastFailoverStatus := 0
+
+	for {
+		// Select an account that supports the requested model
+		selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs)
+		if err != nil {
+			if len(failedAccountIDs) == 0 {
+				h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted)
+				return
+			}
+			h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted)
+			return
+		}
+		account := selection.Account
+		setOpsSelectedAccount(c, account.ID)
+
+		// Warmup-request interception (checked after account selection, before forwarding)
+		if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) {
+			if selection.Acquired && selection.ReleaseFunc != nil {
+				selection.ReleaseFunc()
+			}
+			if reqStream {
+				sendMockWarmupStream(c, reqModel)
+			} else {
+				sendMockWarmupResponse(c, reqModel)
+			}
+			return
+		}
+
+		// 3. Acquire the account concurrency slot
+		accountReleaseFunc := selection.ReleaseFunc
+		if !selection.Acquired {
+			if selection.WaitPlan == nil {
+				h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted)
+				return
+			}
+			accountWaitCounted := false
+			canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting)
+			if err != nil {
+				log.Printf("Increment account wait count failed: %v", err)
+			} else if !canWait {
+				log.Printf("Account wait queue full: account=%d", account.ID)
+				h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted)
+				return
+			}
+			if err == nil && canWait {
+				accountWaitCounted = true
+			}
+			defer func() {
+				if accountWaitCounted {
+					h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
+				}
+			}()
+
+			accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout(
+				c,
+				account.ID,
+				selection.WaitPlan.MaxConcurrency,
+				selection.WaitPlan.Timeout,
+				reqStream,
+				&streamStarted,
+			)
+			if err != nil {
+				log.Printf("Account concurrency acquire failed: %v", err)
+				h.handleConcurrencyError(c, err, "account", streamStarted)
+				return
+			}
+			if accountWaitCounted {
+				h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID)
+				accountWaitCounted = false
+			}
+			if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil {
+				log.Printf("Bind sticky session failed: %v", err)
+			}
+		}
+		// The account slot / wait counter must be reclaimed safely on timeout or disconnect
+		accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc)
+
+		// Forward the request, dispatching by account platform
+		var result *service.ForwardResult
+		if account.Platform == service.PlatformAntigravity {
+			result, err = h.antigravityGatewayService.Forward(c.Request.Context(), c, account, body)
+		} else {
+			result, err = h.gatewayService.Forward(c.Request.Context(), c, account, parsedReq)
+		}
+		if accountReleaseFunc != nil {
+			accountReleaseFunc()
+		}
+		if err != nil {
+			var failoverErr *service.UpstreamFailoverError
+			if errors.As(err, &failoverErr) {
+				failedAccountIDs[account.ID] = struct{}{}
+				lastFailoverStatus = failoverErr.StatusCode
+				if switchCount >= maxAccountSwitches {
+					h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted)
+					return
+				}
+				switchCount++
+				log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches)
+				continue
+			}
+			// The error response was already written inside Forward; only log here
+			log.Printf("Account %d: Forward request failed: %v", account.ID, err)
+			return
+		}
+
+		// Capture request info for async recording (avoid touching gin.Context inside a goroutine)
+		userAgent := c.GetHeader("User-Agent")
+		clientIP := ip.GetClientIP(c)
+
+		// Record usage asynchronously (subscription was fetched at the top of the function)
+		go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) {
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			defer cancel()
+			if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{
+				Result:       result,
+				APIKey:       apiKey,
+				User:         apiKey.User,
+				Account:      usedAccount,
+				Subscription: subscription,
+				UserAgent:    ua,
+				IPAddress:    clientIP,
+			}); err != nil {
+				log.Printf("Record usage failed: %v", err)
+			}
+		}(result, account, userAgent, clientIP)
+		return
+	}
+}
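+
+// Illustrative client request (a sketch, not part of this change): the handler
+// above accepts Claude-style message bodies; model is required and stream
+// selects SSE. The base URL and API key are hypothetical, and the exact auth
+// header is whatever the ApiKeyAuth middleware accepts.
+//
+//	req, _ := http.NewRequest("POST", "https://gateway.example/v1/messages",
+//		strings.NewReader(`{"model":"claude-sonnet-4","stream":true,"max_tokens":64,`+
+//			`"messages":[{"role":"user","content":"hello"}]}`))
+//	req.Header.Set("Content-Type", "application/json")
+//	req.Header.Set("x-api-key", "sk-example") // hypothetical key header
+//	resp, err := http.DefaultClient.Do(req)   // SSE stream when stream=true
+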
+// Models handles listing available models
+// GET /v1/models
+// Returns models based on account configurations (model_mapping whitelist)
+// Falls back to default models if no whitelist is configured
+func (h *GatewayHandler) Models(c *gin.Context) {
+	apiKey, _ := middleware2.GetAPIKeyFromContext(c)
+
+	var groupID *int64
+	var platform string
+
+	if apiKey != nil && apiKey.Group != nil {
+		groupID = &apiKey.Group.ID
+		platform = apiKey.Group.Platform
+	}
+
+	// Get available models from account configurations (without platform filter)
+	availableModels := h.gatewayService.GetAvailableModels(c.Request.Context(), groupID, "")
+
+	if len(availableModels) > 0 {
+		// Build model list from whitelist
+		models := make([]claude.Model, 0, len(availableModels))
+		for _, modelID := range availableModels {
+			models = append(models, claude.Model{
+				ID:          modelID,
+				Type:        "model",
+				DisplayName: modelID,
+				CreatedAt:   "2024-01-01T00:00:00Z",
+			})
+		}
+		c.JSON(http.StatusOK, gin.H{
+			"object": "list",
+			"data":   models,
+		})
+		return
+	}
+
+	// Fallback to default models
+	if platform == "openai" {
+		c.JSON(http.StatusOK, gin.H{
+			"object": "list",
+			"data":   openai.DefaultModels,
+		})
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"object": "list",
+		"data":   claude.DefaultModels,
+	})
+}
+
+// AntigravityModels returns all models supported by Antigravity
+// GET /antigravity/models
+func (h *GatewayHandler) AntigravityModels(c *gin.Context) {
+	c.JSON(http.StatusOK, gin.H{
+		"object": "list",
+		"data":   antigravity.DefaultModels(),
+	})
+}
+
+// Usage handles getting account balance for CC Switch integration
+// GET /v1/usage
+func (h *GatewayHandler) Usage(c *gin.Context) {
+	apiKey, ok := middleware2.GetAPIKeyFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key")
+		return
+	}
+
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key")
+		return
+	}
+
+	// Subscription mode: return subscription quota info
+	if apiKey.Group != nil && apiKey.Group.IsSubscriptionType() {
+		subscription, ok := middleware2.GetSubscriptionFromContext(c)
+		if !ok {
+			h.errorResponse(c, http.StatusForbidden, "subscription_error", "No active subscription")
+			return
+		}
+
+		remaining := h.calculateSubscriptionRemaining(apiKey.Group, subscription)
+		c.JSON(http.StatusOK, gin.H{
+			"isValid":   true,
+			"planName":  apiKey.Group.Name,
+			"remaining": remaining,
+			"unit":      "USD",
+		})
+		return
+	}
+
+	// Balance mode: return the wallet balance
+	latestUser, err := h.userService.GetByID(c.Request.Context(), subject.UserID)
+	if err != nil {
+		h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to get user info")
+		return
+	}
+
+	c.JSON(http.StatusOK, gin.H{
+		"isValid":   true,
+		"planName":  "钱包余额", // client-facing label meaning "wallet balance"
+		"remaining": latestUser.Balance,
+		"unit":      "USD",
+	})
+}
+
+// calculateSubscriptionRemaining computes the remaining usable subscription quota.
+// Logic:
+// 1. If any of the daily/weekly/monthly limits has reached 100%, return 0
+// 2. Otherwise return the minimum remaining quota across all configured windows
+func (h *GatewayHandler) calculateSubscriptionRemaining(group *service.Group, sub *service.UserSubscription) float64 {
+	var remainingValues []float64
+
+	// Daily limit
+	if group.HasDailyLimit() {
+		remaining := *group.DailyLimitUSD - sub.DailyUsageUSD
+		if remaining <= 0 {
+			return 0
+		}
+		remainingValues = append(remainingValues, remaining)
+	}
+
+	// Weekly limit
+	if group.HasWeeklyLimit() {
+		remaining := *group.WeeklyLimitUSD - sub.WeeklyUsageUSD
+		if remaining <= 0 {
+			return 0
+		}
+		remainingValues = append(remainingValues, remaining)
+	}
+
+	// Monthly limit
+	if group.HasMonthlyLimit() {
+		remaining := *group.MonthlyLimitUSD - sub.MonthlyUsageUSD
+		if remaining <= 0 {
+			return 0
+		}
+		remainingValues = append(remainingValues, remaining)
+	}
+
+	// No limits configured: -1 means unlimited
+	if len(remainingValues) == 0 {
+		return -1
+	}
+
+	// Return the smallest remaining value
+	min := remainingValues[0]
+	for _, v := range remainingValues[1:] {
+		if v < min {
+			min = v
+		}
+	}
+	return min
+}
+
+// handleConcurrencyError handles concurrency-related errors with proper 429 response
+func (h *GatewayHandler) handleConcurrencyError(c *gin.Context, err error, slotType string, streamStarted bool) {
+	h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error",
+		fmt.Sprintf("Concurrency limit exceeded for %s, please retry later", slotType), streamStarted)
+}
+
+func (h *GatewayHandler) handleFailoverExhausted(c *gin.Context, statusCode int, streamStarted bool) {
+	status, errType, errMsg := h.mapUpstreamError(statusCode)
+	h.handleStreamingAwareError(c, status, errType, errMsg, streamStarted)
+}
+
+func (h *GatewayHandler) mapUpstreamError(statusCode int) (int, string, string) {
+	switch statusCode {
+	case 401:
+		return http.StatusBadGateway, "upstream_error", "Upstream authentication failed, please contact administrator"
+	case 403:
+		return http.StatusBadGateway, "upstream_error", "Upstream access forbidden, please contact administrator"
+	case 429:
+		return http.StatusTooManyRequests, "rate_limit_error", "Upstream rate limit exceeded, please retry later"
+	case 529:
+		return http.StatusServiceUnavailable, "overloaded_error", "Upstream service overloaded, please retry later"
+	case 500, 502, 503, 504:
+		return http.StatusBadGateway, "upstream_error", "Upstream service temporarily unavailable"
+	default:
+		return http.StatusBadGateway, "upstream_error", "Upstream request failed"
+	}
+}
+
+// handleStreamingAwareError handles errors that may occur after streaming has started
+func (h *GatewayHandler) handleStreamingAwareError(c *gin.Context, status int, errType, message string, streamStarted bool) {
+	if streamStarted {
+		// Stream already started, send error as SSE event then close
+		flusher, ok := c.Writer.(http.Flusher)
+		if ok {
+			// Send error event in SSE format with proper JSON marshaling
+			errorData := map[string]any{
+				"type": "error",
+				"error": map[string]string{
+					"type":    errType,
+					"message": message,
+				},
+			}
+			jsonBytes, err := json.Marshal(errorData)
+			if err != nil {
+				_ = c.Error(err)
+				return
+			}
+			errorEvent := fmt.Sprintf("data: %s\n\n", string(jsonBytes))
+			if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil {
+				_ = c.Error(err)
+			}
+			flusher.Flush()
+		}
+		return
+	}
+
+	// Normal case: return JSON response with proper status code
+	h.errorResponse(c, status, errType, message)
+}
+
+// errorResponse writes an error response in Claude API format
+func (h *GatewayHandler) errorResponse(c *gin.Context, status int, errType, message string) {
+	c.JSON(status, gin.H{
+		"type": "error",
+		"error": gin.H{
+			"type":    errType,
+			"message": message,
+		},
+	})
+}
+
+// CountTokens handles token counting endpoint
+// POST /v1/messages/count_tokens
+// Note: validates subscription/balance, but does not count toward concurrency and does not record usage
+func (h *GatewayHandler) CountTokens(c *gin.Context) {
+	// Get apiKey and user from context (set by the ApiKeyAuth middleware)
+	apiKey, ok := middleware2.GetAPIKeyFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key")
+		return
+	}
+
+	_, ok = middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found")
+		return
+	}
+
+	// Read the request body
+	body, err := io.ReadAll(c.Request.Body)
+	if err != nil {
+		if maxErr, ok := extractMaxBytesError(err); ok {
+			h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit))
+			return
+		}
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to read request body")
+		return
+	}
+
+	if len(body) == 0 {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Request body is empty")
+		return
+	}
+
+	setOpsRequestContext(c, "", false, body)
+
+	parsedReq, err := service.ParseGatewayRequest(body)
+	if err != nil {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body")
+		return
+	}
+
+	// model is required
+	if parsedReq.Model == "" {
+		h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required")
+		return
+	}
+
+	setOpsRequestContext(c, parsedReq.Model, parsedReq.Stream, body)
+
+	// Get the subscription (may be nil)
+	subscription, _ := middleware2.GetSubscriptionFromContext(c)
+
+	// Check billing eligibility (subscription/balance).
+	// Note: this endpoint does not count toward concurrency, but eligibility must still be verified
+	if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), apiKey.User, apiKey, apiKey.Group, subscription); err != nil {
+		status, code, message := billingErrorDetails(err)
+		h.errorResponse(c, status, code, message)
+		return
+	}
+
+	// Compute the sticky-session hash
+	sessionHash := h.gatewayService.GenerateSessionHash(parsedReq)
+
+	// Select an account that supports the requested model
+	account, err := h.gatewayService.SelectAccountForModel(c.Request.Context(), apiKey.GroupID, sessionHash, parsedReq.Model)
+	if err != nil {
+		h.errorResponse(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error())
+		return
+	}
+	setOpsSelectedAccount(c, account.ID)
+
+	// Forward the request (without recording usage)
+	if err := h.gatewayService.ForwardCountTokens(c.Request.Context(), c, account, parsedReq); err != nil {
+		log.Printf("Forward count_tokens request failed: %v", err)
+		// The error response was already written in ForwardCountTokens
+		return
+	}
+}
+
+// isWarmupRequest detects warmup requests (title generation, Warmup, etc.)
+func isWarmupRequest(body []byte) bool {
+	// Fast path: if the body contains none of the keywords, return false immediately
+	bodyStr := string(body)
+	if !strings.Contains(bodyStr, "title") && !strings.Contains(bodyStr, "Warmup") {
+		return false
+	}
+
+	// Parse the full request
+	var req struct {
+		Messages []struct {
+			Content []struct {
+				Type string `json:"type"`
+				Text string `json:"text"`
+			} `json:"content"`
+		} `json:"messages"`
+		System []struct {
+			Text string `json:"text"`
+		} `json:"system"`
+	}
+	if err := json.Unmarshal(body, &req); err != nil {
+		return false
+	}
+
+	// Check messages for the title-prompt pattern
+	for _, msg := range req.Messages {
+		for _, content := range msg.Content {
+			if content.Type == "text" {
+				if strings.Contains(content.Text, "Please write a 5-10 word title for the following conversation:") ||
+					content.Text == "Warmup" {
+					return true
+				}
+			}
+		}
+	}
+
+	// Check system prompts for the title-extraction pattern
+	for _, system := range req.System {
"nalyze if this message indicates a new conversation topic. If it does, extract a 2-3 word title") { + return true + } + } + + return false +} + +// sendMockWarmupStream 发送流式 mock 响应(用于预热请求拦截) +func sendMockWarmupStream(c *gin.Context, model string) { + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + + // Build message_start event with proper JSON marshaling + messageStart := map[string]any{ + "type": "message_start", + "message": map[string]any{ + "id": "msg_mock_warmup", + "type": "message", + "role": "assistant", + "model": model, + "content": []any{}, + "stop_reason": nil, + "stop_sequence": nil, + "usage": map[string]int{ + "input_tokens": 10, + "output_tokens": 0, + }, + }, + } + messageStartJSON, _ := json.Marshal(messageStart) + + events := []string{ + `event: message_start` + "\n" + `data: ` + string(messageStartJSON), + `event: content_block_start` + "\n" + `data: {"content_block":{"text":"","type":"text"},"index":0,"type":"content_block_start"}`, + `event: content_block_delta` + "\n" + `data: {"delta":{"text":"New","type":"text_delta"},"index":0,"type":"content_block_delta"}`, + `event: content_block_delta` + "\n" + `data: {"delta":{"text":" Conversation","type":"text_delta"},"index":0,"type":"content_block_delta"}`, + `event: content_block_stop` + "\n" + `data: {"index":0,"type":"content_block_stop"}`, + `event: message_delta` + "\n" + `data: {"delta":{"stop_reason":"end_turn","stop_sequence":null},"type":"message_delta","usage":{"input_tokens":10,"output_tokens":2}}`, + `event: message_stop` + "\n" + `data: {"type":"message_stop"}`, + } + + for _, event := range events { + _, _ = c.Writer.WriteString(event + "\n\n") + c.Writer.Flush() + time.Sleep(20 * time.Millisecond) + } +} + +// sendMockWarmupResponse 发送非流式 mock 响应(用于预热请求拦截) +func sendMockWarmupResponse(c *gin.Context, model string) { + c.JSON(http.StatusOK, gin.H{ + "id": "msg_mock_warmup", + "type": "message", + "role": "assistant", + "model": model, + "content": []gin.H{{"type": "text", "text": "New Conversation"}}, + "stop_reason": "end_turn", + "usage": gin.H{ + "input_tokens": 10, + "output_tokens": 2, + }, + }) +} + +func billingErrorDetails(err error) (status int, code, message string) { + if errors.Is(err, service.ErrBillingServiceUnavailable) { + msg := pkgerrors.Message(err) + if msg == "" { + msg = "Billing service temporarily unavailable. Please retry later." 
+		}
+		return http.StatusServiceUnavailable, "billing_service_error", msg
+	}
+	msg := pkgerrors.Message(err)
+	if msg == "" {
+		msg = err.Error()
+	}
+	return http.StatusForbidden, "billing_error", msg
+}
diff --git a/backend/internal/handler/gateway_helper.go b/backend/internal/handler/gateway_helper.go
new file mode 100644
index 00000000..0393f954
--- /dev/null
+++ b/backend/internal/handler/gateway_helper.go
@@ -0,0 +1,323 @@
+package handler
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// claudeCodeValidator is a singleton validator for Claude Code client detection
+var claudeCodeValidator = service.NewClaudeCodeValidator()
+
+// SetClaudeCodeClientContext checks whether the request comes from a Claude Code
+// client and stores the result in the request context (c.Request is updated in place)
+func SetClaudeCodeClientContext(c *gin.Context, body []byte) {
+	// Parse the request body into a map
+	var bodyMap map[string]any
+	if len(body) > 0 {
+		_ = json.Unmarshal(body, &bodyMap)
+	}
+
+	// Validate whether the client is Claude Code
+	isClaudeCode := claudeCodeValidator.Validate(c.Request, bodyMap)
+
+	// Update the request context
+	ctx := service.SetClaudeCodeClient(c.Request.Context(), isClaudeCode)
+	c.Request = c.Request.WithContext(ctx)
+}
+
+// Constants for waiting on concurrency slots.
+//
+// Performance note:
+// The previous implementation polled for a slot at a fixed interval (100ms), which
+// 1. put extra pressure on Redis under high concurrency, and
+// 2. could make many requests retry at the same instant (thundering herd).
+//
+// The current implementation uses exponential backoff plus jitter:
+// 1. start at 100ms, multiply by 1.5 each round, capped at 2s
+// 2. add ±20% random jitter to spread out retry times
+// 3. this lowers Redis pressure and avoids the thundering herd
+const (
+	// maxConcurrencyWait is the maximum time to wait for a concurrency slot
+	maxConcurrencyWait = 30 * time.Second
+	// defaultPingInterval is the default interval between pings sent while a streaming request waits
+	defaultPingInterval = 10 * time.Second
+	// initialBackoff is the initial backoff duration
+	initialBackoff = 100 * time.Millisecond
+	// backoffMultiplier grows the backoff exponentially
+	backoffMultiplier = 1.5
+	// maxBackoff is the maximum backoff duration
+	maxBackoff = 2 * time.Second
+)
+
+// SSEPingFormat defines the format of SSE ping events for different platforms
+type SSEPingFormat string
+
+const (
+	// SSEPingFormatClaude is the Claude/Anthropic SSE ping format
+	SSEPingFormatClaude SSEPingFormat = "data: {\"type\": \"ping\"}\n\n"
+	// SSEPingFormatNone indicates no ping should be sent (e.g., OpenAI has no ping spec)
+	SSEPingFormatNone SSEPingFormat = ""
+	// SSEPingFormatComment is an SSE comment ping for OpenAI/Codex CLI clients
+	SSEPingFormatComment SSEPingFormat = ":\n\n"
+)
+
+// ConcurrencyError represents a concurrency limit error with context
+type ConcurrencyError struct {
+	SlotType  string
+	IsTimeout bool
+}
+
+func (e *ConcurrencyError) Error() string {
+	if e.IsTimeout {
+		return fmt.Sprintf("timeout waiting for %s concurrency slot", e.SlotType)
+	}
+	return fmt.Sprintf("%s concurrency limit reached", e.SlotType)
+}
+
+// ConcurrencyHelper provides common concurrency slot management for gateway handlers
+type ConcurrencyHelper struct {
+	concurrencyService *service.ConcurrencyService
+	pingFormat         SSEPingFormat
+	pingInterval       time.Duration
+}
+
+// NewConcurrencyHelper creates a new ConcurrencyHelper
+func NewConcurrencyHelper(concurrencyService *service.ConcurrencyService, pingFormat SSEPingFormat, pingInterval time.Duration) *ConcurrencyHelper {
+	if pingInterval <= 0 {
+		pingInterval = defaultPingInterval
+	}
+	return &ConcurrencyHelper{
+		concurrencyService: concurrencyService,
+		pingFormat:         pingFormat,
+		pingInterval:       pingInterval,
+	}
+}
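+
+// Illustrative backoff progression (a sketch, not executed anywhere in this
+// change): with the constants above and nextBackoff (defined at the bottom of
+// this file), the pre-jitter wait sequence looks like:
+//
+//	backoff := initialBackoff
+//	for i := 0; i < 6; i++ {
+//		fmt.Println(backoff) // 100ms, 150ms, 225ms, 337.5ms, 506.25ms, 759.375ms, ... capped at 2s
+//		backoff = nextBackoff(backoff, nil) // nil rng disables jitter
+//	}
+//
+// With a non-nil rng, each step is additionally scaled by a random factor in
+// [0.8, 1.2] and then clamped back into [initialBackoff, maxBackoff].
+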
+// wrapReleaseOnDone ensures release runs at most once and still triggers on context cancellation.
+// It prevents concurrency-slot leaks caused by client disconnects or upstream timeouts.
+// Fix: the quit channel guarantees the watcher goroutine exits promptly instead of leaking.
+func wrapReleaseOnDone(ctx context.Context, releaseFunc func()) func() {
+	if releaseFunc == nil {
+		return nil
+	}
+	var once sync.Once
+	quit := make(chan struct{})
+
+	release := func() {
+		once.Do(func() {
+			releaseFunc()
+			close(quit) // signal the watcher goroutine to exit
+		})
+	}
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			// Release resources when the context is cancelled
+			release()
+		case <-quit:
+			// Normal release already completed; the goroutine exits
+			return
+		}
+	}()
+
+	return release
+}
+
+// IncrementWaitCount increments the wait count for a user
+func (h *ConcurrencyHelper) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) {
+	return h.concurrencyService.IncrementWaitCount(ctx, userID, maxWait)
+}
+
+// DecrementWaitCount decrements the wait count for a user
+func (h *ConcurrencyHelper) DecrementWaitCount(ctx context.Context, userID int64) {
+	h.concurrencyService.DecrementWaitCount(ctx, userID)
+}
+
+// IncrementAccountWaitCount increments the wait count for an account
+func (h *ConcurrencyHelper) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) {
+	return h.concurrencyService.IncrementAccountWaitCount(ctx, accountID, maxWait)
+}
+
+// DecrementAccountWaitCount decrements the wait count for an account
+func (h *ConcurrencyHelper) DecrementAccountWaitCount(ctx context.Context, accountID int64) {
+	h.concurrencyService.DecrementAccountWaitCount(ctx, accountID)
+}
+
+// AcquireUserSlotWithWait acquires a user concurrency slot, waiting if necessary.
+// For streaming requests, sends ping events during the wait.
+// streamStarted is updated if streaming response has begun.
+func (h *ConcurrencyHelper) AcquireUserSlotWithWait(c *gin.Context, userID int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) {
+	ctx := c.Request.Context()
+
+	// Try to acquire immediately
+	result, err := h.concurrencyService.AcquireUserSlot(ctx, userID, maxConcurrency)
+	if err != nil {
+		return nil, err
+	}
+
+	if result.Acquired {
+		return result.ReleaseFunc, nil
+	}
+
+	// Need to wait - handle streaming ping if needed
+	return h.waitForSlotWithPing(c, "user", userID, maxConcurrency, isStream, streamStarted)
+}
+
+// AcquireAccountSlotWithWait acquires an account concurrency slot, waiting if necessary.
+// For streaming requests, sends ping events during the wait.
+// streamStarted is updated if streaming response has begun.
+func (h *ConcurrencyHelper) AcquireAccountSlotWithWait(c *gin.Context, accountID int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) {
+	ctx := c.Request.Context()
+
+	// Try to acquire immediately
+	result, err := h.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency)
+	if err != nil {
+		return nil, err
+	}
+
+	if result.Acquired {
+		return result.ReleaseFunc, nil
+	}
+
+	// Need to wait - handle streaming ping if needed
+	return h.waitForSlotWithPing(c, "account", accountID, maxConcurrency, isStream, streamStarted)
+}
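+
+// Usage sketch (illustrative only, mirroring the gateway handlers above): the
+// acquire + wrap pattern callers are expected to follow. Names come from this
+// file; the surrounding handler code is elided.
+//
+//	release, err := helper.AcquireUserSlotWithWait(c, userID, maxConc, isStream, &streamStarted)
+//	if err != nil {
+//		// typically a *ConcurrencyError; map it to a 429 response
+//		return
+//	}
+//	release = wrapReleaseOnDone(c.Request.Context(), release) // also fires on disconnect
+//	if release != nil {
+//		defer release()
+//	}
+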
+// waitForSlotWithPing waits for a concurrency slot, sending ping events for streaming requests.
+// streamStarted pointer is updated when streaming begins (for proper error handling by caller).
+func (h *ConcurrencyHelper) waitForSlotWithPing(c *gin.Context, slotType string, id int64, maxConcurrency int, isStream bool, streamStarted *bool) (func(), error) {
+	return h.waitForSlotWithPingTimeout(c, slotType, id, maxConcurrency, maxConcurrencyWait, isStream, streamStarted)
+}
+
+// waitForSlotWithPingTimeout waits for a concurrency slot with a custom timeout.
+func (h *ConcurrencyHelper) waitForSlotWithPingTimeout(c *gin.Context, slotType string, id int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) {
+	ctx, cancel := context.WithTimeout(c.Request.Context(), timeout)
+	defer cancel()
+
+	// Try immediate acquire first (avoid unnecessary wait)
+	var result *service.AcquireResult
+	var err error
+	if slotType == "user" {
+		result, err = h.concurrencyService.AcquireUserSlot(ctx, id, maxConcurrency)
+	} else {
+		result, err = h.concurrencyService.AcquireAccountSlot(ctx, id, maxConcurrency)
+	}
+	if err != nil {
+		return nil, err
+	}
+	if result.Acquired {
+		return result.ReleaseFunc, nil
+	}
+
+	// Determine if ping is needed (streaming + ping format defined)
+	needPing := isStream && h.pingFormat != ""
+
+	var flusher http.Flusher
+	if needPing {
+		var ok bool
+		flusher, ok = c.Writer.(http.Flusher)
+		if !ok {
+			return nil, fmt.Errorf("streaming not supported")
+		}
+	}
+
+	// Only create ping ticker if ping is needed
+	var pingCh <-chan time.Time
+	if needPing {
+		pingTicker := time.NewTicker(h.pingInterval)
+		defer pingTicker.Stop()
+		pingCh = pingTicker.C
+	}
+
+	backoff := initialBackoff
+	timer := time.NewTimer(backoff)
+	defer timer.Stop()
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, &ConcurrencyError{
+				SlotType:  slotType,
+				IsTimeout: true,
+			}
+
+		case <-pingCh:
+			// Send ping to keep connection alive
+			if !*streamStarted {
+				c.Header("Content-Type", "text/event-stream")
+				c.Header("Cache-Control", "no-cache")
+				c.Header("Connection", "keep-alive")
+				c.Header("X-Accel-Buffering", "no")
+				*streamStarted = true
+			}
+			if _, err := fmt.Fprint(c.Writer, string(h.pingFormat)); err != nil {
+				return nil, err
+			}
+			flusher.Flush()
+
+		case <-timer.C:
+			// Try to acquire slot
+			var result *service.AcquireResult
+			var err error
+
+			if slotType == "user" {
+				result, err = h.concurrencyService.AcquireUserSlot(ctx, id, maxConcurrency)
+			} else {
+				result, err = h.concurrencyService.AcquireAccountSlot(ctx, id, maxConcurrency)
+			}
+
+			if err != nil {
+				return nil, err
+			}
+
+			if result.Acquired {
+				return result.ReleaseFunc, nil
+			}
+			backoff = nextBackoff(backoff, rng)
+			timer.Reset(backoff)
+		}
+	}
+}
+
+// AcquireAccountSlotWithWaitTimeout acquires an account slot with a custom timeout (keeps SSE ping).
+func (h *ConcurrencyHelper) AcquireAccountSlotWithWaitTimeout(c *gin.Context, accountID int64, maxConcurrency int, timeout time.Duration, isStream bool, streamStarted *bool) (func(), error) {
+	return h.waitForSlotWithPingTimeout(c, "account", accountID, maxConcurrency, timeout, isStream, streamStarted)
+}
+
+// nextBackoff computes the next backoff duration.
+// Performance: exponential backoff plus random jitter avoids the thundering-herd effect.
+// current: the current backoff duration
+// rng: random source (may be nil, in which case no jitter is added)
+// Returns the next backoff duration (between 100ms and 2s).
+func nextBackoff(current time.Duration, rng *rand.Rand) time.Duration {
+	// Exponential backoff: current * 1.5
+	next := time.Duration(float64(current) * backoffMultiplier)
+	if next > maxBackoff {
+		next = maxBackoff
+	}
+	if rng == nil {
+		return next
+	}
+	// Add ±20% random jitter (factor in 0.8 ~ 1.2).
+	// Jitter spreads retries out so concurrent requests do not hit Redis at the same instant.
+	jitter := 0.8 + rng.Float64()*0.4
+	jittered := time.Duration(float64(next) * jitter)
+	if jittered < initialBackoff {
+		return initialBackoff
+	}
+	if jittered > maxBackoff {
+		return maxBackoff
+	}
+	return jittered
+}
diff --git a/backend/internal/handler/gateway_helper_test.go b/backend/internal/handler/gateway_helper_test.go
new file mode 100644
index 00000000..664258f8
--- /dev/null
+++ b/backend/internal/handler/gateway_helper_test.go
@@ -0,0 +1,141 @@
+package handler
+
+import (
+	"context"
+	"runtime"
+	"sync/atomic"
+	"testing"
+	"time"
+)
+
+// TestWrapReleaseOnDone_NoGoroutineLeak verifies the fixed wrapReleaseOnDone does not leak goroutines
+func TestWrapReleaseOnDone_NoGoroutineLeak(t *testing.T) {
+	// Record the goroutine count at test start
+	runtime.GC()
+	time.Sleep(100 * time.Millisecond)
+	initialGoroutines := runtime.NumGoroutine()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var releaseCount int32
+	release := wrapReleaseOnDone(ctx, func() {
+		atomic.AddInt32(&releaseCount, 1)
+	})
+
+	// Normal release
+	release()
+
+	// Wait long enough for the goroutine to exit
+	time.Sleep(200 * time.Millisecond)
+
+	// Verify release ran exactly once
+	if count := atomic.LoadInt32(&releaseCount); count != 1 {
+		t.Errorf("expected release count to be 1, got %d", count)
+	}
+
+	// Force GC to clean up exited goroutines
+	runtime.GC()
+	time.Sleep(100 * time.Millisecond)
+
+	// Verify the goroutine count did not grow (±2 tolerance for goroutines the test framework itself may create)
+	finalGoroutines := runtime.NumGoroutine()
+	if finalGoroutines > initialGoroutines+2 {
+		t.Errorf("goroutine leak detected: initial=%d, final=%d, leaked=%d",
+			initialGoroutines, finalGoroutines, finalGoroutines-initialGoroutines)
+	}
+}
+
+// TestWrapReleaseOnDone_ContextCancellation verifies release also fires correctly on context cancellation
+func TestWrapReleaseOnDone_ContextCancellation(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+
+	var releaseCount int32
+	_ = wrapReleaseOnDone(ctx, func() {
+		atomic.AddInt32(&releaseCount, 1)
+	})
+
+	// Cancelling the context should trigger the release
+	cancel()
+
+	// Wait for the release to complete
+	time.Sleep(100 * time.Millisecond)
+
+	// Verify release was invoked
+	if count := atomic.LoadInt32(&releaseCount); count != 1 {
+		t.Errorf("expected release count to be 1, got %d", count)
+	}
+}
+
+// TestWrapReleaseOnDone_MultipleCallsOnlyReleaseOnce verifies multiple release calls only release once
+func TestWrapReleaseOnDone_MultipleCallsOnlyReleaseOnce(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var releaseCount int32
+	release := wrapReleaseOnDone(ctx, func() {
+		atomic.AddInt32(&releaseCount, 1)
+	})
+
+	// Call several times
+	release()
+	release()
+	release()
+
+	// Wait for execution to finish
+	time.Sleep(100 * time.Millisecond)
+
+	// Verify release ran exactly once
+	if count := atomic.LoadInt32(&releaseCount); count != 1 {
+		t.Errorf("expected release count to be 1, got %d", count)
+	}
+}
+
+// TestWrapReleaseOnDone_NilReleaseFunc verifies a nil releaseFunc does not panic
+func TestWrapReleaseOnDone_NilReleaseFunc(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	release := wrapReleaseOnDone(ctx, nil)
+
+	if release != nil {
+		t.Error("expected nil release function when releaseFunc is nil")
+	}
+}
+
+// TestWrapReleaseOnDone_ConcurrentCalls verifies safety under concurrent calls
+func TestWrapReleaseOnDone_ConcurrentCalls(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	var releaseCount int32
+	release := wrapReleaseOnDone(ctx, func() {
+		atomic.AddInt32(&releaseCount, 1)
+	})
+
+	// Call release concurrently
+	const numGoroutines = 10
+	for i := 0; i < numGoroutines; i++ {
+		go release()
+	}
+
+	// Wait for all goroutines to finish
+	time.Sleep(200 * time.Millisecond)
+
+	// Verify release ran exactly once
+	if count := atomic.LoadInt32(&releaseCount); count != 1 {
+		t.Errorf("expected release count to be 1, got %d", count)
+	}
+}
+
+// BenchmarkWrapReleaseOnDone is a performance benchmark
+func BenchmarkWrapReleaseOnDone(b *testing.B) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		release := wrapReleaseOnDone(ctx, func() {})
+		release()
+	}
}
diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go
new file mode 100644
index 00000000..2dddb856
--- /dev/null
+++ b/backend/internal/handler/gemini_v1beta_handler.go
@@ -0,0 +1,435 @@
+package handler
+
+import (
+	"context"
+	"errors"
+	"io"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/antigravity"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/gemini"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/googleapi"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ip"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// GeminiV1BetaListModels proxies:
+// GET /v1beta/models
+func (h *GatewayHandler) GeminiV1BetaListModels(c *gin.Context) {
+	apiKey, ok := middleware.GetAPIKeyFromContext(c)
+	if !ok || apiKey == nil {
+		googleError(c, http.StatusUnauthorized, "Invalid API key")
+		return
+	}
+	// Check platform: prefer the forced platform (/antigravity route), otherwise require a gemini group
+	forcePlatform, hasForcePlatform := middleware.GetForcePlatformFromContext(c)
+	if !hasForcePlatform && (apiKey.Group == nil || apiKey.Group.Platform != service.PlatformGemini) {
+		googleError(c, http.StatusBadRequest, "API key group platform is not gemini")
+		return
+	}
+
+	// Forced antigravity mode: return the model list supported by antigravity
+	if forcePlatform == service.PlatformAntigravity {
+		c.JSON(http.StatusOK, antigravity.FallbackGeminiModelsList())
+		return
+	}
+
+	account, err := h.geminiCompatService.SelectAccountForAIStudioEndpoints(c.Request.Context(), apiKey.GroupID)
+	if err != nil {
+		// No gemini accounts; check whether antigravity accounts are available
+		hasAntigravity, _ := h.geminiCompatService.HasAntigravityAccounts(c.Request.Context(), apiKey.GroupID)
+		if hasAntigravity {
+			// antigravity accounts use a static model list
+			c.JSON(http.StatusOK, gemini.FallbackModelsList())
+			return
+		}
+		googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error())
+		return
+	}
+
+	res, err := h.geminiCompatService.ForwardAIStudioGET(c.Request.Context(), account, "/v1beta/models")
+	if err != nil {
+		googleError(c, http.StatusBadGateway, err.Error())
+		return
+	}
+	if shouldFallbackGeminiModels(res) {
+		c.JSON(http.StatusOK, gemini.FallbackModelsList())
+		return
+	}
+	writeUpstreamResponse(c, res)
+}
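+
+// Illustrative client call (a sketch, not part of this change; the base URL and
+// key are hypothetical, and the exact auth transport is whatever the ApiKeyAuth
+// middleware accepts):
+//
+//	resp, err := http.Get("https://gateway.example/v1beta/models?key=sk-example")
+//	// -> upstream JSON when a Gemini account is selected,
+//	// -> a static fallback list when only antigravity accounts exist,
+//	// -> 503 when the group has no available accounts at all.
+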
+// GeminiV1BetaGetModel proxies:
+// GET /v1beta/models/{model}
+func (h *GatewayHandler) GeminiV1BetaGetModel(c *gin.Context) {
+	apiKey, ok := middleware.GetAPIKeyFromContext(c)
+	if !ok || apiKey == nil {
+		googleError(c, http.StatusUnauthorized, "Invalid API key")
+		return
+	}
+	// Check platform: prefer the forced platform (/antigravity route), otherwise require a gemini group
+	forcePlatform, hasForcePlatform := middleware.GetForcePlatformFromContext(c)
+	if !hasForcePlatform && (apiKey.Group == nil || apiKey.Group.Platform != service.PlatformGemini) {
+		googleError(c, http.StatusBadRequest, "API key group platform is not gemini")
+		return
+	}
+
+	modelName := strings.TrimSpace(c.Param("model"))
+	if modelName == "" {
+		googleError(c, http.StatusBadRequest, "Missing model in URL")
+		return
+	}
+
+	// Forced antigravity mode: return antigravity model info
+	if forcePlatform == service.PlatformAntigravity {
+		c.JSON(http.StatusOK, antigravity.FallbackGeminiModel(modelName))
+		return
+	}
+
+	account, err := h.geminiCompatService.SelectAccountForAIStudioEndpoints(c.Request.Context(), apiKey.GroupID)
+	if err != nil {
+		// No gemini accounts; check whether antigravity accounts are available
+		hasAntigravity, _ := h.geminiCompatService.HasAntigravityAccounts(c.Request.Context(), apiKey.GroupID)
+		if hasAntigravity {
+			// antigravity accounts use static model info
+			c.JSON(http.StatusOK, gemini.FallbackModel(modelName))
+			return
+		}
+		googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error())
+		return
+	}
+
+	res, err := h.geminiCompatService.ForwardAIStudioGET(c.Request.Context(), account, "/v1beta/models/"+modelName)
+	if err != nil {
+		googleError(c, http.StatusBadGateway, err.Error())
+		return
+	}
+	if shouldFallbackGeminiModels(res) {
+		c.JSON(http.StatusOK, gemini.FallbackModel(modelName))
+		return
+	}
+	writeUpstreamResponse(c, res)
+}
+
+// GeminiV1BetaModels proxies Gemini native REST endpoints like:
+// POST /v1beta/models/{model}:generateContent
+// POST /v1beta/models/{model}:streamGenerateContent?alt=sse
+func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) {
+	apiKey, ok := middleware.GetAPIKeyFromContext(c)
+	if !ok || apiKey == nil {
+		googleError(c, http.StatusUnauthorized, "Invalid API key")
+		return
+	}
+	authSubject, ok := middleware.GetAuthSubjectFromContext(c)
+	if !ok {
+		googleError(c, http.StatusInternalServerError, "User context not found")
+		return
+	}
+
+	// Check platform: prefer the forced platform (/antigravity route, already set on request.Context by middleware), otherwise require a gemini group
+	if !middleware.HasForcePlatform(c) {
+		if apiKey.Group == nil || apiKey.Group.Platform != service.PlatformGemini {
+			googleError(c, http.StatusBadRequest, "API key group platform is not gemini")
+			return
+		}
+	}
+
+	modelName, action, err := parseGeminiModelAction(strings.TrimPrefix(c.Param("modelAction"), "/"))
+	if err != nil {
+		googleError(c, http.StatusNotFound, err.Error())
+		return
+	}
+
+	stream := action == "streamGenerateContent"
+
+	body, err := io.ReadAll(c.Request.Body)
+	if err != nil {
+		if maxErr, ok := extractMaxBytesError(err); ok {
+			googleError(c, http.StatusRequestEntityTooLarge, buildBodyTooLargeMessage(maxErr.Limit))
+			return
+		}
+		googleError(c, http.StatusBadRequest, "Failed to read request body")
+		return
+	}
+	if len(body) == 0 {
+		googleError(c, http.StatusBadRequest, "Request body is empty")
+		return
+	}
+
+	setOpsRequestContext(c, modelName, stream, body)
+
+	// Get subscription (may be nil)
+	subscription, _ := middleware.GetSubscriptionFromContext(c)
+
+	// For Gemini native API, do not send Claude-style ping frames.
+ geminiConcurrency := NewConcurrencyHelper(h.concurrencyHelper.concurrencyService, SSEPingFormatNone, 0) + + // 0) wait queue check + maxWait := service.CalculateMaxWait(authSubject.Concurrency) + canWait, err := geminiConcurrency.IncrementWaitCount(c.Request.Context(), authSubject.UserID, maxWait) + waitCounted := false + if err != nil { + log.Printf("Increment wait count failed: %v", err) + } else if !canWait { + googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") + return + } + if err == nil && canWait { + waitCounted = true + } + defer func() { + if waitCounted { + geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID) + } + }() + + // 1) user concurrency slot + streamStarted := false + userReleaseFunc, err := geminiConcurrency.AcquireUserSlotWithWait(c, authSubject.UserID, authSubject.Concurrency, stream, &streamStarted) + if err != nil { + googleError(c, http.StatusTooManyRequests, err.Error()) + return + } + if waitCounted { + geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID) + waitCounted = false + } + // 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏 + userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) + if userReleaseFunc != nil { + defer userReleaseFunc() + } + + // 2) billing eligibility check (after wait) + if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), apiKey.User, apiKey, apiKey.Group, subscription); err != nil { + status, _, message := billingErrorDetails(err) + googleError(c, status, message) + return + } + + // 3) select account (sticky session based on request body) + parsedReq, _ := service.ParseGatewayRequest(body) + sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) + sessionKey := sessionHash + if sessionHash != "" { + sessionKey = "gemini:" + sessionHash + } + const maxAccountSwitches = 3 + switchCount := 0 + failedAccountIDs := make(map[int64]struct{}) + lastFailoverStatus := 0 + + for { + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, failedAccountIDs) + if err != nil { + if len(failedAccountIDs) == 0 { + googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) + return + } + handleGeminiFailoverExhausted(c, lastFailoverStatus) + return + } + account := selection.Account + setOpsSelectedAccount(c, account.ID) + + // 4) account concurrency slot + accountReleaseFunc := selection.ReleaseFunc + if !selection.Acquired { + if selection.WaitPlan == nil { + googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts") + return + } + accountWaitCounted := false + canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") + return + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { + geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + }() + + accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + stream, + &streamStarted, + ) + if err != nil { + googleError(c, http.StatusTooManyRequests, 
err.Error()) + return + } + if accountWaitCounted { + geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } + } + // 账号槽位/等待计数需要在超时或断开时安全回收 + accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) + + // 5) forward (根据平台分流) + var result *service.ForwardResult + if account.Platform == service.PlatformAntigravity { + result, err = h.antigravityGatewayService.ForwardGemini(c.Request.Context(), c, account, modelName, action, stream, body) + } else { + result, err = h.geminiCompatService.ForwardNative(c.Request.Context(), c, account, modelName, action, stream, body) + } + if accountReleaseFunc != nil { + accountReleaseFunc() + } + if err != nil { + var failoverErr *service.UpstreamFailoverError + if errors.As(err, &failoverErr) { + failedAccountIDs[account.ID] = struct{}{} + if switchCount >= maxAccountSwitches { + lastFailoverStatus = failoverErr.StatusCode + handleGeminiFailoverExhausted(c, lastFailoverStatus) + return + } + lastFailoverStatus = failoverErr.StatusCode + switchCount++ + log.Printf("Gemini account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) + continue + } + // ForwardNative already wrote the response + log.Printf("Gemini native forward failed: %v", err) + return + } + + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + + // 6) record usage async + go func(result *service.ForwardResult, usedAccount *service.Account, ua, ip string) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ + Result: result, + APIKey: apiKey, + User: apiKey.User, + Account: usedAccount, + Subscription: subscription, + UserAgent: ua, + IPAddress: ip, + }); err != nil { + log.Printf("Record usage failed: %v", err) + } + }(result, account, userAgent, clientIP) + return + } +} + +func parseGeminiModelAction(rest string) (model string, action string, err error) { + rest = strings.TrimSpace(rest) + if rest == "" { + return "", "", &pathParseError{"missing path"} + } + + // Standard: {model}:{action} + if i := strings.Index(rest, ":"); i > 0 && i < len(rest)-1 { + return rest[:i], rest[i+1:], nil + } + + // Fallback: {model}/{action} + if i := strings.Index(rest, "/"); i > 0 && i < len(rest)-1 { + return rest[:i], rest[i+1:], nil + } + + return "", "", &pathParseError{"invalid model action path"} +} + +func handleGeminiFailoverExhausted(c *gin.Context, statusCode int) { + status, message := mapGeminiUpstreamError(statusCode) + googleError(c, status, message) +} + +func mapGeminiUpstreamError(statusCode int) (int, string) { + switch statusCode { + case 401: + return http.StatusBadGateway, "Upstream authentication failed, please contact administrator" + case 403: + return http.StatusBadGateway, "Upstream access forbidden, please contact administrator" + case 429: + return http.StatusTooManyRequests, "Upstream rate limit exceeded, please retry later" + case 529: + return http.StatusServiceUnavailable, "Upstream service overloaded, please retry later" + case 500, 502, 503, 504: + return http.StatusBadGateway, "Upstream service temporarily unavailable" + default: + return http.StatusBadGateway, "Upstream 
request failed" + } +} + +type pathParseError struct{ msg string } + +func (e *pathParseError) Error() string { return e.msg } + +func googleError(c *gin.Context, status int, message string) { + c.JSON(status, gin.H{ + "error": gin.H{ + "code": status, + "message": message, + "status": googleapi.HTTPStatusToGoogleStatus(status), + }, + }) +} + +func writeUpstreamResponse(c *gin.Context, res *service.UpstreamHTTPResult) { + if res == nil { + googleError(c, http.StatusBadGateway, "Empty upstream response") + return + } + for k, vv := range res.Headers { + // Avoid overriding content-length and hop-by-hop headers. + if strings.EqualFold(k, "Content-Length") || strings.EqualFold(k, "Transfer-Encoding") || strings.EqualFold(k, "Connection") { + continue + } + for _, v := range vv { + c.Writer.Header().Add(k, v) + } + } + contentType := res.Headers.Get("Content-Type") + if contentType == "" { + contentType = "application/json" + } + c.Data(res.StatusCode, contentType, res.Body) +} + +func shouldFallbackGeminiModels(res *service.UpstreamHTTPResult) bool { + if res == nil { + return true + } + if res.StatusCode != http.StatusUnauthorized && res.StatusCode != http.StatusForbidden { + return false + } + if strings.Contains(strings.ToLower(res.Headers.Get("Www-Authenticate")), "insufficient_scope") { + return true + } + if strings.Contains(strings.ToLower(string(res.Body)), "insufficient authentication scopes") { + return true + } + if strings.Contains(strings.ToLower(string(res.Body)), "access_token_scope_insufficient") { + return true + } + return false +} diff --git a/backend/internal/handler/gemini_v1beta_handler_test.go b/backend/internal/handler/gemini_v1beta_handler_test.go new file mode 100644 index 00000000..82b30ee4 --- /dev/null +++ b/backend/internal/handler/gemini_v1beta_handler_test.go @@ -0,0 +1,143 @@ +//go:build unit + +package handler + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +// TestGeminiV1BetaHandler_PlatformRoutingInvariant 文档化并验证 Handler 层的平台路由逻辑不变量 +// 该测试确保 gemini 和 antigravity 平台的路由逻辑符合预期 +func TestGeminiV1BetaHandler_PlatformRoutingInvariant(t *testing.T) { + tests := []struct { + name string + platform string + expectedService string + description string + }{ + { + name: "Gemini平台使用ForwardNative", + platform: service.PlatformGemini, + expectedService: "GeminiMessagesCompatService.ForwardNative", + description: "Gemini OAuth 账户直接调用 Google API", + }, + { + name: "Antigravity平台使用ForwardGemini", + platform: service.PlatformAntigravity, + expectedService: "AntigravityGatewayService.ForwardGemini", + description: "Antigravity 账户通过 CRS 中转,支持 Gemini 协议", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 模拟 GeminiV1BetaModels 中的路由决策 (lines 199-205 in gemini_v1beta_handler.go) + var routedService string + if tt.platform == service.PlatformAntigravity { + routedService = "AntigravityGatewayService.ForwardGemini" + } else { + routedService = "GeminiMessagesCompatService.ForwardNative" + } + + require.Equal(t, tt.expectedService, routedService, + "平台 %s 应该路由到 %s: %s", + tt.platform, tt.expectedService, tt.description) + }) + } +} + +// TestGeminiV1BetaHandler_ListModelsAntigravityFallback 验证 ListModels 的 antigravity 降级逻辑 +// 当没有 gemini 账户但有 antigravity 账户时,应返回静态模型列表 +func TestGeminiV1BetaHandler_ListModelsAntigravityFallback(t *testing.T) { + tests := []struct { + name string + hasGeminiAccount bool + hasAntigravity bool + expectedBehavior string + }{ + { + name: 
"有Gemini账户-调用ForwardAIStudioGET", + hasGeminiAccount: true, + hasAntigravity: false, + expectedBehavior: "forward_to_upstream", + }, + { + name: "无Gemini有Antigravity-返回静态列表", + hasGeminiAccount: false, + hasAntigravity: true, + expectedBehavior: "static_fallback", + }, + { + name: "无任何账户-返回503", + hasGeminiAccount: false, + hasAntigravity: false, + expectedBehavior: "service_unavailable", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 模拟 GeminiV1BetaListModels 的逻辑 (lines 33-44 in gemini_v1beta_handler.go) + var behavior string + + if tt.hasGeminiAccount { + behavior = "forward_to_upstream" + } else if tt.hasAntigravity { + behavior = "static_fallback" + } else { + behavior = "service_unavailable" + } + + require.Equal(t, tt.expectedBehavior, behavior) + }) + } +} + +// TestGeminiV1BetaHandler_GetModelAntigravityFallback 验证 GetModel 的 antigravity 降级逻辑 +func TestGeminiV1BetaHandler_GetModelAntigravityFallback(t *testing.T) { + tests := []struct { + name string + hasGeminiAccount bool + hasAntigravity bool + expectedBehavior string + }{ + { + name: "有Gemini账户-调用ForwardAIStudioGET", + hasGeminiAccount: true, + hasAntigravity: false, + expectedBehavior: "forward_to_upstream", + }, + { + name: "无Gemini有Antigravity-返回静态模型信息", + hasGeminiAccount: false, + hasAntigravity: true, + expectedBehavior: "static_model_info", + }, + { + name: "无任何账户-返回503", + hasGeminiAccount: false, + hasAntigravity: false, + expectedBehavior: "service_unavailable", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // 模拟 GeminiV1BetaGetModel 的逻辑 (lines 77-87 in gemini_v1beta_handler.go) + var behavior string + + if tt.hasGeminiAccount { + behavior = "forward_to_upstream" + } else if tt.hasAntigravity { + behavior = "static_model_info" + } else { + behavior = "service_unavailable" + } + + require.Equal(t, tt.expectedBehavior, behavior) + }) + } +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go new file mode 100644 index 00000000..5b1b317d --- /dev/null +++ b/backend/internal/handler/handler.go @@ -0,0 +1,46 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/admin" +) + +// AdminHandlers contains all admin-related HTTP handlers +type AdminHandlers struct { + Dashboard *admin.DashboardHandler + User *admin.UserHandler + Group *admin.GroupHandler + Account *admin.AccountHandler + OAuth *admin.OAuthHandler + OpenAIOAuth *admin.OpenAIOAuthHandler + GeminiOAuth *admin.GeminiOAuthHandler + AntigravityOAuth *admin.AntigravityOAuthHandler + Proxy *admin.ProxyHandler + Redeem *admin.RedeemHandler + Promo *admin.PromoHandler + Setting *admin.SettingHandler + Ops *admin.OpsHandler + System *admin.SystemHandler + Subscription *admin.SubscriptionHandler + Usage *admin.UsageHandler + UserAttribute *admin.UserAttributeHandler +} + +// Handlers contains all HTTP handlers +type Handlers struct { + Auth *AuthHandler + User *UserHandler + APIKey *APIKeyHandler + Usage *UsageHandler + Redeem *RedeemHandler + Subscription *SubscriptionHandler + Admin *AdminHandlers + Gateway *GatewayHandler + OpenAIGateway *OpenAIGatewayHandler + Setting *SettingHandler +} + +// BuildInfo contains build-time information +type BuildInfo struct { + Version string + BuildType string // "source" for manual builds, "release" for CI builds +} diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go new file mode 100644 index 00000000..c4cfabc3 --- /dev/null +++ 
b/backend/internal/handler/openai_gateway_handler.go @@ -0,0 +1,367 @@ +package handler + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// OpenAIGatewayHandler handles OpenAI API gateway requests +type OpenAIGatewayHandler struct { + gatewayService *service.OpenAIGatewayService + billingCacheService *service.BillingCacheService + concurrencyHelper *ConcurrencyHelper +} + +// NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler +func NewOpenAIGatewayHandler( + gatewayService *service.OpenAIGatewayService, + concurrencyService *service.ConcurrencyService, + billingCacheService *service.BillingCacheService, + cfg *config.Config, +) *OpenAIGatewayHandler { + pingInterval := time.Duration(0) + if cfg != nil { + pingInterval = time.Duration(cfg.Concurrency.PingInterval) * time.Second + } + return &OpenAIGatewayHandler{ + gatewayService: gatewayService, + billingCacheService: billingCacheService, + concurrencyHelper: NewConcurrencyHelper(concurrencyService, SSEPingFormatComment, pingInterval), + } +} + +// Responses handles OpenAI Responses API endpoint +// POST /openai/v1/responses +func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { + // Get apiKey and user from context (set by ApiKeyAuth middleware) + apiKey, ok := middleware2.GetAPIKeyFromContext(c) + if !ok { + h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key") + return + } + + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found") + return + } + + // Read request body + body, err := io.ReadAll(c.Request.Body) + if err != nil { + if maxErr, ok := extractMaxBytesError(err); ok { + h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) + return + } + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to read request body") + return + } + + if len(body) == 0 { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Request body is empty") + return + } + + setOpsRequestContext(c, "", false, body) + + // Parse request body to map for potential modification + var reqBody map[string]any + if err := json.Unmarshal(body, &reqBody); err != nil { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") + return + } + + // Extract model and stream + reqModel, _ := reqBody["model"].(string) + reqStream, _ := reqBody["stream"].(bool) + + // The model field is required + if reqModel == "" { + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required") + return + } + + userAgent := c.GetHeader("User-Agent") + if !openai.IsCodexCLIRequest(userAgent) { + existingInstructions, _ := reqBody["instructions"].(string) + if strings.TrimSpace(existingInstructions) == "" { + if instructions := strings.TrimSpace(service.GetOpenCodeInstructions()); instructions != "" { + reqBody["instructions"] = instructions + // Re-serialize body + body, err = json.Marshal(reqBody) + if err != nil { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request") + return + } + } + } + }
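+ // Re-record the ops request context: model and stream are now known, and the body may have been rewritten above.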
+ + setOpsRequestContext(c, reqModel, reqStream, body) + + // Validate up front that function_call_output carries linkable context, to avoid an upstream 400. + // Require previous_response_id, or a tool_call/function_call with a call_id in the input, + // or an item_reference whose id matches a call_id. + if service.HasFunctionCallOutput(reqBody) { + previousResponseID, _ := reqBody["previous_response_id"].(string) + if strings.TrimSpace(previousResponseID) == "" && !service.HasToolCallContext(reqBody) { + if service.HasFunctionCallOutputMissingCallID(reqBody) { + log.Printf("[OpenAI Handler] function_call_output is missing call_id: model=%s", reqModel) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id") + return + } + callIDs := service.FunctionCallOutputCallIDs(reqBody) + if !service.HasItemReferenceForCallIDs(reqBody, callIDs) { + log.Printf("[OpenAI Handler] function_call_output lacks a matching item_reference: model=%s", reqModel) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id") + return + } + } + } + + // Track if we've started streaming (for error handling) + streamStarted := false + + // Get subscription info (may be nil) + subscription, _ := middleware2.GetSubscriptionFromContext(c) + + // 0. Check if wait queue is full + maxWait := service.CalculateMaxWait(subject.Concurrency) + canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait) + waitCounted := false + if err != nil { + log.Printf("Increment wait count failed: %v", err) + // On error, allow request to proceed + } else if !canWait { + h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") + return + } + if err == nil && canWait { + waitCounted = true + } + defer func() { + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + } + }() + + // 1. First acquire user concurrency slot + userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted) + if err != nil { + log.Printf("User concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "user", streamStarted) + return + } + // User slot acquired: no longer waiting. + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + waitCounted = false + } + // Ensure the slot is also released when the request is canceled, so passively dropped long-lived connections do not leak it + userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) + if userReleaseFunc != nil { + defer userReleaseFunc() + } + + // 2. Re-check billing eligibility after wait
+ if err := h.billingCacheService.CheckBillingEligibility(c.Request.Context(), apiKey.User, apiKey, apiKey.Group, subscription); err != nil { + log.Printf("Billing eligibility check failed after wait: %v", err) + status, code, message := billingErrorDetails(err) + h.handleStreamingAwareError(c, status, code, message, streamStarted) + return + } + + // Generate session hash (from header for OpenAI) + sessionHash := h.gatewayService.GenerateSessionHash(c) + + const maxAccountSwitches = 3 + switchCount := 0 + failedAccountIDs := make(map[int64]struct{}) + lastFailoverStatus := 0 + + for { + // Select account supporting the requested model + log.Printf("[OpenAI Handler] Selecting account: groupID=%v model=%s", apiKey.GroupID, reqModel) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + if err != nil { + log.Printf("[OpenAI Handler] SelectAccount failed: %v", err) + if len(failedAccountIDs) == 0 { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) + return + } + h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) + return + } + account := selection.Account + log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name) + setOpsSelectedAccount(c, account.ID) + + // 3. Acquire account concurrency slot + accountReleaseFunc := selection.ReleaseFunc + if !selection.Acquired { + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + accountWaitCounted := false + canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) + if err != nil { + log.Printf("Increment account wait count failed: %v", err) + } else if !canWait { + log.Printf("Account wait queue full: account=%d", account.ID) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) + return + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + } + }() + + accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + &streamStarted, + ) + if err != nil { + log.Printf("Account concurrency acquire failed: %v", err) + h.handleConcurrencyError(c, err, "account", streamStarted) + return + } + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } + if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil { + log.Printf("Bind sticky session failed: %v", err) + } + } + // The account slot and wait counter must be reclaimed safely on timeout or client disconnect + accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) + + // Forward request + result, err := h.gatewayService.Forward(c.Request.Context(), c, account, body) + if accountReleaseFunc != nil { + accountReleaseFunc() + } + if err != nil { + var failoverErr *service.UpstreamFailoverError + if errors.As(err, &failoverErr) { + failedAccountIDs[account.ID] = struct{}{} + if switchCount >= maxAccountSwitches { + lastFailoverStatus = failoverErr.StatusCode
+ h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) + return + } + lastFailoverStatus = failoverErr.StatusCode + switchCount++ + log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) + continue + } + // Error response already handled in Forward, just log + log.Printf("Account %d: Forward request failed: %v", account.ID, err) + return + } + + // Capture request info here; usage is recorded asynchronously and the goroutine must not touch gin.Context + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + + // Async record usage + go func(result *service.OpenAIForwardResult, usedAccount *service.Account, ua, ip string) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := h.gatewayService.RecordUsage(ctx, &service.OpenAIRecordUsageInput{ + Result: result, + APIKey: apiKey, + User: apiKey.User, + Account: usedAccount, + Subscription: subscription, + UserAgent: ua, + IPAddress: ip, + }); err != nil { + log.Printf("Record usage failed: %v", err) + } + }(result, account, userAgent, clientIP) + return + } +} + +// handleConcurrencyError handles concurrency-related errors with proper 429 response +func (h *OpenAIGatewayHandler) handleConcurrencyError(c *gin.Context, err error, slotType string, streamStarted bool) { + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", + fmt.Sprintf("Concurrency limit exceeded for %s, please retry later", slotType), streamStarted) +} + +func (h *OpenAIGatewayHandler) handleFailoverExhausted(c *gin.Context, statusCode int, streamStarted bool) { + status, errType, errMsg := h.mapUpstreamError(statusCode) + h.handleStreamingAwareError(c, status, errType, errMsg, streamStarted) +} + +func (h *OpenAIGatewayHandler) mapUpstreamError(statusCode int) (int, string, string) { + switch statusCode { + case 401: + return http.StatusBadGateway, "upstream_error", "Upstream authentication failed, please contact administrator" + case 403: + return http.StatusBadGateway, "upstream_error", "Upstream access forbidden, please contact administrator" + case 429: + return http.StatusTooManyRequests, "rate_limit_error", "Upstream rate limit exceeded, please retry later" + case 529: + return http.StatusServiceUnavailable, "upstream_error", "Upstream service overloaded, please retry later" + case 500, 502, 503, 504: + return http.StatusBadGateway, "upstream_error", "Upstream service temporarily unavailable" + default: + return http.StatusBadGateway, "upstream_error", "Upstream request failed" + } +} + +// handleStreamingAwareError handles errors that may occur after streaming has started +func (h *OpenAIGatewayHandler) handleStreamingAwareError(c *gin.Context, status int, errType, message string, streamStarted bool) { + if streamStarted { + // Stream already started, send error as SSE event then close + flusher, ok := c.Writer.(http.Flusher) + if ok { + // Send error event in OpenAI SSE format + errorEvent := fmt.Sprintf(`event: error`+"\n"+`data: {"error": {"type": "%s", "message": "%s"}}`+"\n\n", errType, message) + if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil { + _ = c.Error(err) + } + flusher.Flush() + } + return + } + + // Normal case: return JSON response with proper status code + h.errorResponse(c, status, errType, message) +} + +// errorResponse returns OpenAI API format error response +func (h *OpenAIGatewayHandler) errorResponse(c *gin.Context, status int, errType, message string) { + c.JSON(status, gin.H{ + 
"error": gin.H{ + "type": errType, + "message": message, + }, + }) +} diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go new file mode 100644 index 00000000..f62e6b3e --- /dev/null +++ b/backend/internal/handler/ops_error_logger.go @@ -0,0 +1,1015 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "log" + "runtime" + "runtime/debug" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +const ( + opsModelKey = "ops_model" + opsStreamKey = "ops_stream" + opsRequestBodyKey = "ops_request_body" + opsAccountIDKey = "ops_account_id" +) + +const ( + opsErrorLogTimeout = 5 * time.Second + opsErrorLogDrainTimeout = 10 * time.Second + + opsErrorLogMinWorkerCount = 4 + opsErrorLogMaxWorkerCount = 32 + + opsErrorLogQueueSizePerWorker = 128 + opsErrorLogMinQueueSize = 256 + opsErrorLogMaxQueueSize = 8192 +) + +type opsErrorLogJob struct { + ops *service.OpsService + entry *service.OpsInsertErrorLogInput + requestBody []byte +} + +var ( + opsErrorLogOnce sync.Once + opsErrorLogQueue chan opsErrorLogJob + + opsErrorLogStopOnce sync.Once + opsErrorLogWorkersWg sync.WaitGroup + opsErrorLogMu sync.RWMutex + opsErrorLogStopping bool + opsErrorLogQueueLen atomic.Int64 + opsErrorLogEnqueued atomic.Int64 + opsErrorLogDropped atomic.Int64 + opsErrorLogProcessed atomic.Int64 + + opsErrorLogLastDropLogAt atomic.Int64 + + opsErrorLogShutdownCh = make(chan struct{}) + opsErrorLogShutdownOnce sync.Once + opsErrorLogDrained atomic.Bool +) + +func startOpsErrorLogWorkers() { + opsErrorLogMu.Lock() + defer opsErrorLogMu.Unlock() + + if opsErrorLogStopping { + return + } + + workerCount, queueSize := opsErrorLogConfig() + opsErrorLogQueue = make(chan opsErrorLogJob, queueSize) + opsErrorLogQueueLen.Store(0) + + opsErrorLogWorkersWg.Add(workerCount) + for i := 0; i < workerCount; i++ { + go func() { + defer opsErrorLogWorkersWg.Done() + for job := range opsErrorLogQueue { + opsErrorLogQueueLen.Add(-1) + if job.ops == nil || job.entry == nil { + continue + } + func() { + defer func() { + if r := recover(); r != nil { + log.Printf("[OpsErrorLogger] worker panic: %v\n%s", r, debug.Stack()) + } + }() + ctx, cancel := context.WithTimeout(context.Background(), opsErrorLogTimeout) + _ = job.ops.RecordError(ctx, job.entry, job.requestBody) + cancel() + opsErrorLogProcessed.Add(1) + }() + } + }() + } +} + +func enqueueOpsErrorLog(ops *service.OpsService, entry *service.OpsInsertErrorLogInput, requestBody []byte) { + if ops == nil || entry == nil { + return + } + select { + case <-opsErrorLogShutdownCh: + return + default: + } + + opsErrorLogMu.RLock() + stopping := opsErrorLogStopping + opsErrorLogMu.RUnlock() + if stopping { + return + } + + opsErrorLogOnce.Do(startOpsErrorLogWorkers) + + opsErrorLogMu.RLock() + defer opsErrorLogMu.RUnlock() + if opsErrorLogStopping || opsErrorLogQueue == nil { + return + } + + select { + case opsErrorLogQueue <- opsErrorLogJob{ops: ops, entry: entry, requestBody: requestBody}: + opsErrorLogQueueLen.Add(1) + opsErrorLogEnqueued.Add(1) + default: + // Queue is full; drop to avoid blocking request handling. 
+ opsErrorLogDropped.Add(1) + maybeLogOpsErrorLogDrop() + } +} + +func StopOpsErrorLogWorkers() bool { + opsErrorLogStopOnce.Do(func() { + opsErrorLogShutdownOnce.Do(func() { + close(opsErrorLogShutdownCh) + }) + opsErrorLogDrained.Store(stopOpsErrorLogWorkers()) + }) + return opsErrorLogDrained.Load() +} + +func stopOpsErrorLogWorkers() bool { + opsErrorLogMu.Lock() + opsErrorLogStopping = true + ch := opsErrorLogQueue + if ch != nil { + close(ch) + } + opsErrorLogQueue = nil + opsErrorLogMu.Unlock() + + if ch == nil { + opsErrorLogQueueLen.Store(0) + return true + } + + done := make(chan struct{}) + go func() { + opsErrorLogWorkersWg.Wait() + close(done) + }() + + select { + case <-done: + opsErrorLogQueueLen.Store(0) + return true + case <-time.After(opsErrorLogDrainTimeout): + return false + } +} + +func OpsErrorLogQueueLength() int64 { + return opsErrorLogQueueLen.Load() +} + +func OpsErrorLogQueueCapacity() int { + opsErrorLogMu.RLock() + ch := opsErrorLogQueue + opsErrorLogMu.RUnlock() + if ch == nil { + return 0 + } + return cap(ch) +} + +func OpsErrorLogDroppedTotal() int64 { + return opsErrorLogDropped.Load() +} + +func OpsErrorLogEnqueuedTotal() int64 { + return opsErrorLogEnqueued.Load() +} + +func OpsErrorLogProcessedTotal() int64 { + return opsErrorLogProcessed.Load() +} + +func maybeLogOpsErrorLogDrop() { + now := time.Now().Unix() + + for { + last := opsErrorLogLastDropLogAt.Load() + if last != 0 && now-last < 60 { + return + } + if opsErrorLogLastDropLogAt.CompareAndSwap(last, now) { + break + } + } + + queued := opsErrorLogQueueLen.Load() + queueCap := OpsErrorLogQueueCapacity() + + log.Printf( + "[OpsErrorLogger] queue is full; dropping logs (queued=%d cap=%d enqueued_total=%d dropped_total=%d processed_total=%d)", + queued, + queueCap, + opsErrorLogEnqueued.Load(), + opsErrorLogDropped.Load(), + opsErrorLogProcessed.Load(), + ) +} + +func opsErrorLogConfig() (workerCount int, queueSize int) { + workerCount = runtime.GOMAXPROCS(0) * 2 + if workerCount < opsErrorLogMinWorkerCount { + workerCount = opsErrorLogMinWorkerCount + } + if workerCount > opsErrorLogMaxWorkerCount { + workerCount = opsErrorLogMaxWorkerCount + } + + queueSize = workerCount * opsErrorLogQueueSizePerWorker + if queueSize < opsErrorLogMinQueueSize { + queueSize = opsErrorLogMinQueueSize + } + if queueSize > opsErrorLogMaxQueueSize { + queueSize = opsErrorLogMaxQueueSize + } + + return workerCount, queueSize +} + +func setOpsRequestContext(c *gin.Context, model string, stream bool, requestBody []byte) { + if c == nil { + return + } + c.Set(opsModelKey, model) + c.Set(opsStreamKey, stream) + if len(requestBody) > 0 { + c.Set(opsRequestBodyKey, requestBody) + } +} + +func setOpsSelectedAccount(c *gin.Context, accountID int64) { + if c == nil || accountID <= 0 { + return + } + c.Set(opsAccountIDKey, accountID) +} + +type opsCaptureWriter struct { + gin.ResponseWriter + limit int + buf bytes.Buffer +} + +func (w *opsCaptureWriter) Write(b []byte) (int, error) { + if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(b) > remaining { + _, _ = w.buf.Write(b[:remaining]) + } else { + _, _ = w.buf.Write(b) + } + } + return w.ResponseWriter.Write(b) +} + +func (w *opsCaptureWriter) WriteString(s string) (int, error) { + if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(s) > remaining { + _, _ = w.buf.WriteString(s[:remaining]) + } else { + _, _ = w.buf.WriteString(s) + } + } + return 
w.ResponseWriter.WriteString(s) +} + +// OpsErrorLoggerMiddleware records error responses (status >= 400) into ops_error_logs. +// +// Notes: +// - It buffers response bodies only when status >= 400 to avoid overhead for successful traffic. +// - Streaming errors after the response has started (SSE) may still need explicit logging. +func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc { + return func(c *gin.Context) { + w := &opsCaptureWriter{ResponseWriter: c.Writer, limit: 64 * 1024} + c.Writer = w + c.Next() + + if ops == nil { + return + } + if !ops.IsMonitoringEnabled(c.Request.Context()) { + return + } + + status := c.Writer.Status() + if status < 400 { + // Even when the client request succeeds, we still want to persist upstream error attempts + // (retries/failover) so ops can observe upstream instability that gets "covered" by retries. + var events []*service.OpsUpstreamErrorEvent + if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok { + if arr, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(arr) > 0 { + events = arr + } + } + // Also accept single upstream fields set by gateway services (rare for successful requests). + hasUpstreamContext := len(events) > 0 + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + hasUpstreamContext = t > 0 + case int64: + hasUpstreamContext = t > 0 + } + } + } + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + hasUpstreamContext = true + } + } + } + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + hasUpstreamContext = true + } + } + } + if !hasUpstreamContext { + return + } + + apiKey, _ := middleware2.GetAPIKeyFromContext(c) + clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string) + + model, _ := c.Get(opsModelKey) + streamV, _ := c.Get(opsStreamKey) + accountIDV, _ := c.Get(opsAccountIDKey) + + var modelName string + if s, ok := model.(string); ok { + modelName = s + } + stream := false + if b, ok := streamV.(bool); ok { + stream = b + } + + // Prefer showing the account that experienced the upstream error (if we have events), + // otherwise fall back to the final selected account (best-effort). + var accountID *int64 + if len(events) > 0 { + if last := events[len(events)-1]; last != nil && last.AccountID > 0 { + v := last.AccountID + accountID = &v + } + } + if accountID == nil { + if v, ok := accountIDV.(int64); ok && v > 0 { + accountID = &v + } + } + + fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path) + platform := resolveOpsPlatform(apiKey, fallbackPlatform) + + requestID := c.Writer.Header().Get("X-Request-Id") + if requestID == "" { + requestID = c.Writer.Header().Get("x-request-id") + } + + // Best-effort backfill single upstream fields from the last event (if present). 
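+ // Upstream error events are assumed to be appended in occurrence order, so the last entry reflects the most recent attempt.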
+ var upstreamStatusCode *int + var upstreamErrorMessage *string + var upstreamErrorDetail *string + if len(events) > 0 { + last := events[len(events)-1] + if last != nil { + if last.UpstreamStatusCode > 0 { + code := last.UpstreamStatusCode + upstreamStatusCode = &code + } + if msg := strings.TrimSpace(last.Message); msg != "" { + upstreamErrorMessage = &msg + } + if detail := strings.TrimSpace(last.Detail); detail != "" { + upstreamErrorDetail = &detail + } + } + } + + if upstreamStatusCode == nil { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + if t > 0 { + code := t + upstreamStatusCode = &code + } + case int64: + if t > 0 { + code := int(t) + upstreamStatusCode = &code + } + } + } + } + if upstreamErrorMessage == nil { + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + msg := strings.TrimSpace(s) + upstreamErrorMessage = &msg + } + } + } + if upstreamErrorDetail == nil { + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + detail := strings.TrimSpace(s) + upstreamErrorDetail = &detail + } + } + } + + // If we still have nothing meaningful, skip. + if upstreamStatusCode == nil && upstreamErrorMessage == nil && upstreamErrorDetail == nil && len(events) == 0 { + return + } + + effectiveUpstreamStatus := 0 + if upstreamStatusCode != nil { + effectiveUpstreamStatus = *upstreamStatusCode + } + + recoveredMsg := "Recovered upstream error" + if effectiveUpstreamStatus > 0 { + recoveredMsg += " " + strconvItoa(effectiveUpstreamStatus) + } + if upstreamErrorMessage != nil && strings.TrimSpace(*upstreamErrorMessage) != "" { + recoveredMsg += ": " + strings.TrimSpace(*upstreamErrorMessage) + } + recoveredMsg = truncateString(recoveredMsg, 2048) + + entry := &service.OpsInsertErrorLogInput{ + RequestID: requestID, + ClientRequestID: clientRequestID, + + AccountID: accountID, + Platform: platform, + Model: modelName, + RequestPath: func() string { + if c.Request != nil && c.Request.URL != nil { + return c.Request.URL.Path + } + return "" + }(), + Stream: stream, + UserAgent: c.GetHeader("User-Agent"), + + ErrorPhase: "upstream", + ErrorType: "upstream_error", + // Severity/retryability should reflect the upstream failure, not the final client status (200). + Severity: classifyOpsSeverity("upstream_error", effectiveUpstreamStatus), + StatusCode: status, + IsBusinessLimited: false, + IsCountTokens: isCountTokensRequest(c), + + ErrorMessage: recoveredMsg, + ErrorBody: "", + + ErrorSource: "upstream_http", + ErrorOwner: "provider", + + UpstreamStatusCode: upstreamStatusCode, + UpstreamErrorMessage: upstreamErrorMessage, + UpstreamErrorDetail: upstreamErrorDetail, + UpstreamErrors: events, + + IsRetryable: classifyOpsIsRetryable("upstream_error", effectiveUpstreamStatus), + RetryCount: 0, + CreatedAt: time.Now(), + } + + if apiKey != nil { + entry.APIKeyID = &apiKey.ID + if apiKey.User != nil { + entry.UserID = &apiKey.User.ID + } + if apiKey.GroupID != nil { + entry.GroupID = apiKey.GroupID + } + // Prefer group platform if present (more stable than inferring from path). 
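+ // guessPlatformFromPath is only a best-effort fallback for requests that never resolved an API key or group.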
+ if apiKey.Group != nil && apiKey.Group.Platform != "" { + entry.Platform = apiKey.Group.Platform + } + } + + var clientIP string + if ip := strings.TrimSpace(ip.GetClientIP(c)); ip != "" { + clientIP = ip + entry.ClientIP = &clientIP + } + + var requestBody []byte + if v, ok := c.Get(opsRequestBodyKey); ok { + if b, ok := v.([]byte); ok && len(b) > 0 { + requestBody = b + } + } + // Store request headers/body only when an upstream error occurred to keep overhead minimal. + entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c) + + enqueueOpsErrorLog(ops, entry, requestBody) + return + } + + body := w.buf.Bytes() + parsed := parseOpsErrorResponse(body) + + // Skip logging if the error should be filtered based on settings + if shouldSkipOpsErrorLog(c.Request.Context(), ops, parsed.Message, string(body), c.Request.URL.Path) { + return + } + + apiKey, _ := middleware2.GetAPIKeyFromContext(c) + + clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string) + + model, _ := c.Get(opsModelKey) + streamV, _ := c.Get(opsStreamKey) + accountIDV, _ := c.Get(opsAccountIDKey) + + var modelName string + if s, ok := model.(string); ok { + modelName = s + } + stream := false + if b, ok := streamV.(bool); ok { + stream = b + } + var accountID *int64 + if v, ok := accountIDV.(int64); ok && v > 0 { + accountID = &v + } + + fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path) + platform := resolveOpsPlatform(apiKey, fallbackPlatform) + + requestID := c.Writer.Header().Get("X-Request-Id") + if requestID == "" { + requestID = c.Writer.Header().Get("x-request-id") + } + + phase := classifyOpsPhase(parsed.ErrorType, parsed.Message, parsed.Code) + isBusinessLimited := classifyOpsIsBusinessLimited(parsed.ErrorType, phase, parsed.Code, status, parsed.Message) + + errorOwner := classifyOpsErrorOwner(phase, parsed.Message) + errorSource := classifyOpsErrorSource(phase, parsed.Message) + + entry := &service.OpsInsertErrorLogInput{ + RequestID: requestID, + ClientRequestID: clientRequestID, + + AccountID: accountID, + Platform: platform, + Model: modelName, + RequestPath: func() string { + if c.Request != nil && c.Request.URL != nil { + return c.Request.URL.Path + } + return "" + }(), + Stream: stream, + UserAgent: c.GetHeader("User-Agent"), + + ErrorPhase: phase, + ErrorType: normalizeOpsErrorType(parsed.ErrorType, parsed.Code), + Severity: classifyOpsSeverity(parsed.ErrorType, status), + StatusCode: status, + IsBusinessLimited: isBusinessLimited, + IsCountTokens: isCountTokensRequest(c), + + ErrorMessage: parsed.Message, + // Keep the full captured error body (capture is already capped at 64KB) so the + // service layer can sanitize JSON before truncating for storage. + ErrorBody: string(body), + ErrorSource: errorSource, + ErrorOwner: errorOwner, + + IsRetryable: classifyOpsIsRetryable(parsed.ErrorType, status), + RetryCount: 0, + CreatedAt: time.Now(), + } + + // Capture upstream error context set by gateway services (if present). + // This does NOT affect the client response; it enriches Ops troubleshooting data. 
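+ // Gateway services may have stored the status code as int or int64; both variants are handled below.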
+ { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + if t > 0 { + code := t + entry.UpstreamStatusCode = &code + } + case int64: + if t > 0 { + code := int(t) + entry.UpstreamStatusCode = &code + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok { + if msg := strings.TrimSpace(s); msg != "" { + entry.UpstreamErrorMessage = &msg + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok { + if detail := strings.TrimSpace(s); detail != "" { + entry.UpstreamErrorDetail = &detail + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok { + if events, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(events) > 0 { + entry.UpstreamErrors = events + // Best-effort backfill the single upstream fields from the last event when missing. + last := events[len(events)-1] + if last != nil { + if entry.UpstreamStatusCode == nil && last.UpstreamStatusCode > 0 { + code := last.UpstreamStatusCode + entry.UpstreamStatusCode = &code + } + if entry.UpstreamErrorMessage == nil && strings.TrimSpace(last.Message) != "" { + msg := strings.TrimSpace(last.Message) + entry.UpstreamErrorMessage = &msg + } + if entry.UpstreamErrorDetail == nil && strings.TrimSpace(last.Detail) != "" { + detail := strings.TrimSpace(last.Detail) + entry.UpstreamErrorDetail = &detail + } + } + } + } + } + + if apiKey != nil { + entry.APIKeyID = &apiKey.ID + if apiKey.User != nil { + entry.UserID = &apiKey.User.ID + } + if apiKey.GroupID != nil { + entry.GroupID = apiKey.GroupID + } + // Prefer group platform if present (more stable than inferring from path). + if apiKey.Group != nil && apiKey.Group.Platform != "" { + entry.Platform = apiKey.Group.Platform + } + } + + var clientIP string + if ip := strings.TrimSpace(ip.GetClientIP(c)); ip != "" { + clientIP = ip + entry.ClientIP = &clientIP + } + + var requestBody []byte + if v, ok := c.Get(opsRequestBodyKey); ok { + if b, ok := v.([]byte); ok && len(b) > 0 { + requestBody = b + } + } + // Persist only a minimal, whitelisted set of request headers to improve retry fidelity. + // Do NOT store Authorization/Cookie/etc. + entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c) + + enqueueOpsErrorLog(ops, entry, requestBody) + } +} + +var opsRetryRequestHeaderAllowlist = []string{ + "anthropic-beta", + "anthropic-version", +} + +// isCountTokensRequest checks if the request is a count_tokens request +func isCountTokensRequest(c *gin.Context) bool { + if c == nil || c.Request == nil || c.Request.URL == nil { + return false + } + return strings.Contains(c.Request.URL.Path, "/count_tokens") +} + +func extractOpsRetryRequestHeaders(c *gin.Context) *string { + if c == nil || c.Request == nil { + return nil + } + + headers := make(map[string]string, 4) + for _, key := range opsRetryRequestHeaderAllowlist { + v := strings.TrimSpace(c.GetHeader(key)) + if v == "" { + continue + } + // Keep headers small even if a client sends something unexpected. + headers[key] = truncateString(v, 512) + } + if len(headers) == 0 { + return nil + } + + raw, err := json.Marshal(headers) + if err != nil { + return nil + } + s := string(raw) + return &s +} + +type parsedOpsError struct { + ErrorType string + Message string + Code string +} + +func parseOpsErrorResponse(body []byte) parsedOpsError { + if len(body) == 0 { + return parsedOpsError{} + } + + // Fast path: attempt to decode into a generic map. 
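+ // Illustrative payload shapes (examples, not exhaustive): Claude/OpenAI-style {"type":"error","error":{"type":"...","message":"..."}}, Gemini googleError-style {"error":{"code":429,"message":"...","status":"..."}}, and API-key-auth-style {"code":"INSUFFICIENT_BALANCE","message":"..."}.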
+ var m map[string]any + if err := json.Unmarshal(body, &m); err != nil { + return parsedOpsError{Message: truncateString(string(body), 1024)} + } + + // Claude/OpenAI-style gateway error: { type:"error", error:{ type, message } } + if errObj, ok := m["error"].(map[string]any); ok { + t, _ := errObj["type"].(string) + msg, _ := errObj["message"].(string) + // Gemini googleError also uses "error": { code, message, status } + if msg == "" { + if v, ok := errObj["message"]; ok { + msg, _ = v.(string) + } + } + if t == "" { + // Gemini error does not have "type" field. + t = "api_error" + } + // For gemini error, capture numeric code as string for business-limited mapping if needed. + var code string + if v, ok := errObj["code"]; ok { + switch n := v.(type) { + case float64: + code = strconvItoa(int(n)) + case int: + code = strconvItoa(n) + } + } + return parsedOpsError{ErrorType: t, Message: msg, Code: code} + } + + // APIKeyAuth-style: { code:"INSUFFICIENT_BALANCE", message:"..." } + code, _ := m["code"].(string) + msg, _ := m["message"].(string) + if code != "" || msg != "" { + return parsedOpsError{ErrorType: "api_error", Message: msg, Code: code} + } + + return parsedOpsError{Message: truncateString(string(body), 1024)} +} + +func resolveOpsPlatform(apiKey *service.APIKey, fallback string) string { + if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform != "" { + return apiKey.Group.Platform + } + return fallback +} + +func guessPlatformFromPath(path string) string { + p := strings.ToLower(path) + switch { + case strings.HasPrefix(p, "/antigravity/"): + return service.PlatformAntigravity + case strings.HasPrefix(p, "/v1beta/"): + return service.PlatformGemini + case strings.Contains(p, "/responses"): + return service.PlatformOpenAI + default: + return "" + } +} + +func normalizeOpsErrorType(errType string, code string) string { + if errType != "" { + return errType + } + switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE": + return "billing_error" + case "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return "subscription_error" + default: + return "api_error" + } +} + +func classifyOpsPhase(errType, message, code string) string { + msg := strings.ToLower(message) + // Standardized phases: request|auth|routing|upstream|network|internal + // Map billing/concurrency/response => request; scheduling => routing. 
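+ // Business codes are checked first: they force the "request" phase regardless of the reported error type.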
+ switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return "request" + } + + switch errType { + case "authentication_error": + return "auth" + case "billing_error", "subscription_error": + return "request" + case "rate_limit_error": + if strings.Contains(msg, "concurrency") || strings.Contains(msg, "pending") || strings.Contains(msg, "queue") { + return "request" + } + return "upstream" + case "invalid_request_error": + return "request" + case "upstream_error", "overloaded_error": + return "upstream" + case "api_error": + if strings.Contains(msg, "no available accounts") { + return "routing" + } + return "internal" + default: + return "internal" + } +} + +func classifyOpsSeverity(errType string, status int) string { + switch errType { + case "invalid_request_error", "authentication_error", "billing_error", "subscription_error": + return "P3" + } + if status >= 500 { + return "P1" + } + if status == 429 { + return "P1" + } + if status >= 400 { + return "P2" + } + return "P3" +} + +func classifyOpsIsRetryable(errType string, statusCode int) bool { + switch errType { + case "authentication_error", "invalid_request_error": + return false + case "timeout_error": + return true + case "rate_limit_error": + // May be transient (upstream or queue); retry can help. + return true + case "billing_error", "subscription_error": + return false + case "upstream_error", "overloaded_error": + return statusCode >= 500 || statusCode == 429 || statusCode == 529 + default: + return statusCode >= 500 + } +} + +func classifyOpsIsBusinessLimited(errType, phase, code string, status int, message string) bool { + switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return true + } + if phase == "billing" || phase == "concurrency" { + // Exclude user-level business limits from SLA/error-rate metrics + return true + } + // Avoid treating upstream rate limits as business-limited. + if errType == "rate_limit_error" && strings.Contains(strings.ToLower(message), "upstream") { + return false + } + _ = status + return false +} + +func classifyOpsErrorOwner(phase string, message string) string { + // Standardized owners: client|provider|platform + switch phase { + case "upstream", "network": + return "provider" + case "request", "auth": + return "client" + case "routing", "internal": + return "platform" + default: + if strings.Contains(strings.ToLower(message), "upstream") { + return "provider" + } + return "platform" + } +} + +func classifyOpsErrorSource(phase string, message string) string { + // Standardized sources: client_request|upstream_http|gateway + switch phase { + case "upstream": + return "upstream_http" + case "network": + return "gateway" + case "request", "auth": + return "client_request" + case "routing", "internal": + return "gateway" + default: + if strings.Contains(strings.ToLower(message), "upstream") { + return "upstream_http" + } + return "gateway" + } +} + +func truncateString(s string, max int) string { + if max <= 0 { + return "" + } + if len(s) <= max { + return s + } + cut := s[:max] + // Ensure truncation does not split multi-byte characters. + for len(cut) > 0 && !utf8.ValidString(cut) { + cut = cut[:len(cut)-1] + } + return cut +} + +func strconvItoa(v int) string { + return strconv.Itoa(v) +} + +// shouldSkipOpsErrorLog determines if an error should be skipped from logging based on settings. 
+// Returns true for errors that should be filtered according to OpsAdvancedSettings. +func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message, body, requestPath string) bool { + if ops == nil { + return false + } + + // Get advanced settings to check filter configuration + settings, err := ops.GetOpsAdvancedSettings(ctx) + if err != nil || settings == nil { + // If we can't get settings, don't skip (fail open) + return false + } + + msgLower := strings.ToLower(message) + bodyLower := strings.ToLower(body) + + // Check if count_tokens errors should be ignored + if settings.IgnoreCountTokensErrors && strings.Contains(requestPath, "/count_tokens") { + return true + } + + // Check if context canceled errors should be ignored (client disconnects) + if settings.IgnoreContextCanceled { + if strings.Contains(msgLower, "context canceled") || strings.Contains(bodyLower, "context canceled") { + return true + } + } + + // Check if "no available accounts" errors should be ignored + if settings.IgnoreNoAvailableAccounts { + if strings.Contains(msgLower, "no available accounts") || strings.Contains(bodyLower, "no available accounts") { + return true + } + } + + return false +} diff --git a/backend/internal/handler/redeem_handler.go b/backend/internal/handler/redeem_handler.go new file mode 100644 index 00000000..1b63f418 --- /dev/null +++ b/backend/internal/handler/redeem_handler.go @@ -0,0 +1,85 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// RedeemHandler handles redeem code-related requests +type RedeemHandler struct { + redeemService *service.RedeemService +} + +// NewRedeemHandler creates a new RedeemHandler +func NewRedeemHandler(redeemService *service.RedeemService) *RedeemHandler { + return &RedeemHandler{ + redeemService: redeemService, + } +} + +// RedeemRequest represents the redeem code request payload +type RedeemRequest struct { + Code string `json:"code" binding:"required"` +} + +// RedeemResponse represents the redeem response +type RedeemResponse struct { + Message string `json:"message"` + Type string `json:"type"` + Value float64 `json:"value"` + NewBalance *float64 `json:"new_balance,omitempty"` + NewConcurrency *int `json:"new_concurrency,omitempty"` +} + +// Redeem handles redeeming a code +// POST /api/v1/redeem +func (h *RedeemHandler) Redeem(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var req RedeemRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + result, err := h.redeemService.Redeem(c.Request.Context(), subject.UserID, req.Code) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.RedeemCodeFromService(result)) +} + +// GetHistory returns the user's redemption history +// GET /api/v1/redeem/history +func (h *RedeemHandler) GetHistory(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + // Default limit is 25 + limit := 25 + + codes, err := h.redeemService.GetUserHistory(c.Request.Context(), subject.UserID, limit) + if err != nil { + response.ErrorFrom(c, err) + return + } + + 
out := make([]dto.RedeemCode, 0, len(codes)) + for i := range codes { + out = append(out, *dto.RedeemCodeFromService(&codes[i])) + } + response.Success(c, out) +} diff --git a/backend/internal/handler/request_body_limit.go b/backend/internal/handler/request_body_limit.go new file mode 100644 index 00000000..d746673b --- /dev/null +++ b/backend/internal/handler/request_body_limit.go @@ -0,0 +1,27 @@ +package handler + +import ( + "errors" + "fmt" + "net/http" +) + +func extractMaxBytesError(err error) (*http.MaxBytesError, bool) { + var maxErr *http.MaxBytesError + if errors.As(err, &maxErr) { + return maxErr, true + } + return nil, false +} + +func formatBodyLimit(limit int64) string { + const mb = 1024 * 1024 + if limit >= mb { + return fmt.Sprintf("%dMB", limit/mb) + } + return fmt.Sprintf("%dB", limit) +} + +func buildBodyTooLargeMessage(limit int64) string { + return fmt.Sprintf("Request body too large, limit is %s", formatBodyLimit(limit)) +} diff --git a/backend/internal/handler/request_body_limit_test.go b/backend/internal/handler/request_body_limit_test.go new file mode 100644 index 00000000..bd9b8177 --- /dev/null +++ b/backend/internal/handler/request_body_limit_test.go @@ -0,0 +1,45 @@ +package handler + +import ( + "bytes" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestRequestBodyLimitTooLarge(t *testing.T) { + gin.SetMode(gin.TestMode) + + limit := int64(16) + router := gin.New() + router.Use(middleware.RequestBodyLimit(limit)) + router.POST("/test", func(c *gin.Context) { + _, err := io.ReadAll(c.Request.Body) + if err != nil { + if maxErr, ok := extractMaxBytesError(err); ok { + c.JSON(http.StatusRequestEntityTooLarge, gin.H{ + "error": buildBodyTooLargeMessage(maxErr.Limit), + }) + return + } + c.JSON(http.StatusBadRequest, gin.H{ + "error": "read_failed", + }) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + payload := bytes.Repeat([]byte("a"), int(limit+1)) + req := httptest.NewRequest(http.MethodPost, "/test", bytes.NewReader(payload)) + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusRequestEntityTooLarge, recorder.Code) + require.Contains(t, recorder.Body.String(), buildBodyTooLargeMessage(limit)) +} diff --git a/backend/internal/handler/setting_handler.go b/backend/internal/handler/setting_handler.go new file mode 100644 index 00000000..cac79e9c --- /dev/null +++ b/backend/internal/handler/setting_handler.go @@ -0,0 +1,49 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// SettingHandler serves public settings (no authentication required) +type SettingHandler struct { + settingService *service.SettingService + version string +} + +// NewSettingHandler creates a new SettingHandler +func NewSettingHandler(settingService *service.SettingService, version string) *SettingHandler { + return &SettingHandler{ + settingService: settingService, + version: version, + } +} + +// GetPublicSettings returns the public settings +// GET /api/v1/settings/public
func (h *SettingHandler) GetPublicSettings(c *gin.Context) { + settings, err := h.settingService.GetPublicSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.PublicSettings{ + RegistrationEnabled: settings.RegistrationEnabled, + EmailVerifyEnabled: 
settings.EmailVerifyEnabled, + TurnstileEnabled: settings.TurnstileEnabled, + TurnstileSiteKey: settings.TurnstileSiteKey, + SiteName: settings.SiteName, + SiteLogo: settings.SiteLogo, + SiteSubtitle: settings.SiteSubtitle, + APIBaseURL: settings.APIBaseURL, + ContactInfo: settings.ContactInfo, + DocURL: settings.DocURL, + HomeContent: settings.HomeContent, + LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, + Version: h.version, + }) +} diff --git a/backend/internal/handler/subscription_handler.go b/backend/internal/handler/subscription_handler.go new file mode 100644 index 00000000..b40df833 --- /dev/null +++ b/backend/internal/handler/subscription_handler.go @@ -0,0 +1,188 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// SubscriptionSummaryItem represents a subscription item in summary +type SubscriptionSummaryItem struct { + ID int64 `json:"id"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Status string `json:"status"` + DailyUsedUSD float64 `json:"daily_used_usd,omitempty"` + DailyLimitUSD float64 `json:"daily_limit_usd,omitempty"` + WeeklyUsedUSD float64 `json:"weekly_used_usd,omitempty"` + WeeklyLimitUSD float64 `json:"weekly_limit_usd,omitempty"` + MonthlyUsedUSD float64 `json:"monthly_used_usd,omitempty"` + MonthlyLimitUSD float64 `json:"monthly_limit_usd,omitempty"` + ExpiresAt *string `json:"expires_at,omitempty"` +} + +// SubscriptionProgressInfo represents subscription with progress info +type SubscriptionProgressInfo struct { + Subscription *dto.UserSubscription `json:"subscription"` + Progress *service.SubscriptionProgress `json:"progress"` +} + +// SubscriptionHandler handles user subscription operations +type SubscriptionHandler struct { + subscriptionService *service.SubscriptionService +} + +// NewSubscriptionHandler creates a new user subscription handler +func NewSubscriptionHandler(subscriptionService *service.SubscriptionService) *SubscriptionHandler { + return &SubscriptionHandler{ + subscriptionService: subscriptionService, + } +} + +// List handles listing current user's subscriptions +// GET /api/v1/subscriptions +func (h *SubscriptionHandler) List(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + subscriptions, err := h.subscriptionService.ListUserSubscriptions(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserSubscription, 0, len(subscriptions)) + for i := range subscriptions { + out = append(out, *dto.UserSubscriptionFromService(&subscriptions[i])) + } + response.Success(c, out) +} + +// GetActive handles getting current user's active subscriptions +// GET /api/v1/subscriptions/active +func (h *SubscriptionHandler) GetActive(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + subscriptions, err := h.subscriptionService.ListActiveUserSubscriptions(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UserSubscription, 0, len(subscriptions)) + for i := range subscriptions { + out = append(out, 
*dto.UserSubscriptionFromService(&subscriptions[i])) + } + response.Success(c, out) +} + +// GetProgress handles getting subscription progress for current user +// GET /api/v1/subscriptions/progress +func (h *SubscriptionHandler) GetProgress(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + // Get all active subscriptions with progress + subscriptions, err := h.subscriptionService.ListActiveUserSubscriptions(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + result := make([]SubscriptionProgressInfo, 0, len(subscriptions)) + for i := range subscriptions { + sub := &subscriptions[i] + progress, err := h.subscriptionService.GetSubscriptionProgress(c.Request.Context(), sub.ID) + if err != nil { + // Skip subscriptions with errors + continue + } + result = append(result, SubscriptionProgressInfo{ + Subscription: dto.UserSubscriptionFromService(sub), + Progress: progress, + }) + } + + response.Success(c, result) +} + +// GetSummary handles getting a summary of current user's subscription status +// GET /api/v1/subscriptions/summary +func (h *SubscriptionHandler) GetSummary(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not found in context") + return + } + + // Get all active subscriptions + subscriptions, err := h.subscriptionService.ListActiveUserSubscriptions(c.Request.Context(), subject.UserID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + var totalUsed float64 + items := make([]SubscriptionSummaryItem, 0, len(subscriptions)) + + for _, sub := range subscriptions { + item := SubscriptionSummaryItem{ + ID: sub.ID, + GroupID: sub.GroupID, + Status: sub.Status, + DailyUsedUSD: sub.DailyUsageUSD, + WeeklyUsedUSD: sub.WeeklyUsageUSD, + MonthlyUsedUSD: sub.MonthlyUsageUSD, + } + + // Add group info if preloaded + if sub.Group != nil { + item.GroupName = sub.Group.Name + if sub.Group.DailyLimitUSD != nil { + item.DailyLimitUSD = *sub.Group.DailyLimitUSD + } + if sub.Group.WeeklyLimitUSD != nil { + item.WeeklyLimitUSD = *sub.Group.WeeklyLimitUSD + } + if sub.Group.MonthlyLimitUSD != nil { + item.MonthlyLimitUSD = *sub.Group.MonthlyLimitUSD + } + } + + // Format expiration time + if !sub.ExpiresAt.IsZero() { + formatted := sub.ExpiresAt.Format("2006-01-02T15:04:05Z07:00") + item.ExpiresAt = &formatted + } + + // Track total usage (use monthly as the most comprehensive) + totalUsed += sub.MonthlyUsageUSD + + items = append(items, item) + } + + summary := struct { + ActiveCount int `json:"active_count"` + TotalUsedUSD float64 `json:"total_used_usd"` + Subscriptions []SubscriptionSummaryItem `json:"subscriptions"` + }{ + ActiveCount: len(subscriptions), + TotalUsedUSD: totalUsed, + Subscriptions: items, + } + + response.Success(c, summary) +} diff --git a/backend/internal/handler/usage_handler.go b/backend/internal/handler/usage_handler.go new file mode 100644 index 00000000..129dbfa6 --- /dev/null +++ b/backend/internal/handler/usage_handler.go @@ -0,0 +1,402 @@ +package handler + +import ( + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + 
"github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// UsageHandler handles usage-related requests +type UsageHandler struct { + usageService *service.UsageService + apiKeyService *service.APIKeyService +} + +// NewUsageHandler creates a new UsageHandler +func NewUsageHandler(usageService *service.UsageService, apiKeyService *service.APIKeyService) *UsageHandler { + return &UsageHandler{ + usageService: usageService, + apiKeyService: apiKeyService, + } +} + +// List handles listing usage records with pagination +// GET /api/v1/usage +func (h *UsageHandler) List(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + page, pageSize := response.ParsePagination(c) + + var apiKeyID int64 + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + id, err := strconv.ParseInt(apiKeyIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid api_key_id") + return + } + + // [Security Fix] Verify API Key ownership to prevent horizontal privilege escalation + apiKey, err := h.apiKeyService.GetByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + if apiKey.UserID != subject.UserID { + response.Forbidden(c, "Not authorized to access this API key's usage records") + return + } + + apiKeyID = id + } + + // Parse additional filters + model := c.Query("model") + + var stream *bool + if streamStr := c.Query("stream"); streamStr != "" { + val, err := strconv.ParseBool(streamStr) + if err != nil { + response.BadRequest(c, "Invalid stream value, use true or false") + return + } + stream = &val + } + + var billingType *int8 + if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { + val, err := strconv.ParseInt(billingTypeStr, 10, 8) + if err != nil { + response.BadRequest(c, "Invalid billing_type") + return + } + bt := int8(val) + billingType = &bt + } + + // Parse date range + var startTime, endTime *time.Time + userTZ := c.Query("timezone") // Get user's timezone from request + if startDateStr := c.Query("start_date"); startDateStr != "" { + t, err := timezone.ParseInUserLocation("2006-01-02", startDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + startTime = &t + } + + if endDateStr := c.Query("end_date"); endDateStr != "" { + t, err := timezone.ParseInUserLocation("2006-01-02", endDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + // Set end time to end of day + t = t.Add(24*time.Hour - time.Nanosecond) + endTime = &t + } + + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + filters := usagestats.UsageLogFilters{ + UserID: subject.UserID, // Always filter by current user for security + APIKeyID: apiKeyID, + Model: model, + Stream: stream, + BillingType: billingType, + StartTime: startTime, + EndTime: endTime, + } + + records, result, err := h.usageService.ListWithFilters(c.Request.Context(), params, filters) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.UsageLog, 0, len(records)) + for i := range records { + out = append(out, *dto.UsageLogFromService(&records[i])) + } + response.Paginated(c, out, result.Total, page, pageSize) +} + +// GetByID handles getting a single usage record +// GET /api/v1/usage/:id +func (h *UsageHandler) GetByID(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok 
{ + response.Unauthorized(c, "User not authenticated") + return + } + + usageID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid usage ID") + return + } + + record, err := h.usageService.GetByID(c.Request.Context(), usageID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Verify ownership + if record.UserID != subject.UserID { + response.Forbidden(c, "Not authorized to access this record") + return + } + + response.Success(c, dto.UsageLogFromService(record)) +} + +// Stats handles getting usage statistics +// GET /api/v1/usage/stats +func (h *UsageHandler) Stats(c *gin.Context) { + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + response.Unauthorized(c, "User not authenticated") + return + } + + var apiKeyID int64 + if apiKeyIDStr := c.Query("api_key_id"); apiKeyIDStr != "" { + id, err := strconv.ParseInt(apiKeyIDStr, 10, 64) + if err != nil { + response.BadRequest(c, "Invalid api_key_id") + return + } + + // [Security Fix] Verify API Key ownership to prevent horizontal privilege escalation + apiKey, err := h.apiKeyService.GetByID(c.Request.Context(), id) + if err != nil { + response.NotFound(c, "API key not found") + return + } + if apiKey.UserID != subject.UserID { + response.Forbidden(c, "Not authorized to access this API key's statistics") + return + } + + apiKeyID = id + } + + // Parse the time-range parameters + userTZ := c.Query("timezone") // Get user's timezone from request + now := timezone.NowInUserLocation(userTZ) + var startTime, endTime time.Time + + // Prefer the start_date and end_date parameters + startDateStr := c.Query("start_date") + endDateStr := c.Query("end_date") + + if startDateStr != "" && endDateStr != "" { + // Use the custom date range + var err error + startTime, err = timezone.ParseInUserLocation("2006-01-02", startDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid start_date format, use YYYY-MM-DD") + return + } + endTime, err = timezone.ParseInUserLocation("2006-01-02", endDateStr, userTZ) + if err != nil { + response.BadRequest(c, "Invalid end_date format, use YYYY-MM-DD") + return + } + // Set the end time to the end of that day + endTime = endTime.Add(24*time.Hour - time.Nanosecond) + } else { + // Fall back to the period parameter + period := c.DefaultQuery("period", "today") + switch period { + case "today": + startTime = timezone.StartOfDayInUserLocation(now, userTZ) + case "week": + startTime = now.AddDate(0, 0, -7) + case "month": + startTime = now.AddDate(0, -1, 0) + default: + startTime = timezone.StartOfDayInUserLocation(now, userTZ) + } + endTime = now + } + + var stats *service.UsageStats + var err error + if apiKeyID > 0 { + stats, err = h.usageService.GetStatsByAPIKey(c.Request.Context(), apiKeyID, startTime, endTime) + } else { + stats, err = h.usageService.GetStatsByUser(c.Request.Context(), subject.UserID, startTime, endTime) + } + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, stats) +} + +// parseUserTimeRange parses start_date, end_date query parameters for user dashboard +// Uses user's timezone if provided, otherwise falls back to server timezone +func parseUserTimeRange(c *gin.Context) (time.Time, time.Time) { + userTZ := c.Query("timezone") // Get user's timezone from request + now := timezone.NowInUserLocation(userTZ) + startDate := c.Query("start_date") + endDate := c.Query("end_date") + + var startTime, endTime time.Time + + if startDate != "" { + if t, err := timezone.ParseInUserLocation("2006-01-02", startDate, userTZ); err == nil { + startTime = t + } else { + startTime = 
+// parseUserTimeRange parses the start_date and end_date query parameters for
+// the user dashboard, using the user's timezone when provided and the server
+// timezone otherwise. Unparsable dates silently fall back to the defaults:
+// the last 7 days, with an exclusive end bound at the start of tomorrow.
+func parseUserTimeRange(c *gin.Context) (time.Time, time.Time) {
+	userTZ := c.Query("timezone") // Get user's timezone from request
+	now := timezone.NowInUserLocation(userTZ)
+	startDate := c.Query("start_date")
+	endDate := c.Query("end_date")
+
+	var startTime, endTime time.Time
+
+	if startDate != "" {
+		if t, err := timezone.ParseInUserLocation("2006-01-02", startDate, userTZ); err == nil {
+			startTime = t
+		} else {
+			startTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, -7), userTZ)
+		}
+	} else {
+		startTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, -7), userTZ)
+	}
+
+	if endDate != "" {
+		if t, err := timezone.ParseInUserLocation("2006-01-02", endDate, userTZ); err == nil {
+			endTime = t.Add(24 * time.Hour) // Include the end date
+		} else {
+			endTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, 1), userTZ)
+		}
+	} else {
+		endTime = timezone.StartOfDayInUserLocation(now.AddDate(0, 0, 1), userTZ)
+	}
+
+	return startTime, endTime
+}
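+
+// Worked example (illustrative; assume "now" is 2026-01-15 in the requested
+// timezone and neither start_date nor end_date is supplied):
+//
+//	startTime = 2026-01-08 00:00:00  // start of day, 7 days back
+//	endTime   = 2026-01-16 00:00:00  // start of tomorrow, exclusive bound
+//
+// The dashboard handlers below subtract 24h from endTime before formatting,
+// so the response echoes end_date=2026-01-15.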
+
+// DashboardStats handles getting user dashboard statistics
+// GET /api/v1/usage/dashboard/stats
+func (h *UsageHandler) DashboardStats(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	stats, err := h.usageService.GetUserDashboardStats(c.Request.Context(), subject.UserID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, stats)
+}
+
+// DashboardTrend handles getting user usage trend data
+// GET /api/v1/usage/dashboard/trend
+func (h *UsageHandler) DashboardTrend(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	startTime, endTime := parseUserTimeRange(c)
+	granularity := c.DefaultQuery("granularity", "day")
+
+	trend, err := h.usageService.GetUserUsageTrendByUserID(c.Request.Context(), subject.UserID, startTime, endTime, granularity)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{
+		"trend":       trend,
+		"start_date":  startTime.Format("2006-01-02"),
+		"end_date":    endTime.Add(-24 * time.Hour).Format("2006-01-02"),
+		"granularity": granularity,
+	})
+}
+
+// DashboardModels handles getting user model usage statistics
+// GET /api/v1/usage/dashboard/models
+func (h *UsageHandler) DashboardModels(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	startTime, endTime := parseUserTimeRange(c)
+
+	stats, err := h.usageService.GetUserModelStats(c.Request.Context(), subject.UserID, startTime, endTime)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{
+		"models":     stats,
+		"start_date": startTime.Format("2006-01-02"),
+		"end_date":   endTime.Add(-24 * time.Hour).Format("2006-01-02"),
+	})
+}
+
+// BatchAPIKeysUsageRequest represents the request for batch API keys usage
+type BatchAPIKeysUsageRequest struct {
+	APIKeyIDs []int64 `json:"api_key_ids" binding:"required"`
+}
+
+// DashboardAPIKeysUsage handles getting usage stats for user's own API keys
+// POST /api/v1/usage/dashboard/api-keys-usage
+func (h *UsageHandler) DashboardAPIKeysUsage(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	var req BatchAPIKeysUsageRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	if len(req.APIKeyIDs) == 0 {
+		response.Success(c, gin.H{"stats": map[string]any{}})
+		return
+	}
+
+	// Limit the number of API key IDs to prevent SQL parameter overflow
+	if len(req.APIKeyIDs) > 100 {
+		response.BadRequest(c, "Too many API key IDs (maximum 100 allowed)")
+		return
+	}
+
+	validAPIKeyIDs, err := h.apiKeyService.VerifyOwnership(c.Request.Context(), subject.UserID, req.APIKeyIDs)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	if len(validAPIKeyIDs) == 0 {
+		response.Success(c, gin.H{"stats": map[string]any{}})
+		return
+	}
+
+	stats, err := h.usageService.GetBatchAPIKeyUsageStats(c.Request.Context(), validAPIKeyIDs)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"stats": stats})
+}
diff --git a/backend/internal/handler/user_handler.go b/backend/internal/handler/user_handler.go
new file mode 100644
index 00000000..d968951c
--- /dev/null
+++ b/backend/internal/handler/user_handler.go
@@ -0,0 +1,112 @@
+package handler
+
+import (
+	"github.com/Wei-Shaw/sub2api/internal/handler/dto"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+	middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// UserHandler handles user-related requests
+type UserHandler struct {
+	userService *service.UserService
+}
+
+// NewUserHandler creates a new UserHandler
+func NewUserHandler(userService *service.UserService) *UserHandler {
+	return &UserHandler{
+		userService: userService,
+	}
+}
+
+// ChangePasswordRequest represents the change password request payload
+type ChangePasswordRequest struct {
+	OldPassword string `json:"old_password" binding:"required"`
+	NewPassword string `json:"new_password" binding:"required,min=6"`
+}
+
+// UpdateProfileRequest represents the update profile request payload
+type UpdateProfileRequest struct {
+	Username *string `json:"username"`
+}
+
+// GetProfile handles getting user profile
+// GET /api/v1/users/me
+func (h *UserHandler) GetProfile(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	userData, err := h.userService.GetByID(c.Request.Context(), subject.UserID)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Blank out the notes field: admin remarks must not be visible to regular users
+	userData.Notes = ""
+
+	response.Success(c, dto.UserFromService(userData))
+}
+
+// ChangePassword handles changing user password
+// POST /api/v1/users/me/password
+func (h *UserHandler) ChangePassword(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	var req ChangePasswordRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	svcReq := service.ChangePasswordRequest{
+		CurrentPassword: req.OldPassword,
+		NewPassword:     req.NewPassword,
+	}
+	err := h.userService.ChangePassword(c.Request.Context(), subject.UserID, svcReq)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	response.Success(c, gin.H{"message": "Password changed successfully"})
+}
+
+// UpdateProfile handles updating user profile
+// PUT /api/v1/users/me
+func (h *UserHandler) UpdateProfile(c *gin.Context) {
+	subject, ok := middleware2.GetAuthSubjectFromContext(c)
+	if !ok {
+		response.Unauthorized(c, "User not authenticated")
+		return
+	}
+
+	var req UpdateProfileRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		response.BadRequest(c, "Invalid request: "+err.Error())
+		return
+	}
+
+	svcReq := service.UpdateProfileRequest{
+		Username: req.Username,
+	}
+	updatedUser, err := h.userService.UpdateProfile(c.Request.Context(), subject.UserID, svcReq)
+	if err != nil {
+		response.ErrorFrom(c, err)
+		return
+	}
+
+	// Blank out the notes field: admin remarks must not be visible to regular users
+	updatedUser.Notes = ""
+
+ response.Success(c, dto.UserFromService(updatedUser)) +} diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go new file mode 100644 index 00000000..2af7905e --- /dev/null +++ b/backend/internal/handler/wire.go @@ -0,0 +1,123 @@ +package handler + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler/admin" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/google/wire" +) + +// ProvideAdminHandlers creates the AdminHandlers struct +func ProvideAdminHandlers( + dashboardHandler *admin.DashboardHandler, + userHandler *admin.UserHandler, + groupHandler *admin.GroupHandler, + accountHandler *admin.AccountHandler, + oauthHandler *admin.OAuthHandler, + openaiOAuthHandler *admin.OpenAIOAuthHandler, + geminiOAuthHandler *admin.GeminiOAuthHandler, + antigravityOAuthHandler *admin.AntigravityOAuthHandler, + proxyHandler *admin.ProxyHandler, + redeemHandler *admin.RedeemHandler, + promoHandler *admin.PromoHandler, + settingHandler *admin.SettingHandler, + opsHandler *admin.OpsHandler, + systemHandler *admin.SystemHandler, + subscriptionHandler *admin.SubscriptionHandler, + usageHandler *admin.UsageHandler, + userAttributeHandler *admin.UserAttributeHandler, +) *AdminHandlers { + return &AdminHandlers{ + Dashboard: dashboardHandler, + User: userHandler, + Group: groupHandler, + Account: accountHandler, + OAuth: oauthHandler, + OpenAIOAuth: openaiOAuthHandler, + GeminiOAuth: geminiOAuthHandler, + AntigravityOAuth: antigravityOAuthHandler, + Proxy: proxyHandler, + Redeem: redeemHandler, + Promo: promoHandler, + Setting: settingHandler, + Ops: opsHandler, + System: systemHandler, + Subscription: subscriptionHandler, + Usage: usageHandler, + UserAttribute: userAttributeHandler, + } +} + +// ProvideSystemHandler creates admin.SystemHandler with UpdateService +func ProvideSystemHandler(updateService *service.UpdateService) *admin.SystemHandler { + return admin.NewSystemHandler(updateService) +} + +// ProvideSettingHandler creates SettingHandler with version from BuildInfo +func ProvideSettingHandler(settingService *service.SettingService, buildInfo BuildInfo) *SettingHandler { + return NewSettingHandler(settingService, buildInfo.Version) +} + +// ProvideHandlers creates the Handlers struct +func ProvideHandlers( + authHandler *AuthHandler, + userHandler *UserHandler, + apiKeyHandler *APIKeyHandler, + usageHandler *UsageHandler, + redeemHandler *RedeemHandler, + subscriptionHandler *SubscriptionHandler, + adminHandlers *AdminHandlers, + gatewayHandler *GatewayHandler, + openaiGatewayHandler *OpenAIGatewayHandler, + settingHandler *SettingHandler, +) *Handlers { + return &Handlers{ + Auth: authHandler, + User: userHandler, + APIKey: apiKeyHandler, + Usage: usageHandler, + Redeem: redeemHandler, + Subscription: subscriptionHandler, + Admin: adminHandlers, + Gateway: gatewayHandler, + OpenAIGateway: openaiGatewayHandler, + Setting: settingHandler, + } +} + +// ProviderSet is the Wire provider set for all handlers +var ProviderSet = wire.NewSet( + // Top-level handlers + NewAuthHandler, + NewUserHandler, + NewAPIKeyHandler, + NewUsageHandler, + NewRedeemHandler, + NewSubscriptionHandler, + NewGatewayHandler, + NewOpenAIGatewayHandler, + ProvideSettingHandler, + + // Admin handlers + admin.NewDashboardHandler, + admin.NewUserHandler, + admin.NewGroupHandler, + admin.NewAccountHandler, + admin.NewOAuthHandler, + admin.NewOpenAIOAuthHandler, + admin.NewGeminiOAuthHandler, + admin.NewAntigravityOAuthHandler, + admin.NewProxyHandler, + admin.NewRedeemHandler, + 
admin.NewPromoHandler, + admin.NewSettingHandler, + admin.NewOpsHandler, + ProvideSystemHandler, + admin.NewSubscriptionHandler, + admin.NewUsageHandler, + admin.NewUserAttributeHandler, + + // AdminHandlers and Handlers constructors + ProvideAdminHandlers, + ProvideHandlers, +) diff --git a/backend/internal/integration/e2e_gateway_test.go b/backend/internal/integration/e2e_gateway_test.go new file mode 100644 index 00000000..ec0b29f7 --- /dev/null +++ b/backend/internal/integration/e2e_gateway_test.go @@ -0,0 +1,799 @@ +//go:build e2e + +package integration + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "strings" + "testing" + "time" +) + +var ( + baseURL = getEnv("BASE_URL", "http://localhost:8080") + // ENDPOINT_PREFIX: 端点前缀,支持混合模式和非混合模式测试 + // - "" (默认): 使用 /v1/messages, /v1beta/models(混合模式,可调度 antigravity 账户) + // - "/antigravity": 使用 /antigravity/v1/messages, /antigravity/v1beta/models(非混合模式,仅 antigravity 账户) + endpointPrefix = getEnv("ENDPOINT_PREFIX", "") + claudeAPIKey = "sk-8e572bc3b3de92ace4f41f4256c28600ca11805732a7b693b5c44741346bbbb3" + geminiAPIKey = "sk-5950197a2085b38bbe5a1b229cc02b8ece914963fc44cacc06d497ae8b87410f" + testInterval = 1 * time.Second // 测试间隔,防止限流 +) + +func getEnv(key, defaultVal string) string { + if v := os.Getenv(key); v != "" { + return v + } + return defaultVal +} + +// Claude 模型列表 +var claudeModels = []string{ + // Opus 系列 + "claude-opus-4-5-thinking", // 直接支持 + "claude-opus-4", // 映射到 claude-opus-4-5-thinking + "claude-opus-4-5-20251101", // 映射到 claude-opus-4-5-thinking + // Sonnet 系列 + "claude-sonnet-4-5", // 直接支持 + "claude-sonnet-4-5-thinking", // 直接支持 + "claude-sonnet-4-5-20250929", // 映射到 claude-sonnet-4-5-thinking + "claude-3-5-sonnet-20241022", // 映射到 claude-sonnet-4-5 + // Haiku 系列(映射到 gemini-3-flash) + "claude-haiku-4", + "claude-haiku-4-5", + "claude-haiku-4-5-20251001", + "claude-3-haiku-20240307", +} + +// Gemini 模型列表 +var geminiModels = []string{ + "gemini-2.5-flash", + "gemini-2.5-flash-lite", + "gemini-3-flash", + "gemini-3-pro-low", + "gemini-3-pro-high", +} + +func TestMain(m *testing.M) { + mode := "混合模式" + if endpointPrefix != "" { + mode = "Antigravity 模式" + } + fmt.Printf("\n🚀 E2E Gateway Tests - %s (prefix=%q, %s)\n\n", baseURL, endpointPrefix, mode) + os.Exit(m.Run()) +} + +// TestClaudeModelsList 测试 GET /v1/models +func TestClaudeModelsList(t *testing.T) { + url := baseURL + endpointPrefix + "/v1/models" + + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Authorization", "Bearer "+claudeAPIKey) + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]any + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + + if result["object"] != "list" { + t.Errorf("期望 object=list, 得到 %v", result["object"]) + } + + data, ok := result["data"].([]any) + if !ok { + t.Fatal("响应缺少 data 数组") + } + t.Logf("✅ 返回 %d 个模型", len(data)) +} + +// TestGeminiModelsList 测试 GET /v1beta/models +func TestGeminiModelsList(t *testing.T) { + url := baseURL + endpointPrefix + "/v1beta/models" + + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Authorization", "Bearer "+geminiAPIKey) + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + 
t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + body, _ := io.ReadAll(resp.Body) + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(body)) + } + + var result map[string]any + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + + models, ok := result["models"].([]any) + if !ok { + t.Fatal("响应缺少 models 数组") + } + t.Logf("✅ 返回 %d 个模型", len(models)) +} + +// TestClaudeMessages 测试 Claude /v1/messages 接口 +func TestClaudeMessages(t *testing.T) { + for i, model := range claudeModels { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_非流式", func(t *testing.T) { + testClaudeMessage(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_流式", func(t *testing.T) { + testClaudeMessage(t, model, true) + }) + } +} + +func testClaudeMessage(t *testing.T, model string, stream bool) { + url := baseURL + endpointPrefix + "/v1/messages" + + payload := map[string]any{ + "model": model, + "max_tokens": 50, + "stream": stream, + "messages": []map[string]string{ + {"role": "user", "content": "Say 'hello' in one word."}, + }, + } + body, _ := json.Marshal(payload) + + req, _ := http.NewRequest("POST", url, bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+claudeAPIKey) + req.Header.Set("anthropic-version", "2023-06-01") + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + respBody, _ := io.ReadAll(resp.Body) + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + if stream { + // 流式:读取 SSE 事件 + scanner := bufio.NewScanner(resp.Body) + eventCount := 0 + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "data:") { + eventCount++ + if eventCount >= 3 { + break + } + } + } + if eventCount == 0 { + t.Fatal("未收到任何 SSE 事件") + } + t.Logf("✅ 收到 %d+ 个 SSE 事件", eventCount) + } else { + // 非流式:解析 JSON 响应 + var result map[string]any + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + if result["type"] != "message" { + t.Errorf("期望 type=message, 得到 %v", result["type"]) + } + t.Logf("✅ 收到消息响应 id=%v", result["id"]) + } +} + +// TestGeminiGenerateContent 测试 Gemini /v1beta/models/:model 接口 +func TestGeminiGenerateContent(t *testing.T) { + for i, model := range geminiModels { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_非流式", func(t *testing.T) { + testGeminiGenerate(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_流式", func(t *testing.T) { + testGeminiGenerate(t, model, true) + }) + } +} + +func testGeminiGenerate(t *testing.T, model string, stream bool) { + action := "generateContent" + if stream { + action = "streamGenerateContent" + } + url := fmt.Sprintf("%s%s/v1beta/models/%s:%s", baseURL, endpointPrefix, model, action) + if stream { + url += "?alt=sse" + } + + payload := map[string]any{ + "contents": []map[string]any{ + { + "role": "user", + "parts": []map[string]string{ + {"text": "Say 'hello' in one word."}, + }, + }, + }, + "generationConfig": map[string]int{ + "maxOutputTokens": 50, + }, + } + body, _ := json.Marshal(payload) + + req, _ := http.NewRequest("POST", url, bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+geminiAPIKey) + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if 
err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + respBody, _ := io.ReadAll(resp.Body) + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + if stream { + // 流式:读取 SSE 事件 + scanner := bufio.NewScanner(resp.Body) + eventCount := 0 + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "data:") { + eventCount++ + if eventCount >= 3 { + break + } + } + } + if eventCount == 0 { + t.Fatal("未收到任何 SSE 事件") + } + t.Logf("✅ 收到 %d+ 个 SSE 事件", eventCount) + } else { + // 非流式:解析 JSON 响应 + var result map[string]any + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + if _, ok := result["candidates"]; !ok { + t.Error("响应缺少 candidates 字段") + } + t.Log("✅ 收到 candidates 响应") + } +} + +// TestClaudeMessagesWithComplexTools 测试带复杂工具 schema 的请求 +// 模拟 Claude Code 发送的请求,包含需要清理的 JSON Schema 字段 +func TestClaudeMessagesWithComplexTools(t *testing.T) { + // 测试模型列表(只测试几个代表性模型) + models := []string{ + "claude-opus-4-5-20251101", // Claude 模型 + "claude-haiku-4-5-20251001", // 映射到 Gemini + } + + for i, model := range models { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_复杂工具", func(t *testing.T) { + testClaudeMessageWithTools(t, model) + }) + } +} + +func testClaudeMessageWithTools(t *testing.T, model string) { + url := baseURL + endpointPrefix + "/v1/messages" + + // 构造包含复杂 schema 的工具定义(模拟 Claude Code 的工具) + // 这些字段需要被 cleanJSONSchema 清理 + tools := []map[string]any{ + { + "name": "read_file", + "description": "Read file contents", + "input_schema": map[string]any{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": map[string]any{ + "path": map[string]any{ + "type": "string", + "description": "File path", + "minLength": 1, + "maxLength": 4096, + "pattern": "^[^\\x00]+$", + }, + "encoding": map[string]any{ + "type": []string{"string", "null"}, + "default": "utf-8", + "enum": []string{"utf-8", "ascii", "latin-1"}, + }, + }, + "required": []string{"path"}, + "additionalProperties": false, + }, + }, + { + "name": "write_file", + "description": "Write content to file", + "input_schema": map[string]any{ + "type": "object", + "properties": map[string]any{ + "path": map[string]any{ + "type": "string", + "minLength": 1, + }, + "content": map[string]any{ + "type": "string", + "maxLength": 1048576, + }, + }, + "required": []string{"path", "content"}, + "additionalProperties": false, + "strict": true, + }, + }, + { + "name": "list_files", + "description": "List files in directory", + "input_schema": map[string]any{ + "$id": "https://example.com/list-files.schema.json", + "type": "object", + "properties": map[string]any{ + "directory": map[string]any{ + "type": "string", + }, + "patterns": map[string]any{ + "type": "array", + "items": map[string]any{ + "type": "string", + "minLength": 1, + }, + "minItems": 1, + "maxItems": 100, + "uniqueItems": true, + }, + "recursive": map[string]any{ + "type": "boolean", + "default": false, + }, + }, + "required": []string{"directory"}, + "additionalProperties": false, + }, + }, + { + "name": "search_code", + "description": "Search code in files", + "input_schema": map[string]any{ + "type": "object", + "properties": map[string]any{ + "query": map[string]any{ + "type": "string", + "minLength": 1, + "format": "regex", + }, + "max_results": map[string]any{ + "type": "integer", + "minimum": 1, + "maximum": 1000, + "exclusiveMinimum": 0, + "default": 100, + }, + }, + "required": []string{"query"}, + 
"additionalProperties": false, + "examples": []map[string]any{ + {"query": "function.*test", "max_results": 50}, + }, + }, + }, + // 测试 required 引用不存在的属性(应被自动过滤) + { + "name": "invalid_required_tool", + "description": "Tool with invalid required field", + "input_schema": map[string]any{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{ + "type": "string", + }, + }, + // "nonexistent_field" 不存在于 properties 中,应被过滤掉 + "required": []string{"name", "nonexistent_field"}, + }, + }, + // 测试没有 properties 的 schema(应自动添加空 properties) + { + "name": "no_properties_tool", + "description": "Tool without properties", + "input_schema": map[string]any{ + "type": "object", + "required": []string{"should_be_removed"}, + }, + }, + // 测试没有 type 的 schema(应自动添加 type: OBJECT) + { + "name": "no_type_tool", + "description": "Tool without type", + "input_schema": map[string]any{ + "properties": map[string]any{ + "value": map[string]any{ + "type": "string", + }, + }, + }, + }, + } + + payload := map[string]any{ + "model": model, + "max_tokens": 100, + "stream": false, + "messages": []map[string]string{ + {"role": "user", "content": "List files in the current directory"}, + }, + "tools": tools, + } + body, _ := json.Marshal(payload) + + req, _ := http.NewRequest("POST", url, bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+claudeAPIKey) + req.Header.Set("anthropic-version", "2023-06-01") + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + respBody, _ := io.ReadAll(resp.Body) + + // 400 错误说明 schema 清理不完整 + if resp.StatusCode == 400 { + t.Fatalf("Schema 清理失败,收到 400 错误: %s", string(respBody)) + } + + // 503 可能是账号限流,不算测试失败 + if resp.StatusCode == 503 { + t.Skipf("账号暂时不可用 (503): %s", string(respBody)) + } + + // 429 是限流 + if resp.StatusCode == 429 { + t.Skipf("请求被限流 (429): %s", string(respBody)) + } + + if resp.StatusCode != 200 { + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + var result map[string]any + if err := json.Unmarshal(respBody, &result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + + if result["type"] != "message" { + t.Errorf("期望 type=message, 得到 %v", result["type"]) + } + t.Logf("✅ 复杂工具 schema 测试通过, id=%v", result["id"]) +} + +// TestClaudeMessagesWithThinkingAndTools 测试 thinking 模式下带工具调用的场景 +// 验证:当历史 assistant 消息包含 tool_use 但没有 signature 时, +// 系统应自动添加 dummy thought_signature 避免 Gemini 400 错误 +func TestClaudeMessagesWithThinkingAndTools(t *testing.T) { + models := []string{ + "claude-haiku-4-5-20251001", // gemini-3-flash + } + for i, model := range models { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_thinking模式工具调用", func(t *testing.T) { + testClaudeThinkingWithToolHistory(t, model) + }) + } +} + +func testClaudeThinkingWithToolHistory(t *testing.T, model string) { + url := baseURL + endpointPrefix + "/v1/messages" + + // 模拟历史对话:用户请求 → assistant 调用工具 → 工具返回 → 继续对话 + // 注意:tool_use 块故意不包含 signature,测试系统是否能正确添加 dummy signature + payload := map[string]any{ + "model": model, + "max_tokens": 200, + "stream": false, + // 开启 thinking 模式 + "thinking": map[string]any{ + "type": "enabled", + "budget_tokens": 1024, + }, + "messages": []any{ + map[string]any{ + "role": "user", + "content": "List files in the current directory", + }, + // assistant 消息包含 tool_use 但没有 signature + map[string]any{ + "role": "assistant", + "content": []map[string]any{ + { + "type": "text", + 
"text": "I'll list the files for you.", + }, + { + "type": "tool_use", + "id": "toolu_01XGmNv", + "name": "Bash", + "input": map[string]any{"command": "ls -la"}, + // 故意不包含 signature + }, + }, + }, + // 工具结果 + map[string]any{ + "role": "user", + "content": []map[string]any{ + { + "type": "tool_result", + "tool_use_id": "toolu_01XGmNv", + "content": "file1.txt\nfile2.txt\ndir1/", + }, + }, + }, + }, + "tools": []map[string]any{ + { + "name": "Bash", + "description": "Execute bash commands", + "input_schema": map[string]any{ + "type": "object", + "properties": map[string]any{ + "command": map[string]any{ + "type": "string", + }, + }, + "required": []string{"command"}, + }, + }, + }, + } + body, _ := json.Marshal(payload) + + req, _ := http.NewRequest("POST", url, bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+claudeAPIKey) + req.Header.Set("anthropic-version", "2023-06-01") + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + respBody, _ := io.ReadAll(resp.Body) + + // 400 错误说明 thought_signature 处理失败 + if resp.StatusCode == 400 { + t.Fatalf("thought_signature 处理失败,收到 400 错误: %s", string(respBody)) + } + + // 503 可能是账号限流,不算测试失败 + if resp.StatusCode == 503 { + t.Skipf("账号暂时不可用 (503): %s", string(respBody)) + } + + // 429 是限流 + if resp.StatusCode == 429 { + t.Skipf("请求被限流 (429): %s", string(respBody)) + } + + if resp.StatusCode != 200 { + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + var result map[string]any + if err := json.Unmarshal(respBody, &result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + + if result["type"] != "message" { + t.Errorf("期望 type=message, 得到 %v", result["type"]) + } + t.Logf("✅ thinking 模式工具调用测试通过, id=%v", result["id"]) +} + +// TestClaudeMessagesWithGeminiModel 测试在 Claude 端点使用 Gemini 模型 +// 验证:通过 /v1/messages 端点传入 gemini 模型名的场景(含前缀映射) +// 仅在 Antigravity 模式下运行(ENDPOINT_PREFIX="/antigravity") +func TestClaudeMessagesWithGeminiModel(t *testing.T) { + if endpointPrefix != "/antigravity" { + t.Skip("仅在 Antigravity 模式下运行") + } + + // 测试通过 Claude 端点调用 Gemini 模型 + geminiViaClaude := []string{ + "gemini-3-flash", // 直接支持 + "gemini-3-pro-low", // 直接支持 + "gemini-3-pro-high", // 直接支持 + "gemini-3-pro", // 前缀映射 -> gemini-3-pro-high + "gemini-3-pro-preview", // 前缀映射 -> gemini-3-pro-high + } + + for i, model := range geminiViaClaude { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_通过Claude端点", func(t *testing.T) { + testClaudeMessage(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_通过Claude端点_流式", func(t *testing.T) { + testClaudeMessage(t, model, true) + }) + } +} + +// TestClaudeMessagesWithNoSignature 测试历史 thinking block 不带 signature 的场景 +// 验证:Gemini 模型接受没有 signature 的 thinking block +func TestClaudeMessagesWithNoSignature(t *testing.T) { + models := []string{ + "claude-haiku-4-5-20251001", // gemini-3-flash - 支持无 signature + } + for i, model := range models { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_无signature", func(t *testing.T) { + testClaudeWithNoSignature(t, model) + }) + } +} + +func testClaudeWithNoSignature(t *testing.T, model string) { + url := baseURL + endpointPrefix + "/v1/messages" + + // 模拟历史对话包含 thinking block 但没有 signature + payload := map[string]any{ + "model": model, + "max_tokens": 200, + "stream": false, + // 开启 thinking 模式 + "thinking": map[string]any{ + "type": "enabled", + "budget_tokens": 1024, + }, + 
"messages": []any{ + map[string]any{ + "role": "user", + "content": "What is 2+2?", + }, + // assistant 消息包含 thinking block 但没有 signature + map[string]any{ + "role": "assistant", + "content": []map[string]any{ + { + "type": "thinking", + "thinking": "Let me calculate 2+2...", + // 故意不包含 signature + }, + { + "type": "text", + "text": "2+2 equals 4.", + }, + }, + }, + map[string]any{ + "role": "user", + "content": "What is 3+3?", + }, + }, + } + body, _ := json.Marshal(payload) + + req, _ := http.NewRequest("POST", url, bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+claudeAPIKey) + req.Header.Set("anthropic-version", "2023-06-01") + + client := &http.Client{Timeout: 60 * time.Second} + resp, err := client.Do(req) + if err != nil { + t.Fatalf("请求失败: %v", err) + } + defer resp.Body.Close() + + respBody, _ := io.ReadAll(resp.Body) + + if resp.StatusCode == 400 { + t.Fatalf("无 signature thinking 处理失败,收到 400 错误: %s", string(respBody)) + } + + if resp.StatusCode == 503 { + t.Skipf("账号暂时不可用 (503): %s", string(respBody)) + } + + if resp.StatusCode == 429 { + t.Skipf("请求被限流 (429): %s", string(respBody)) + } + + if resp.StatusCode != 200 { + t.Fatalf("HTTP %d: %s", resp.StatusCode, string(respBody)) + } + + var result map[string]any + if err := json.Unmarshal(respBody, &result); err != nil { + t.Fatalf("解析响应失败: %v", err) + } + + if result["type"] != "message" { + t.Errorf("期望 type=message, 得到 %v", result["type"]) + } + t.Logf("✅ 无 signature thinking 处理测试通过, id=%v", result["id"]) +} + +// TestGeminiEndpointWithClaudeModel 测试通过 Gemini 端点调用 Claude 模型 +// 仅在 Antigravity 模式下运行(ENDPOINT_PREFIX="/antigravity") +func TestGeminiEndpointWithClaudeModel(t *testing.T) { + if endpointPrefix != "/antigravity" { + t.Skip("仅在 Antigravity 模式下运行") + } + + // 测试通过 Gemini 端点调用 Claude 模型 + claudeViaGemini := []string{ + "claude-sonnet-4-5", + "claude-opus-4-5-thinking", + } + + for i, model := range claudeViaGemini { + if i > 0 { + time.Sleep(testInterval) + } + t.Run(model+"_通过Gemini端点", func(t *testing.T) { + testGeminiGenerate(t, model, false) + }) + time.Sleep(testInterval) + t.Run(model+"_通过Gemini端点_流式", func(t *testing.T) { + testGeminiGenerate(t, model, true) + }) + } +} diff --git a/backend/internal/middleware/rate_limiter.go b/backend/internal/middleware/rate_limiter.go new file mode 100644 index 00000000..819d74c2 --- /dev/null +++ b/backend/internal/middleware/rate_limiter.go @@ -0,0 +1,161 @@ +package middleware + +import ( + "context" + "fmt" + "log" + "net/http" + "strconv" + "time" + + "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" +) + +// RateLimitFailureMode Redis 故障策略 +type RateLimitFailureMode int + +const ( + RateLimitFailOpen RateLimitFailureMode = iota + RateLimitFailClose +) + +// RateLimitOptions 限流可选配置 +type RateLimitOptions struct { + FailureMode RateLimitFailureMode +} + +var rateLimitScript = redis.NewScript(` +local current = redis.call('INCR', KEYS[1]) +local ttl = redis.call('PTTL', KEYS[1]) +local repaired = 0 +if current == 1 then + redis.call('PEXPIRE', KEYS[1], ARGV[1]) +elseif ttl == -1 then + redis.call('PEXPIRE', KEYS[1], ARGV[1]) + repaired = 1 +end +return {current, repaired} +`) + +// rateLimitRun 允许测试覆写脚本执行逻辑 +var rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, bool, error) { + values, err := rateLimitScript.Run(ctx, client, []string{key}, windowMillis).Slice() + if err != nil { + return 0, false, err + } + if len(values) < 2 { + return 0, 
+// rateLimitRun executes the script; a package-level var so tests can override it
+var rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, bool, error) {
+	values, err := rateLimitScript.Run(ctx, client, []string{key}, windowMillis).Slice()
+	if err != nil {
+		return 0, false, err
+	}
+	if len(values) < 2 {
+		return 0, false, fmt.Errorf("rate limit script returned %d values", len(values))
+	}
+	count, err := parseInt64(values[0])
+	if err != nil {
+		return 0, false, err
+	}
+	repaired, err := parseInt64(values[1])
+	if err != nil {
+		return 0, false, err
+	}
+	return count, repaired == 1, nil
+}
+
+// RateLimiter is a Redis-backed rate limiter
+type RateLimiter struct {
+	redis  *redis.Client
+	prefix string
+}
+
+// NewRateLimiter creates a rate limiter instance
+func NewRateLimiter(redisClient *redis.Client) *RateLimiter {
+	return &RateLimiter{
+		redis:  redisClient,
+		prefix: "rate_limit:",
+	}
+}
+
+// Limit returns a rate-limiting middleware.
+// key: identifier for this limit bucket
+// limit: maximum number of requests per window
+// window: window duration
+func (r *RateLimiter) Limit(key string, limit int, window time.Duration) gin.HandlerFunc {
+	return r.LimitWithOptions(key, limit, window, RateLimitOptions{})
+}
+
+// LimitWithOptions returns a rate-limiting middleware with optional configuration
+func (r *RateLimiter) LimitWithOptions(key string, limit int, window time.Duration, opts RateLimitOptions) gin.HandlerFunc {
+	failureMode := opts.FailureMode
+	if failureMode != RateLimitFailClose {
+		failureMode = RateLimitFailOpen
+	}
+
+	return func(c *gin.Context) {
+		ip := c.ClientIP()
+		redisKey := r.prefix + key + ":" + ip
+
+		ctx := c.Request.Context()
+
+		windowMillis := windowTTLMillis(window)
+
+		// Atomically increment the counter and arm the expiry via the Lua script
+		count, repaired, err := rateLimitRun(ctx, r.redis, redisKey, windowMillis)
+		if err != nil {
+			log.Printf("[RateLimit] redis error: key=%s mode=%s err=%v", redisKey, failureModeLabel(failureMode), err)
+			if failureMode == RateLimitFailClose {
+				abortRateLimit(c)
+				return
+			}
+			// Fail open on Redis errors so normal traffic is not affected
+			c.Next()
+			return
+		}
+		if repaired {
+			log.Printf("[RateLimit] ttl repaired: key=%s window_ms=%d", redisKey, windowMillis)
+		}
+
+		// Over the limit
+		if count > int64(limit) {
+			abortRateLimit(c)
+			return
+		}
+
+		c.Next()
+	}
+}
+
+func windowTTLMillis(window time.Duration) int64 {
+	ttl := window.Milliseconds()
+	if ttl < 1 {
+		return 1
+	}
+	return ttl
+}
+
+func abortRateLimit(c *gin.Context) {
+	c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{
+		"error":   "rate limit exceeded",
+		"message": "Too many requests, please try again later",
+	})
+}
+
+func failureModeLabel(mode RateLimitFailureMode) string {
+	if mode == RateLimitFailClose {
+		return "fail-close"
+	}
+	return "fail-open"
+}
+
+func parseInt64(value any) (int64, error) {
+	switch v := value.(type) {
+	case int64:
+		return v, nil
+	case int:
+		return int64(v), nil
+	case string:
+		parsed, err := strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return parsed, nil
+	default:
+		return 0, fmt.Errorf("unexpected value type %T", value)
+	}
+}
diff --git a/backend/internal/middleware/rate_limiter_integration_test.go b/backend/internal/middleware/rate_limiter_integration_test.go
new file mode 100644
index 00000000..4759a988
--- /dev/null
+++ b/backend/internal/middleware/rate_limiter_integration_test.go
@@ -0,0 +1,114 @@
+//go:build integration
+
+package middleware
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/redis/go-redis/v9"
+	"github.com/stretchr/testify/require"
+	tcredis "github.com/testcontainers/testcontainers-go/modules/redis"
+)
+
+const redisImageTag = "redis:8.4-alpine"
+
+func TestRateLimiterSetsTTLAndDoesNotRefresh(t *testing.T) {
+	gin.SetMode(gin.TestMode)
+
+	ctx := context.Background()
+	rdb := startRedis(t, ctx)
+	limiter := NewRateLimiter(rdb)
+
+	router := gin.New()
+	router.Use(limiter.Limit("ttl-test", 10, 2*time.Second))
+	router.GET("/test", func(c 
*gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + recorder := performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + redisKey := limiter.prefix + "ttl-test:127.0.0.1" + ttlBefore, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Greater(t, ttlBefore, time.Duration(0)) + require.LessOrEqual(t, ttlBefore, 2*time.Second) + + time.Sleep(50 * time.Millisecond) + + recorder = performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + ttlAfter, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Less(t, ttlAfter, ttlBefore) +} + +func TestRateLimiterFixesMissingTTL(t *testing.T) { + gin.SetMode(gin.TestMode) + + ctx := context.Background() + rdb := startRedis(t, ctx) + limiter := NewRateLimiter(rdb) + + router := gin.New() + router.Use(limiter.Limit("ttl-missing", 10, 2*time.Second)) + router.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + redisKey := limiter.prefix + "ttl-missing:127.0.0.1" + require.NoError(t, rdb.Set(ctx, redisKey, 5, 0).Err()) + + ttlBefore, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Less(t, ttlBefore, time.Duration(0)) + + recorder := performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + ttlAfter, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Greater(t, ttlAfter, time.Duration(0)) +} + +func performRequest(router *gin.Engine) *httptest.ResponseRecorder { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + return recorder +} + +func startRedis(t *testing.T, ctx context.Context) *redis.Client { + t.Helper() + + redisContainer, err := tcredis.Run(ctx, redisImageTag) + require.NoError(t, err) + t.Cleanup(func() { + _ = redisContainer.Terminate(ctx) + }) + + redisHost, err := redisContainer.Host(ctx) + require.NoError(t, err) + redisPort, err := redisContainer.MappedPort(ctx, "6379/tcp") + require.NoError(t, err) + + rdb := redis.NewClient(&redis.Options{ + Addr: fmt.Sprintf("%s:%d", redisHost, redisPort.Int()), + DB: 0, + }) + require.NoError(t, rdb.Ping(ctx).Err()) + + t.Cleanup(func() { + _ = rdb.Close() + }) + + return rdb +} diff --git a/backend/internal/middleware/rate_limiter_test.go b/backend/internal/middleware/rate_limiter_test.go new file mode 100644 index 00000000..0c379c0f --- /dev/null +++ b/backend/internal/middleware/rate_limiter_test.go @@ -0,0 +1,100 @@ +package middleware + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +func TestWindowTTLMillis(t *testing.T) { + require.Equal(t, int64(1), windowTTLMillis(500*time.Microsecond)) + require.Equal(t, int64(1), windowTTLMillis(1500*time.Microsecond)) + require.Equal(t, int64(2), windowTTLMillis(2500*time.Microsecond)) +} + +func TestRateLimiterFailureModes(t *testing.T) { + gin.SetMode(gin.TestMode) + + rdb := redis.NewClient(&redis.Options{ + Addr: "127.0.0.1:1", + DialTimeout: 50 * time.Millisecond, + ReadTimeout: 50 * time.Millisecond, + WriteTimeout: 50 * time.Millisecond, + }) + t.Cleanup(func() { + _ = rdb.Close() + }) + + limiter := NewRateLimiter(rdb) + + failOpenRouter := gin.New() + failOpenRouter.Use(limiter.Limit("test", 1, time.Second)) + failOpenRouter.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": 
true})
+	})
+
+	req = httptest.NewRequest(http.MethodGet, "/test", nil)
+	req.RemoteAddr = "127.0.0.1:1234"
+	recorder = httptest.NewRecorder()
+	failCloseRouter.ServeHTTP(recorder, req)
+	require.Equal(t, http.StatusTooManyRequests, recorder.Code)
+}
+
+func TestRateLimiterSuccessAndLimit(t *testing.T) {
+	gin.SetMode(gin.TestMode)
+
+	originalRun := rateLimitRun
+	counts := []int64{1, 2}
+	callIndex := 0
+	rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, bool, error) {
+		if callIndex >= len(counts) {
+			return counts[len(counts)-1], false, nil
+		}
+		value := counts[callIndex]
+		callIndex++
+		return value, false, nil
+	}
+	t.Cleanup(func() {
+		rateLimitRun = originalRun
+	})
+
+	limiter := NewRateLimiter(redis.NewClient(&redis.Options{Addr: "127.0.0.1:1"}))
+
+	router := gin.New()
+	router.Use(limiter.Limit("test", 1, time.Second))
+	router.GET("/test", func(c *gin.Context) {
+		c.JSON(http.StatusOK, gin.H{"ok": true})
+	})
+
+	req := httptest.NewRequest(http.MethodGet, "/test", nil)
+	req.RemoteAddr = "127.0.0.1:1234"
+	recorder := httptest.NewRecorder()
+	router.ServeHTTP(recorder, req)
+	require.Equal(t, http.StatusOK, recorder.Code)
+
+	req = httptest.NewRequest(http.MethodGet, "/test", nil)
+	req.RemoteAddr = "127.0.0.1:1234"
+	recorder = httptest.NewRecorder()
+	router.ServeHTTP(recorder, req)
+	require.Equal(t, http.StatusTooManyRequests, recorder.Code)
+}
diff --git a/backend/internal/pkg/antigravity/claude_types.go b/backend/internal/pkg/antigravity/claude_types.go
new file mode 100644
index 00000000..8a29cd10
--- /dev/null
+++ b/backend/internal/pkg/antigravity/claude_types.go
@@ -0,0 +1,228 @@
+package antigravity
+
+import "encoding/json"
+
+// Claude request/response type definitions
+
+// ClaudeRequest is a Claude Messages API request
+type ClaudeRequest struct {
+	Model       string          `json:"model"`
+	Messages    []ClaudeMessage `json:"messages"`
+	MaxTokens   int             `json:"max_tokens,omitempty"`
+	System      json.RawMessage `json:"system,omitempty"` // string or []SystemBlock
+	Stream      bool            `json:"stream,omitempty"`
+	Temperature *float64        `json:"temperature,omitempty"`
+	TopP        *float64        `json:"top_p,omitempty"`
+	TopK        *int            `json:"top_k,omitempty"`
+	Tools       []ClaudeTool    `json:"tools,omitempty"`
+	Thinking    *ThinkingConfig `json:"thinking,omitempty"`
+	Metadata    *ClaudeMetadata `json:"metadata,omitempty"`
+}
+
+// ClaudeMessage is a single Claude message
+type ClaudeMessage struct {
+	Role    string          `json:"role"` // user, assistant
+	Content json.RawMessage `json:"content"`
+}
+
+// ThinkingConfig configures extended thinking
+type ThinkingConfig struct {
+	Type         string `json:"type"`                    // "enabled" or "disabled"
+	BudgetTokens int    `json:"budget_tokens,omitempty"` // thinking budget
+}
+
+// ClaudeMetadata is request metadata
+type ClaudeMetadata struct {
+	UserID string `json:"user_id,omitempty"`
+}
+
+// ClaudeTool is a Claude tool definition.
+// Two formats are supported:
+//  1. Standard: { "name": "...", "description": "...", "input_schema": {...} }
+//  2. Custom (MCP): { "type": "custom", "name": "...", "custom": { "description": "...", "input_schema": {...} } }
+type ClaudeTool struct {
+	Type        string          `json:"type,omitempty"`         // "custom", or empty for the standard format
+	Name        string          `json:"name"`
+	Description string          `json:"description,omitempty"`  // standard format
+	InputSchema map[string]any  `json:"input_schema,omitempty"` // standard format
+	Custom      *CustomToolSpec `json:"custom,omitempty"`       // custom format
+}
+
+// CustomToolSpec is the MCP custom tool spec
+type CustomToolSpec struct {
+	Description string         `json:"description,omitempty"`
+	InputSchema map[string]any `json:"input_schema"`
+}
+
+// ClaudeCustomToolSpec is a compatibility alias for the legacy name
+type ClaudeCustomToolSpec = CustomToolSpec
+
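+// Both formats should be treated uniformly by consumers; a minimal
+// normalization sketch (hypothetical helper, not part of this package's API):
+//
+//	func toolSpec(t ClaudeTool) (string, map[string]any) {
+//		if t.Type == "custom" && t.Custom != nil {
+//			return t.Custom.Description, t.Custom.InputSchema
+//		}
+//		return t.Description, t.InputSchema
+//	}
+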
+// SystemBlock is an element of the array form of the system prompt
+type SystemBlock struct {
+	Type string `json:"type"`
+	Text string `json:"text"`
+}
+
+// ContentBlock is a parsed Claude message content block
+type ContentBlock struct {
+	Type string `json:"type"`
+	// text
+	Text string `json:"text,omitempty"`
+	// thinking
+	Thinking  string `json:"thinking,omitempty"`
+	Signature string `json:"signature,omitempty"`
+	// tool_use
+	ID    string `json:"id,omitempty"`
+	Name  string `json:"name,omitempty"`
+	Input any    `json:"input,omitempty"`
+	// tool_result
+	ToolUseID string          `json:"tool_use_id,omitempty"`
+	Content   json.RawMessage `json:"content,omitempty"`
+	IsError   bool            `json:"is_error,omitempty"`
+	// image
+	Source *ImageSource `json:"source,omitempty"`
+}
+
+// ImageSource is a Claude image source
+type ImageSource struct {
+	Type      string `json:"type"`       // "base64"
+	MediaType string `json:"media_type"` // "image/png", "image/jpeg", etc.
+	Data      string `json:"data"`
+}
+
+// ClaudeResponse is a Claude Messages API response
+type ClaudeResponse struct {
+	ID           string              `json:"id"`
+	Type         string              `json:"type"` // "message"
+	Role         string              `json:"role"` // "assistant"
+	Model        string              `json:"model"`
+	Content      []ClaudeContentItem `json:"content"`
+	StopReason   string              `json:"stop_reason,omitempty"`   // end_turn, tool_use, max_tokens
+	StopSequence *string             `json:"stop_sequence,omitempty"` // null or a concrete value
+	Usage        ClaudeUsage         `json:"usage"`
+}
+
+// ClaudeContentItem is a Claude response content item
+type ClaudeContentItem struct {
+	Type string `json:"type"` // text, thinking, tool_use
+
+	// text
+	Text string `json:"text,omitempty"`
+
+	// thinking
+	Thinking  string `json:"thinking,omitempty"`
+	Signature string `json:"signature,omitempty"`
+
+	// tool_use
+	ID    string `json:"id,omitempty"`
+	Name  string `json:"name,omitempty"`
+	Input any    `json:"input,omitempty"`
+}
+
+// ClaudeUsage is Claude usage accounting
+type ClaudeUsage struct {
+	InputTokens              int `json:"input_tokens"`
+	OutputTokens             int `json:"output_tokens"`
+	CacheCreationInputTokens int `json:"cache_creation_input_tokens,omitempty"`
+	CacheReadInputTokens     int `json:"cache_read_input_tokens,omitempty"`
+}
+
+// ClaudeError is a Claude error response
+type ClaudeError struct {
+	Type  string      `json:"type"` // "error"
+	Error ErrorDetail `json:"error"`
+}
+
+// ErrorDetail carries error details
+type ErrorDetail struct {
+	Type    string `json:"type"`
+	Message string `json:"message"`
+}
+
+// modelDef is an internal Antigravity model definition
+type modelDef struct {
+	ID          string
+	DisplayName string
+	CreatedAt   string // only used by the Claude API format
+}
+
+// Claude models supported by Antigravity
+var claudeModels = []modelDef{
+	{ID: "claude-opus-4-5-thinking", DisplayName: "Claude Opus 4.5 Thinking", CreatedAt: "2025-11-01T00:00:00Z"},
+	{ID: "claude-sonnet-4-5", DisplayName: "Claude Sonnet 4.5", CreatedAt: "2025-09-29T00:00:00Z"},
+	{ID: "claude-sonnet-4-5-thinking", DisplayName: "Claude Sonnet 4.5 Thinking", CreatedAt: "2025-09-29T00:00:00Z"},
+}
+
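+// These static tables back the fallback model-list endpoints; DefaultModels()
+// below renders a claudeModels entry as, for example:
+//
+//	{"id": "claude-sonnet-4-5", "type": "model", "display_name": "Claude Sonnet 4.5", "created_at": "2025-09-29T00:00:00Z"}
+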
+// Gemini models supported by Antigravity
+var geminiModels = []modelDef{
+	{ID: "gemini-2.5-flash", DisplayName: "Gemini 2.5 Flash", CreatedAt: "2025-01-01T00:00:00Z"},
+	{ID: "gemini-2.5-flash-lite", DisplayName: "Gemini 2.5 Flash Lite", CreatedAt: "2025-01-01T00:00:00Z"},
+	{ID: "gemini-2.5-flash-thinking", DisplayName: "Gemini 2.5 Flash Thinking", CreatedAt: "2025-01-01T00:00:00Z"},
+	{ID: "gemini-3-flash", DisplayName: "Gemini 3 Flash", CreatedAt: "2025-06-01T00:00:00Z"},
+	{ID: "gemini-3-pro-low", DisplayName: "Gemini 3 Pro Low", CreatedAt: "2025-06-01T00:00:00Z"},
+	{ID: "gemini-3-pro-high", DisplayName: "Gemini 3 Pro High", CreatedAt: "2025-06-01T00:00:00Z"},
+	{ID: "gemini-3-pro-preview", DisplayName: "Gemini 3 Pro Preview", CreatedAt: "2025-06-01T00:00:00Z"},
+	{ID: "gemini-3-pro-image", DisplayName: "Gemini 3 Pro Image", CreatedAt: "2025-06-01T00:00:00Z"},
+}
+
+// ========== Claude API format (/v1/models) ==========
+
+// ClaudeModel is the Claude API model format
+type ClaudeModel struct {
+	ID          string `json:"id"`
+	Type        string `json:"type"`
+	DisplayName string `json:"display_name"`
+	CreatedAt   string `json:"created_at"`
+}
+
+// DefaultModels returns the model list in Claude API format (Claude + Gemini)
+func DefaultModels() []ClaudeModel {
+	// Build a fresh slice rather than appending to claudeModels directly, so
+	// the package-level table's backing array can never be mutated.
+	all := make([]modelDef, 0, len(claudeModels)+len(geminiModels))
+	all = append(all, claudeModels...)
+	all = append(all, geminiModels...)
+	result := make([]ClaudeModel, len(all))
+	for i, m := range all {
+		result[i] = ClaudeModel{ID: m.ID, Type: "model", DisplayName: m.DisplayName, CreatedAt: m.CreatedAt}
+	}
+	return result
+}
+
+// ========== Gemini v1beta format (/v1beta/models) ==========
+
+// GeminiModel is the Gemini v1beta model format
+type GeminiModel struct {
+	Name                       string   `json:"name"`
+	DisplayName                string   `json:"displayName,omitempty"`
+	SupportedGenerationMethods []string `json:"supportedGenerationMethods,omitempty"`
+}
+
+// GeminiModelsListResponse is the Gemini v1beta model list response
+type GeminiModelsListResponse struct {
+	Models []GeminiModel `json:"models"`
+}
+
+var defaultGeminiMethods = []string{"generateContent", "streamGenerateContent"}
+
+// DefaultGeminiModels returns the model list in Gemini v1beta format (Gemini models only)
+func DefaultGeminiModels() []GeminiModel {
+	result := make([]GeminiModel, len(geminiModels))
+	for i, m := range geminiModels {
+		result[i] = GeminiModel{Name: "models/" + m.ID, DisplayName: m.DisplayName, SupportedGenerationMethods: defaultGeminiMethods}
+	}
+	return result
+}
+
+// FallbackGeminiModelsList returns the model list response in Gemini v1beta format
+func FallbackGeminiModelsList() GeminiModelsListResponse {
+	return GeminiModelsListResponse{Models: DefaultGeminiModels()}
+}
+
+// FallbackGeminiModel returns a single model entry (v1beta format),
+// normalizing the name to the "models/" prefix.
+func FallbackGeminiModel(model string) GeminiModel {
+	if model == "" {
+		return GeminiModel{Name: "models/unknown", SupportedGenerationMethods: defaultGeminiMethods}
+	}
+	name := model
+	if len(model) < 7 || model[:7] != "models/" {
+		name = "models/" + model
+	}
+	return GeminiModel{Name: name, SupportedGenerationMethods: defaultGeminiMethods}
+}
diff --git a/backend/internal/pkg/antigravity/client.go b/backend/internal/pkg/antigravity/client.go
new file mode 100644
index 00000000..1248be95
--- /dev/null
+++ b/backend/internal/pkg/antigravity/client.go
@@ -0,0 +1,474 @@
+// Package antigravity provides a client for the Antigravity API.
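+//
+// A typical call sequence against the OAuth and v1internal surface (sketch;
+// error handling elided):
+//
+//	c := antigravity.NewClient("")                       // optional proxy URL
+//	tok, _ := c.RefreshToken(ctx, refreshToken)          // rotate the access token
+//	load, _, _ := c.LoadCodeAssist(ctx, tok.AccessToken) // account and tier info
+//	_ = load.GetTier()                                   // e.g. "g1-pro-tier"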
+package antigravity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// resolveHost 从 URL 解析 host +func resolveHost(urlStr string) string { + parsed, err := url.Parse(urlStr) + if err != nil { + return "" + } + return parsed.Host +} + +// NewAPIRequestWithURL 使用指定的 base URL 创建 Antigravity API 请求(v1internal 端点) +func NewAPIRequestWithURL(ctx context.Context, baseURL, action, accessToken string, body []byte) (*http.Request, error) { + // 构建 URL,流式请求添加 ?alt=sse 参数 + apiURL := fmt.Sprintf("%s/v1internal:%s", baseURL, action) + isStream := action == "streamGenerateContent" + if isStream { + apiURL += "?alt=sse" + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, bytes.NewReader(body)) + if err != nil { + return nil, err + } + + // 基础 Headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("User-Agent", UserAgent) + + // Accept Header 根据请求类型设置 + if isStream { + req.Header.Set("Accept", "text/event-stream") + } else { + req.Header.Set("Accept", "application/json") + } + + // 显式设置 Host Header + if host := resolveHost(apiURL); host != "" { + req.Host = host + } + + return req, nil +} + +// NewAPIRequest 使用默认 URL 创建 Antigravity API 请求(v1internal 端点) +// 向后兼容:仅使用默认 BaseURL +func NewAPIRequest(ctx context.Context, action, accessToken string, body []byte) (*http.Request, error) { + return NewAPIRequestWithURL(ctx, BaseURL, action, accessToken, body) +} + +// TokenResponse Google OAuth token 响应 +type TokenResponse struct { + AccessToken string `json:"access_token"` + ExpiresIn int64 `json:"expires_in"` + TokenType string `json:"token_type"` + Scope string `json:"scope,omitempty"` + RefreshToken string `json:"refresh_token,omitempty"` +} + +// UserInfo Google 用户信息 +type UserInfo struct { + Email string `json:"email"` + Name string `json:"name,omitempty"` + GivenName string `json:"given_name,omitempty"` + FamilyName string `json:"family_name,omitempty"` + Picture string `json:"picture,omitempty"` +} + +// LoadCodeAssistRequest loadCodeAssist 请求 +type LoadCodeAssistRequest struct { + Metadata struct { + IDEType string `json:"ideType"` + } `json:"metadata"` +} + +// TierInfo 账户类型信息 +type TierInfo struct { + ID string `json:"id"` // free-tier, g1-pro-tier, g1-ultra-tier + Name string `json:"name"` // 显示名称 + Description string `json:"description"` // 描述 +} + +// UnmarshalJSON supports both legacy string tiers and object tiers. 
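+// Both of the following JSON forms decode into the same struct:
+//
+//	"free-tier"
+//	{"id": "g1-pro-tier", "name": "Pro", "description": "..."}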
+func (t *TierInfo) UnmarshalJSON(data []byte) error { + data = bytes.TrimSpace(data) + if len(data) == 0 || string(data) == "null" { + return nil + } + if data[0] == '"' { + var id string + if err := json.Unmarshal(data, &id); err != nil { + return err + } + t.ID = id + return nil + } + type alias TierInfo + var decoded alias + if err := json.Unmarshal(data, &decoded); err != nil { + return err + } + *t = TierInfo(decoded) + return nil +} + +// IneligibleTier 不符合条件的层级信息 +type IneligibleTier struct { + Tier *TierInfo `json:"tier,omitempty"` + // ReasonCode 不符合条件的原因代码,如 INELIGIBLE_ACCOUNT + ReasonCode string `json:"reasonCode,omitempty"` + ReasonMessage string `json:"reasonMessage,omitempty"` +} + +// LoadCodeAssistResponse loadCodeAssist 响应 +type LoadCodeAssistResponse struct { + CloudAICompanionProject string `json:"cloudaicompanionProject"` + CurrentTier *TierInfo `json:"currentTier,omitempty"` + PaidTier *TierInfo `json:"paidTier,omitempty"` + IneligibleTiers []*IneligibleTier `json:"ineligibleTiers,omitempty"` +} + +// GetTier 获取账户类型 +// 优先返回 paidTier(付费订阅级别),否则返回 currentTier +func (r *LoadCodeAssistResponse) GetTier() string { + if r.PaidTier != nil && r.PaidTier.ID != "" { + return r.PaidTier.ID + } + if r.CurrentTier != nil { + return r.CurrentTier.ID + } + return "" +} + +// Client Antigravity API 客户端 +type Client struct { + httpClient *http.Client +} + +func NewClient(proxyURL string) *Client { + client := &http.Client{ + Timeout: 30 * time.Second, + } + + if strings.TrimSpace(proxyURL) != "" { + if proxyURLParsed, err := url.Parse(proxyURL); err == nil { + client.Transport = &http.Transport{ + Proxy: http.ProxyURL(proxyURLParsed), + } + } + } + + return &Client{ + httpClient: client, + } +} + +// isConnectionError 判断是否为连接错误(网络超时、DNS 失败、连接拒绝) +func isConnectionError(err error) bool { + if err == nil { + return false + } + + // 检查超时错误 + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + return true + } + + // 检查连接错误(DNS 失败、连接拒绝) + var opErr *net.OpError + if errors.As(err, &opErr) { + return true + } + + // 检查 URL 错误 + var urlErr *url.Error + return errors.As(err, &urlErr) +} + +// shouldFallbackToNextURL 判断是否应切换到下一个 URL +// 仅连接错误和 HTTP 429 触发 URL 降级 +func shouldFallbackToNextURL(err error, statusCode int) bool { + if isConnectionError(err) { + return true + } + return statusCode == http.StatusTooManyRequests +} + +// ExchangeCode 用 authorization code 交换 token +func (c *Client) ExchangeCode(ctx context.Context, code, codeVerifier string) (*TokenResponse, error) { + params := url.Values{} + params.Set("client_id", ClientID) + params.Set("client_secret", ClientSecret) + params.Set("code", code) + params.Set("redirect_uri", RedirectURI) + params.Set("grant_type", "authorization_code") + params.Set("code_verifier", codeVerifier) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, TokenURL, strings.NewReader(params.Encode())) + if err != nil { + return nil, fmt.Errorf("创建请求失败: %w", err) + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("token 交换请求失败: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("读取响应失败: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("token 交换失败 (HTTP %d): %s", resp.StatusCode, string(bodyBytes)) + } + + var tokenResp TokenResponse + if err := json.Unmarshal(bodyBytes, &tokenResp); err != nil { + 
return nil, fmt.Errorf("token 解析失败: %w", err) + } + + return &tokenResp, nil +} + +// RefreshToken 刷新 access_token +func (c *Client) RefreshToken(ctx context.Context, refreshToken string) (*TokenResponse, error) { + params := url.Values{} + params.Set("client_id", ClientID) + params.Set("client_secret", ClientSecret) + params.Set("refresh_token", refreshToken) + params.Set("grant_type", "refresh_token") + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, TokenURL, strings.NewReader(params.Encode())) + if err != nil { + return nil, fmt.Errorf("创建请求失败: %w", err) + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("token 刷新请求失败: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("读取响应失败: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("token 刷新失败 (HTTP %d): %s", resp.StatusCode, string(bodyBytes)) + } + + var tokenResp TokenResponse + if err := json.Unmarshal(bodyBytes, &tokenResp); err != nil { + return nil, fmt.Errorf("token 解析失败: %w", err) + } + + return &tokenResp, nil +} + +// GetUserInfo 获取用户信息 +func (c *Client) GetUserInfo(ctx context.Context, accessToken string) (*UserInfo, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, UserInfoURL, nil) + if err != nil { + return nil, fmt.Errorf("创建请求失败: %w", err) + } + req.Header.Set("Authorization", "Bearer "+accessToken) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("用户信息请求失败: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("读取响应失败: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("获取用户信息失败 (HTTP %d): %s", resp.StatusCode, string(bodyBytes)) + } + + var userInfo UserInfo + if err := json.Unmarshal(bodyBytes, &userInfo); err != nil { + return nil, fmt.Errorf("用户信息解析失败: %w", err) + } + + return &userInfo, nil +} + +// LoadCodeAssist 获取账户信息,返回解析后的结构体和原始 JSON +// 支持 URL fallback:sandbox → daily → prod +func (c *Client) LoadCodeAssist(ctx context.Context, accessToken string) (*LoadCodeAssistResponse, map[string]any, error) { + reqBody := LoadCodeAssistRequest{} + reqBody.Metadata.IDEType = "ANTIGRAVITY" + + bodyBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, nil, fmt.Errorf("序列化请求失败: %w", err) + } + + // 获取可用的 URL 列表 + availableURLs := DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = BaseURLs // 所有 URL 都不可用时,重试所有 + } + + var lastErr error + for urlIdx, baseURL := range availableURLs { + apiURL := baseURL + "/v1internal:loadCodeAssist" + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + if err != nil { + lastErr = fmt.Errorf("创建请求失败: %w", err) + continue + } + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", UserAgent) + + resp, err := c.httpClient.Do(req) + if err != nil { + lastErr = fmt.Errorf("loadCodeAssist 请求失败: %w", err) + if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity] loadCodeAssist URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) + continue + } + return nil, nil, lastErr + } + + respBodyBytes, err := 
io.ReadAll(resp.Body) + _ = resp.Body.Close() // 立即关闭,避免循环内 defer 导致的资源泄漏 + if err != nil { + return nil, nil, fmt.Errorf("读取响应失败: %w", err) + } + + // 检查是否需要 URL 降级 + if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { + DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity] loadCodeAssist URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) + continue + } + + if resp.StatusCode != http.StatusOK { + return nil, nil, fmt.Errorf("loadCodeAssist 失败 (HTTP %d): %s", resp.StatusCode, string(respBodyBytes)) + } + + var loadResp LoadCodeAssistResponse + if err := json.Unmarshal(respBodyBytes, &loadResp); err != nil { + return nil, nil, fmt.Errorf("响应解析失败: %w", err) + } + + // 解析原始 JSON 为 map + var rawResp map[string]any + _ = json.Unmarshal(respBodyBytes, &rawResp) + + return &loadResp, rawResp, nil + } + + return nil, nil, lastErr +} + +// ModelQuotaInfo 模型配额信息 +type ModelQuotaInfo struct { + RemainingFraction float64 `json:"remainingFraction"` + ResetTime string `json:"resetTime,omitempty"` +} + +// ModelInfo 模型信息 +type ModelInfo struct { + QuotaInfo *ModelQuotaInfo `json:"quotaInfo,omitempty"` +} + +// FetchAvailableModelsRequest fetchAvailableModels 请求 +type FetchAvailableModelsRequest struct { + Project string `json:"project"` +} + +// FetchAvailableModelsResponse fetchAvailableModels 响应 +type FetchAvailableModelsResponse struct { + Models map[string]ModelInfo `json:"models"` +} + +// FetchAvailableModels 获取可用模型和配额信息,返回解析后的结构体和原始 JSON +// 支持 URL fallback:sandbox → daily → prod +func (c *Client) FetchAvailableModels(ctx context.Context, accessToken, projectID string) (*FetchAvailableModelsResponse, map[string]any, error) { + reqBody := FetchAvailableModelsRequest{Project: projectID} + bodyBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, nil, fmt.Errorf("序列化请求失败: %w", err) + } + + // 获取可用的 URL 列表 + availableURLs := DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = BaseURLs // 所有 URL 都不可用时,重试所有 + } + + var lastErr error + for urlIdx, baseURL := range availableURLs { + apiURL := baseURL + "/v1internal:fetchAvailableModels" + req, err := http.NewRequestWithContext(ctx, http.MethodPost, apiURL, strings.NewReader(string(bodyBytes))) + if err != nil { + lastErr = fmt.Errorf("创建请求失败: %w", err) + continue + } + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", UserAgent) + + resp, err := c.httpClient.Do(req) + if err != nil { + lastErr = fmt.Errorf("fetchAvailableModels 请求失败: %w", err) + if shouldFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity] fetchAvailableModels URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) + continue + } + return nil, nil, lastErr + } + + respBodyBytes, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() // 立即关闭,避免循环内 defer 导致的资源泄漏 + if err != nil { + return nil, nil, fmt.Errorf("读取响应失败: %w", err) + } + + // 检查是否需要 URL 降级 + if shouldFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { + DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity] fetchAvailableModels URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) + continue + } + + if resp.StatusCode != http.StatusOK { + return nil, nil, fmt.Errorf("fetchAvailableModels 失败 (HTTP %d): %s", resp.StatusCode, 
string(respBodyBytes)) + } + + var modelsResp FetchAvailableModelsResponse + if err := json.Unmarshal(respBodyBytes, &modelsResp); err != nil { + return nil, nil, fmt.Errorf("响应解析失败: %w", err) + } + + // 解析原始 JSON 为 map + var rawResp map[string]any + _ = json.Unmarshal(respBodyBytes, &rawResp) + + return &modelsResp, rawResp, nil + } + + return nil, nil, lastErr +} diff --git a/backend/internal/pkg/antigravity/gemini_types.go b/backend/internal/pkg/antigravity/gemini_types.go new file mode 100644 index 00000000..f688332f --- /dev/null +++ b/backend/internal/pkg/antigravity/gemini_types.go @@ -0,0 +1,175 @@ +package antigravity + +// Gemini v1internal 请求/响应类型定义 + +// V1InternalRequest v1internal 请求包装 +type V1InternalRequest struct { + Project string `json:"project"` + RequestID string `json:"requestId"` + UserAgent string `json:"userAgent"` + RequestType string `json:"requestType,omitempty"` + Model string `json:"model"` + Request GeminiRequest `json:"request"` +} + +// GeminiRequest Gemini 请求内容 +type GeminiRequest struct { + Contents []GeminiContent `json:"contents"` + SystemInstruction *GeminiContent `json:"systemInstruction,omitempty"` + GenerationConfig *GeminiGenerationConfig `json:"generationConfig,omitempty"` + Tools []GeminiToolDeclaration `json:"tools,omitempty"` + ToolConfig *GeminiToolConfig `json:"toolConfig,omitempty"` + SafetySettings []GeminiSafetySetting `json:"safetySettings,omitempty"` + SessionID string `json:"sessionId,omitempty"` +} + +// GeminiContent Gemini 内容 +type GeminiContent struct { + Role string `json:"role"` // user, model + Parts []GeminiPart `json:"parts"` +} + +// GeminiPart Gemini 内容部分 +type GeminiPart struct { + Text string `json:"text,omitempty"` + Thought bool `json:"thought,omitempty"` + ThoughtSignature string `json:"thoughtSignature,omitempty"` + InlineData *GeminiInlineData `json:"inlineData,omitempty"` + FunctionCall *GeminiFunctionCall `json:"functionCall,omitempty"` + FunctionResponse *GeminiFunctionResponse `json:"functionResponse,omitempty"` +} + +// GeminiInlineData Gemini 内联数据(图片等) +type GeminiInlineData struct { + MimeType string `json:"mimeType"` + Data string `json:"data"` +} + +// GeminiFunctionCall Gemini 函数调用 +type GeminiFunctionCall struct { + Name string `json:"name"` + Args any `json:"args,omitempty"` + ID string `json:"id,omitempty"` +} + +// GeminiFunctionResponse Gemini 函数响应 +type GeminiFunctionResponse struct { + Name string `json:"name"` + Response map[string]any `json:"response"` + ID string `json:"id,omitempty"` +} + +// GeminiGenerationConfig Gemini 生成配置 +type GeminiGenerationConfig struct { + MaxOutputTokens int `json:"maxOutputTokens,omitempty"` + Temperature *float64 `json:"temperature,omitempty"` + TopP *float64 `json:"topP,omitempty"` + TopK *int `json:"topK,omitempty"` + ThinkingConfig *GeminiThinkingConfig `json:"thinkingConfig,omitempty"` + StopSequences []string `json:"stopSequences,omitempty"` + ImageConfig *GeminiImageConfig `json:"imageConfig,omitempty"` +} + +// GeminiImageConfig Gemini 图片生成配置(仅 gemini-3-pro-image 支持) +type GeminiImageConfig struct { + AspectRatio string `json:"aspectRatio,omitempty"` // "1:1", "16:9", "9:16", "4:3", "3:4" + ImageSize string `json:"imageSize,omitempty"` // "1K", "2K", "4K" +} + +// GeminiThinkingConfig Gemini thinking 配置 +type GeminiThinkingConfig struct { + IncludeThoughts bool `json:"includeThoughts"` + ThinkingBudget int `json:"thinkingBudget,omitempty"` +} + +// GeminiToolDeclaration Gemini 工具声明 +type GeminiToolDeclaration struct { + FunctionDeclarations []GeminiFunctionDecl 
`json:"functionDeclarations,omitempty"` + GoogleSearch *GeminiGoogleSearch `json:"googleSearch,omitempty"` +} + +// GeminiFunctionDecl Gemini 函数声明 +type GeminiFunctionDecl struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Parameters map[string]any `json:"parameters,omitempty"` +} + +// GeminiGoogleSearch Gemini Google 搜索工具 +type GeminiGoogleSearch struct { + EnhancedContent *GeminiEnhancedContent `json:"enhancedContent,omitempty"` +} + +// GeminiEnhancedContent 增强内容配置 +type GeminiEnhancedContent struct { + ImageSearch *GeminiImageSearch `json:"imageSearch,omitempty"` +} + +// GeminiImageSearch 图片搜索配置 +type GeminiImageSearch struct { + MaxResultCount int `json:"maxResultCount,omitempty"` +} + +// GeminiToolConfig Gemini 工具配置 +type GeminiToolConfig struct { + FunctionCallingConfig *GeminiFunctionCallingConfig `json:"functionCallingConfig,omitempty"` +} + +// GeminiFunctionCallingConfig 函数调用配置 +type GeminiFunctionCallingConfig struct { + Mode string `json:"mode,omitempty"` // VALIDATED, AUTO, NONE +} + +// GeminiSafetySetting Gemini 安全设置 +type GeminiSafetySetting struct { + Category string `json:"category"` + Threshold string `json:"threshold"` +} + +// V1InternalResponse v1internal 响应包装 +type V1InternalResponse struct { + Response GeminiResponse `json:"response"` + ResponseID string `json:"responseId,omitempty"` + ModelVersion string `json:"modelVersion,omitempty"` +} + +// GeminiResponse Gemini 响应 +type GeminiResponse struct { + Candidates []GeminiCandidate `json:"candidates,omitempty"` + UsageMetadata *GeminiUsageMetadata `json:"usageMetadata,omitempty"` + ResponseID string `json:"responseId,omitempty"` + ModelVersion string `json:"modelVersion,omitempty"` +} + +// GeminiCandidate Gemini 候选响应 +type GeminiCandidate struct { + Content *GeminiContent `json:"content,omitempty"` + FinishReason string `json:"finishReason,omitempty"` + Index int `json:"index,omitempty"` +} + +// GeminiUsageMetadata Gemini 用量元数据 +type GeminiUsageMetadata struct { + PromptTokenCount int `json:"promptTokenCount,omitempty"` + CandidatesTokenCount int `json:"candidatesTokenCount,omitempty"` + CachedContentTokenCount int `json:"cachedContentTokenCount,omitempty"` + TotalTokenCount int `json:"totalTokenCount,omitempty"` +} + +// DefaultSafetySettings 默认安全设置(关闭所有过滤) +var DefaultSafetySettings = []GeminiSafetySetting{ + {Category: "HARM_CATEGORY_HARASSMENT", Threshold: "OFF"}, + {Category: "HARM_CATEGORY_HATE_SPEECH", Threshold: "OFF"}, + {Category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", Threshold: "OFF"}, + {Category: "HARM_CATEGORY_DANGEROUS_CONTENT", Threshold: "OFF"}, + {Category: "HARM_CATEGORY_CIVIC_INTEGRITY", Threshold: "OFF"}, +} + +// DefaultStopSequences 默认停止序列 +var DefaultStopSequences = []string{ + "<|user|>", + "<|endoftext|>", + "<|end_of_turn|>", + "[DONE]", + "\n\nHuman:", +} diff --git a/backend/internal/pkg/antigravity/oauth.go b/backend/internal/pkg/antigravity/oauth.go new file mode 100644 index 00000000..736c45df --- /dev/null +++ b/backend/internal/pkg/antigravity/oauth.go @@ -0,0 +1,263 @@ +package antigravity + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "net/url" + "strings" + "sync" + "time" +) + +const ( + // Google OAuth 端点 + AuthorizeURL = "https://accounts.google.com/o/oauth2/v2/auth" + TokenURL = "https://oauth2.googleapis.com/token" + UserInfoURL = "https://www.googleapis.com/oauth2/v2/userinfo" + + // Antigravity OAuth 客户端凭证 + ClientID = 
"1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com" + ClientSecret = "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" + + // 固定的 redirect_uri(用户需手动复制 code) + RedirectURI = "http://localhost:8085/callback" + + // OAuth scopes + Scopes = "https://www.googleapis.com/auth/cloud-platform " + + "https://www.googleapis.com/auth/userinfo.email " + + "https://www.googleapis.com/auth/userinfo.profile " + + "https://www.googleapis.com/auth/cclog " + + "https://www.googleapis.com/auth/experimentsandconfigs" + + // User-Agent(模拟官方客户端) + UserAgent = "antigravity/1.104.0 darwin/arm64" + + // Session 过期时间 + SessionTTL = 30 * time.Minute + + // URL 可用性 TTL(不可用 URL 的恢复时间) + URLAvailabilityTTL = 5 * time.Minute +) + +// BaseURLs 定义 Antigravity API 端点,按优先级排序 +// fallback 顺序: sandbox → daily → prod +var BaseURLs = []string{ + "https://daily-cloudcode-pa.sandbox.googleapis.com", // sandbox + "https://daily-cloudcode-pa.googleapis.com", // daily + "https://cloudcode-pa.googleapis.com", // prod +} + +// BaseURL 默认 URL(保持向后兼容) +var BaseURL = BaseURLs[0] + +// URLAvailability 管理 URL 可用性状态(带 TTL 自动恢复) +type URLAvailability struct { + mu sync.RWMutex + unavailable map[string]time.Time // URL -> 恢复时间 + ttl time.Duration +} + +// DefaultURLAvailability 全局 URL 可用性管理器 +var DefaultURLAvailability = NewURLAvailability(URLAvailabilityTTL) + +// NewURLAvailability 创建 URL 可用性管理器 +func NewURLAvailability(ttl time.Duration) *URLAvailability { + return &URLAvailability{ + unavailable: make(map[string]time.Time), + ttl: ttl, + } +} + +// MarkUnavailable 标记 URL 临时不可用 +func (u *URLAvailability) MarkUnavailable(url string) { + u.mu.Lock() + defer u.mu.Unlock() + u.unavailable[url] = time.Now().Add(u.ttl) +} + +// IsAvailable 检查 URL 是否可用 +func (u *URLAvailability) IsAvailable(url string) bool { + u.mu.RLock() + defer u.mu.RUnlock() + expiry, exists := u.unavailable[url] + if !exists { + return true + } + return time.Now().After(expiry) +} + +// GetAvailableURLs 返回可用的 URL 列表(保持优先级顺序) +func (u *URLAvailability) GetAvailableURLs() []string { + u.mu.RLock() + defer u.mu.RUnlock() + + now := time.Now() + result := make([]string, 0, len(BaseURLs)) + for _, url := range BaseURLs { + expiry, exists := u.unavailable[url] + if !exists || now.After(expiry) { + result = append(result, url) + } + } + return result +} + +// OAuthSession 保存 OAuth 授权流程的临时状态 +type OAuthSession struct { + State string `json:"state"` + CodeVerifier string `json:"code_verifier"` + ProxyURL string `json:"proxy_url,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +// SessionStore OAuth session 存储 +type SessionStore struct { + mu sync.RWMutex + sessions map[string]*OAuthSession + stopCh chan struct{} +} + +func NewSessionStore() *SessionStore { + store := &SessionStore{ + sessions: make(map[string]*OAuthSession), + stopCh: make(chan struct{}), + } + go store.cleanup() + return store +} + +func (s *SessionStore) Set(sessionID string, session *OAuthSession) { + s.mu.Lock() + defer s.mu.Unlock() + s.sessions[sessionID] = session +} + +func (s *SessionStore) Get(sessionID string) (*OAuthSession, bool) { + s.mu.RLock() + defer s.mu.RUnlock() + session, ok := s.sessions[sessionID] + if !ok { + return nil, false + } + if time.Since(session.CreatedAt) > SessionTTL { + return nil, false + } + return session, true +} + +func (s *SessionStore) Delete(sessionID string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.sessions, sessionID) +} + +func (s *SessionStore) Stop() { + select { + case <-s.stopCh: + return + default: + close(s.stopCh) + } +} + +func (s 
*SessionStore) cleanup() { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + for { + select { + case <-s.stopCh: + return + case <-ticker.C: + s.mu.Lock() + for id, session := range s.sessions { + if time.Since(session.CreatedAt) > SessionTTL { + delete(s.sessions, id) + } + } + s.mu.Unlock() + } + } +} + +func GenerateRandomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + return b, nil +} + +func GenerateState() (string, error) { + bytes, err := GenerateRandomBytes(32) + if err != nil { + return "", err + } + return base64URLEncode(bytes), nil +} + +func GenerateSessionID() (string, error) { + bytes, err := GenerateRandomBytes(16) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +func GenerateCodeVerifier() (string, error) { + bytes, err := GenerateRandomBytes(32) + if err != nil { + return "", err + } + return base64URLEncode(bytes), nil +} + +func GenerateCodeChallenge(verifier string) string { + hash := sha256.Sum256([]byte(verifier)) + return base64URLEncode(hash[:]) +} + +func base64URLEncode(data []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=") +} + +// BuildAuthorizationURL 构建 Google OAuth 授权 URL +func BuildAuthorizationURL(state, codeChallenge string) string { + params := url.Values{} + params.Set("client_id", ClientID) + params.Set("redirect_uri", RedirectURI) + params.Set("response_type", "code") + params.Set("scope", Scopes) + params.Set("state", state) + params.Set("code_challenge", codeChallenge) + params.Set("code_challenge_method", "S256") + params.Set("access_type", "offline") + params.Set("prompt", "consent") + params.Set("include_granted_scopes", "true") + + return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()) +} + +// GenerateMockProjectID 生成随机 project_id(当 API 不返回时使用) +// 格式:{形容词}-{名词}-{5位随机字符} +func GenerateMockProjectID() string { + adjectives := []string{"useful", "bright", "swift", "calm", "bold"} + nouns := []string{"fuze", "wave", "spark", "flow", "core"} + + randBytes, _ := GenerateRandomBytes(7) + + adj := adjectives[int(randBytes[0])%len(adjectives)] + noun := nouns[int(randBytes[1])%len(nouns)] + + // 生成 5 位随机字符(a-z0-9) + const charset = "abcdefghijklmnopqrstuvwxyz0123456789" + suffix := make([]byte, 5) + for i := 0; i < 5; i++ { + suffix[i] = charset[int(randBytes[i+2])%len(charset)] + } + + return fmt.Sprintf("%s-%s-%s", adj, noun, string(suffix)) +} diff --git a/backend/internal/pkg/antigravity/request_transformer.go b/backend/internal/pkg/antigravity/request_transformer.go new file mode 100644 index 00000000..a8474576 --- /dev/null +++ b/backend/internal/pkg/antigravity/request_transformer.go @@ -0,0 +1,773 @@ +package antigravity + +import ( + "crypto/sha256" + "encoding/binary" + "encoding/json" + "fmt" + "log" + "math/rand" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +var ( + sessionRand = rand.New(rand.NewSource(time.Now().UnixNano())) + sessionRandMutex sync.Mutex +) + +// generateStableSessionID 基于用户消息内容生成稳定的 session ID +func generateStableSessionID(contents []GeminiContent) string { + // 查找第一个 user 消息的文本 + for _, content := range contents { + if content.Role == "user" && len(content.Parts) > 0 { + if text := content.Parts[0].Text; text != "" { + h := sha256.Sum256([]byte(text)) + n := int64(binary.BigEndian.Uint64(h[:8])) & 0x7FFFFFFFFFFFFFFF + return "-" + strconv.FormatInt(n, 10) + } + } + } + // 回退:生成随机 
session ID + sessionRandMutex.Lock() + n := sessionRand.Int63n(9_000_000_000_000_000_000) + sessionRandMutex.Unlock() + return "-" + strconv.FormatInt(n, 10) +} + +type TransformOptions struct { + EnableIdentityPatch bool + // IdentityPatch 可选:自定义注入到 systemInstruction 开头的身份防护提示词; + // 为空时使用默认模板(包含 [IDENTITY_PATCH] 及 SYSTEM_PROMPT_BEGIN 标记)。 + IdentityPatch string +} + +func DefaultTransformOptions() TransformOptions { + return TransformOptions{ + EnableIdentityPatch: true, + } +} + +// TransformClaudeToGemini 将 Claude 请求转换为 v1internal Gemini 格式 +func TransformClaudeToGemini(claudeReq *ClaudeRequest, projectID, mappedModel string) ([]byte, error) { + return TransformClaudeToGeminiWithOptions(claudeReq, projectID, mappedModel, DefaultTransformOptions()) +} + +// TransformClaudeToGeminiWithOptions 将 Claude 请求转换为 v1internal Gemini 格式(可配置身份补丁等行为) +func TransformClaudeToGeminiWithOptions(claudeReq *ClaudeRequest, projectID, mappedModel string, opts TransformOptions) ([]byte, error) { + // 用于存储 tool_use id -> name 映射 + toolIDToName := make(map[string]string) + + // 检测是否启用 thinking + isThinkingEnabled := claudeReq.Thinking != nil && claudeReq.Thinking.Type == "enabled" + + // 只有 Gemini 模型支持 dummy thought workaround + // Claude 模型通过 Vertex/Google API 需要有效的 thought signatures + allowDummyThought := strings.HasPrefix(mappedModel, "gemini-") + + // 1. 构建 contents + contents, strippedThinking, err := buildContents(claudeReq.Messages, toolIDToName, isThinkingEnabled, allowDummyThought) + if err != nil { + return nil, fmt.Errorf("build contents: %w", err) + } + + // 2. 构建 systemInstruction + systemInstruction := buildSystemInstruction(claudeReq.System, claudeReq.Model, opts) + + // 3. 构建 generationConfig + reqForConfig := claudeReq + if strippedThinking { + // If we had to downgrade thinking blocks to plain text due to missing/invalid signatures, + // disable upstream thinking mode to avoid signature/structure validation errors. + reqCopy := *claudeReq + reqCopy.Thinking = nil + reqForConfig = &reqCopy + } + generationConfig := buildGenerationConfig(reqForConfig) + + // 4. 构建 tools + tools := buildTools(claudeReq.Tools) + + // 5. 构建内部请求 + innerRequest := GeminiRequest{ + Contents: contents, + // 总是设置 toolConfig,与官方客户端一致 + ToolConfig: &GeminiToolConfig{ + FunctionCallingConfig: &GeminiFunctionCallingConfig{ + Mode: "VALIDATED", + }, + }, + // 总是生成 sessionId,基于用户消息内容 + SessionID: generateStableSessionID(contents), + } + + if systemInstruction != nil { + innerRequest.SystemInstruction = systemInstruction + } + if generationConfig != nil { + innerRequest.GenerationConfig = generationConfig + } + if len(tools) > 0 { + innerRequest.Tools = tools + } + + // 如果提供了 metadata.user_id,优先使用 + if claudeReq.Metadata != nil && claudeReq.Metadata.UserID != "" { + innerRequest.SessionID = claudeReq.Metadata.UserID + } + + // 6. 包装为 v1internal 请求 + v1Req := V1InternalRequest{ + Project: projectID, + RequestID: "agent-" + uuid.New().String(), + UserAgent: "antigravity", // 固定值,与官方客户端一致 + RequestType: "agent", + Model: mappedModel, + Request: innerRequest, + } + + return json.Marshal(v1Req) +} + +// antigravityIdentity Antigravity identity 提示词 +const antigravityIdentity = ` +You are Antigravity, a powerful agentic AI coding assistant designed by the Google Deepmind team working on Advanced Agentic Coding. +You are pair programming with a USER to solve their coding task. The task may require creating a new codebase, modifying or debugging an existing codebase, or simply answering a question. 
+The USER will send you requests, which you must always prioritize addressing. Along with each USER request, we will attach additional metadata about their current state, such as what files they have open and where their cursor is. +This information may or may not be relevant to the coding task, it is up for you to decide. + + +- **Proactiveness**. As an agent, you are allowed to be proactive, but only in the course of completing the user's task. For example, if the user asks you to add a new component, you can edit the code, verify build and test statuses, and take any other obvious follow-up actions, such as performing additional research. However, avoid surprising the user. For example, if the user asks HOW to approach something, you should answer their question and instead of jumping into editing a file.` + +func defaultIdentityPatch(_ string) string { + return antigravityIdentity +} + +// GetDefaultIdentityPatch 返回默认的 Antigravity 身份提示词 +func GetDefaultIdentityPatch() string { + return antigravityIdentity +} + +// buildSystemInstruction 构建 systemInstruction +func buildSystemInstruction(system json.RawMessage, modelName string, opts TransformOptions) *GeminiContent { + var parts []GeminiPart + + // 先解析用户的 system prompt,检测是否已包含 Antigravity identity + userHasAntigravityIdentity := false + var userSystemParts []GeminiPart + + if len(system) > 0 { + // 尝试解析为字符串 + var sysStr string + if err := json.Unmarshal(system, &sysStr); err == nil { + if strings.TrimSpace(sysStr) != "" { + userSystemParts = append(userSystemParts, GeminiPart{Text: sysStr}) + if strings.Contains(sysStr, "You are Antigravity") { + userHasAntigravityIdentity = true + } + } + } else { + // 尝试解析为数组 + var sysBlocks []SystemBlock + if err := json.Unmarshal(system, &sysBlocks); err == nil { + for _, block := range sysBlocks { + if block.Type == "text" && strings.TrimSpace(block.Text) != "" { + userSystemParts = append(userSystemParts, GeminiPart{Text: block.Text}) + if strings.Contains(block.Text, "You are Antigravity") { + userHasAntigravityIdentity = true + } + } + } + } + } + } + + // 仅在用户未提供 Antigravity identity 时注入 + if opts.EnableIdentityPatch && !userHasAntigravityIdentity { + identityPatch := strings.TrimSpace(opts.IdentityPatch) + if identityPatch == "" { + identityPatch = defaultIdentityPatch(modelName) + } + parts = append(parts, GeminiPart{Text: identityPatch}) + } + + // 添加用户的 system prompt + parts = append(parts, userSystemParts...) 
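+	// Illustrative sketch (hypothetical values): for a request whose system
+	// prompt is the single string "Be terse." and with EnableIdentityPatch
+	// set, the slice assembled above ends up as:
+	//
+	//	parts == []GeminiPart{
+	//		{Text: antigravityIdentity}, // injected identity patch first
+	//		{Text: "Be terse."},         // user-supplied system prompt after it
+	//	}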
+ + if len(parts) == 0 { + return nil + } + + return &GeminiContent{ + Role: "user", + Parts: parts, + } +} + +// buildContents 构建 contents +func buildContents(messages []ClaudeMessage, toolIDToName map[string]string, isThinkingEnabled, allowDummyThought bool) ([]GeminiContent, bool, error) { + var contents []GeminiContent + strippedThinking := false + + for i, msg := range messages { + role := msg.Role + if role == "assistant" { + role = "model" + } + + parts, strippedThisMsg, err := buildParts(msg.Content, toolIDToName, allowDummyThought) + if err != nil { + return nil, false, fmt.Errorf("build parts for message %d: %w", i, err) + } + if strippedThisMsg { + strippedThinking = true + } + + // 只有 Gemini 模型支持 dummy thinking block workaround + // 只对最后一条 assistant 消息添加(Pre-fill 场景) + // 历史 assistant 消息不能添加没有 signature 的 dummy thinking block + if allowDummyThought && role == "model" && isThinkingEnabled && i == len(messages)-1 { + hasThoughtPart := false + for _, p := range parts { + if p.Thought { + hasThoughtPart = true + break + } + } + if !hasThoughtPart && len(parts) > 0 { + // 在开头添加 dummy thinking block + parts = append([]GeminiPart{{ + Text: "Thinking...", + Thought: true, + ThoughtSignature: dummyThoughtSignature, + }}, parts...) + } + } + + if len(parts) == 0 { + continue + } + + contents = append(contents, GeminiContent{ + Role: role, + Parts: parts, + }) + } + + return contents, strippedThinking, nil +} + +// dummyThoughtSignature 用于跳过 Gemini 3 thought_signature 验证 +// 参考: https://ai.google.dev/gemini-api/docs/thought-signatures +const dummyThoughtSignature = "skip_thought_signature_validator" + +// buildParts 构建消息的 parts +// allowDummyThought: 只有 Gemini 模型支持 dummy thought signature +func buildParts(content json.RawMessage, toolIDToName map[string]string, allowDummyThought bool) ([]GeminiPart, bool, error) { + var parts []GeminiPart + strippedThinking := false + + // 尝试解析为字符串 + var textContent string + if err := json.Unmarshal(content, &textContent); err == nil { + if textContent != "(no content)" && strings.TrimSpace(textContent) != "" { + parts = append(parts, GeminiPart{Text: strings.TrimSpace(textContent)}) + } + return parts, false, nil + } + + // 解析为内容块数组 + var blocks []ContentBlock + if err := json.Unmarshal(content, &blocks); err != nil { + return nil, false, fmt.Errorf("parse content blocks: %w", err) + } + + for _, block := range blocks { + switch block.Type { + case "text": + if block.Text != "(no content)" && strings.TrimSpace(block.Text) != "" { + parts = append(parts, GeminiPart{Text: block.Text}) + } + + case "thinking": + part := GeminiPart{ + Text: block.Thinking, + Thought: true, + } + // 保留原有 signature(Claude 模型需要有效的 signature) + if block.Signature != "" { + part.ThoughtSignature = block.Signature + } else if !allowDummyThought { + // Claude 模型需要有效 signature;在缺失时降级为普通文本,并在上层禁用 thinking mode。 + if strings.TrimSpace(block.Thinking) != "" { + parts = append(parts, GeminiPart{Text: block.Thinking}) + } + strippedThinking = true + continue + } else { + // Gemini 模型使用 dummy signature + part.ThoughtSignature = dummyThoughtSignature + } + parts = append(parts, part) + + case "image": + if block.Source != nil && block.Source.Type == "base64" { + parts = append(parts, GeminiPart{ + InlineData: &GeminiInlineData{ + MimeType: block.Source.MediaType, + Data: block.Source.Data, + }, + }) + } + + case "tool_use": + // 存储 id -> name 映射 + if block.ID != "" && block.Name != "" { + toolIDToName[block.ID] = block.Name + } + + part := GeminiPart{ + FunctionCall: &GeminiFunctionCall{ + 
Name: block.Name, + Args: block.Input, + ID: block.ID, + }, + } + // tool_use 的 signature 处理: + // - Gemini 模型:使用 dummy signature(跳过 thought_signature 校验) + // - Claude 模型:透传上游返回的真实 signature(Vertex/Google 需要完整签名链路) + if allowDummyThought { + part.ThoughtSignature = dummyThoughtSignature + } else if block.Signature != "" && block.Signature != dummyThoughtSignature { + part.ThoughtSignature = block.Signature + } + parts = append(parts, part) + + case "tool_result": + // 获取函数名 + funcName := block.Name + if funcName == "" { + if name, ok := toolIDToName[block.ToolUseID]; ok { + funcName = name + } else { + funcName = block.ToolUseID + } + } + + // 解析 content + resultContent := parseToolResultContent(block.Content, block.IsError) + + parts = append(parts, GeminiPart{ + FunctionResponse: &GeminiFunctionResponse{ + Name: funcName, + Response: map[string]any{ + "result": resultContent, + }, + ID: block.ToolUseID, + }, + }) + } + } + + return parts, strippedThinking, nil +} + +// parseToolResultContent 解析 tool_result 的 content +func parseToolResultContent(content json.RawMessage, isError bool) string { + if len(content) == 0 { + if isError { + return "Tool execution failed with no output." + } + return "Command executed successfully." + } + + // 尝试解析为字符串 + var str string + if err := json.Unmarshal(content, &str); err == nil { + if strings.TrimSpace(str) == "" { + if isError { + return "Tool execution failed with no output." + } + return "Command executed successfully." + } + return str + } + + // 尝试解析为数组 + var arr []map[string]any + if err := json.Unmarshal(content, &arr); err == nil { + var texts []string + for _, item := range arr { + if text, ok := item["text"].(string); ok { + texts = append(texts, text) + } + } + result := strings.Join(texts, "\n") + if strings.TrimSpace(result) == "" { + if isError { + return "Tool execution failed with no output." + } + return "Command executed successfully." 
+ } + return result + } + + // 返回原始 JSON + return string(content) +} + +// buildGenerationConfig 构建 generationConfig +func buildGenerationConfig(req *ClaudeRequest) *GeminiGenerationConfig { + config := &GeminiGenerationConfig{ + MaxOutputTokens: 64000, // 默认最大输出 + StopSequences: DefaultStopSequences, + } + + // Thinking 配置 + if req.Thinking != nil && req.Thinking.Type == "enabled" { + config.ThinkingConfig = &GeminiThinkingConfig{ + IncludeThoughts: true, + } + if req.Thinking.BudgetTokens > 0 { + budget := req.Thinking.BudgetTokens + // gemini-2.5-flash 上限 24576 + if strings.Contains(req.Model, "gemini-2.5-flash") && budget > 24576 { + budget = 24576 + } + config.ThinkingConfig.ThinkingBudget = budget + } + } + + // 其他参数 + if req.Temperature != nil { + config.Temperature = req.Temperature + } + if req.TopP != nil { + config.TopP = req.TopP + } + if req.TopK != nil { + config.TopK = req.TopK + } + + return config +} + +// buildTools 构建 tools +func buildTools(tools []ClaudeTool) []GeminiToolDeclaration { + if len(tools) == 0 { + return nil + } + + // 检查是否有 web_search 工具 + hasWebSearch := false + for _, tool := range tools { + if tool.Name == "web_search" { + hasWebSearch = true + break + } + } + + if hasWebSearch { + // Web Search 工具映射 + return []GeminiToolDeclaration{{ + GoogleSearch: &GeminiGoogleSearch{ + EnhancedContent: &GeminiEnhancedContent{ + ImageSearch: &GeminiImageSearch{ + MaxResultCount: 5, + }, + }, + }, + }} + } + + // 普通工具 + var funcDecls []GeminiFunctionDecl + for _, tool := range tools { + // 跳过无效工具名称 + if strings.TrimSpace(tool.Name) == "" { + log.Printf("Warning: skipping tool with empty name") + continue + } + + var description string + var inputSchema map[string]any + + // 检查是否为 custom 类型工具 (MCP) + if tool.Type == "custom" { + if tool.Custom == nil || tool.Custom.InputSchema == nil { + log.Printf("[Warning] Skipping invalid custom tool '%s': missing custom spec or input_schema", tool.Name) + continue + } + description = tool.Custom.Description + inputSchema = tool.Custom.InputSchema + + } else { + // 标准格式: 从顶层字段获取 + description = tool.Description + inputSchema = tool.InputSchema + } + + // 清理 JSON Schema + params := cleanJSONSchema(inputSchema) + // 为 nil schema 提供默认值 + if params == nil { + params = map[string]any{ + "type": "OBJECT", + "properties": map[string]any{}, + } + } + + funcDecls = append(funcDecls, GeminiFunctionDecl{ + Name: tool.Name, + Description: description, + Parameters: params, + }) + } + + if len(funcDecls) == 0 { + return nil + } + + return []GeminiToolDeclaration{{ + FunctionDeclarations: funcDecls, + }} +} + +// cleanJSONSchema 清理 JSON Schema,移除 Antigravity/Gemini 不支持的字段 +// 参考 proxycast 的实现,确保 schema 符合 JSON Schema draft 2020-12 +func cleanJSONSchema(schema map[string]any) map[string]any { + if schema == nil { + return nil + } + cleaned := cleanSchemaValue(schema, "$") + result, ok := cleaned.(map[string]any) + if !ok { + return nil + } + + // 确保有 type 字段(默认 OBJECT) + if _, hasType := result["type"]; !hasType { + result["type"] = "OBJECT" + } + + // 确保有 properties 字段(默认空对象) + if _, hasProps := result["properties"]; !hasProps { + result["properties"] = make(map[string]any) + } + + // 验证 required 中的字段都存在于 properties 中 + if required, ok := result["required"].([]any); ok { + if props, ok := result["properties"].(map[string]any); ok { + validRequired := make([]any, 0, len(required)) + for _, r := range required { + if reqName, ok := r.(string); ok { + if _, exists := props[reqName]; exists { + validRequired = append(validRequired, r) + } + } + } + 
if len(validRequired) > 0 { + result["required"] = validRequired + } else { + delete(result, "required") + } + } + } + + return result +} + +var schemaValidationKeys = map[string]bool{ + "minLength": true, + "maxLength": true, + "pattern": true, + "minimum": true, + "maximum": true, + "exclusiveMinimum": true, + "exclusiveMaximum": true, + "multipleOf": true, + "uniqueItems": true, + "minItems": true, + "maxItems": true, + "minProperties": true, + "maxProperties": true, + "patternProperties": true, + "propertyNames": true, + "dependencies": true, + "dependentSchemas": true, + "dependentRequired": true, +} + +var warnedSchemaKeys sync.Map + +func schemaCleaningWarningsEnabled() bool { + // 可通过环境变量强制开关,方便排查:SUB2API_SCHEMA_CLEAN_WARN=true/false + if v := strings.TrimSpace(os.Getenv("SUB2API_SCHEMA_CLEAN_WARN")); v != "" { + switch strings.ToLower(v) { + case "1", "true", "yes", "on": + return true + case "0", "false", "no", "off": + return false + } + } + // 默认:非 release 模式下输出(debug/test) + return gin.Mode() != gin.ReleaseMode +} + +func warnSchemaKeyRemovedOnce(key, path string) { + if !schemaCleaningWarningsEnabled() { + return + } + if !schemaValidationKeys[key] { + return + } + if _, loaded := warnedSchemaKeys.LoadOrStore(key, struct{}{}); loaded { + return + } + log.Printf("[SchemaClean] removed unsupported JSON Schema validation field key=%q path=%q", key, path) +} + +// excludedSchemaKeys 不支持的 schema 字段 +// 基于 Claude API (Vertex AI) 的实际支持情况 +// 支持: type, description, enum, properties, required, additionalProperties, items +// 不支持: minItems, maxItems, minLength, maxLength, pattern, minimum, maximum 等验证字段 +var excludedSchemaKeys = map[string]bool{ + // 元 schema 字段 + "$schema": true, + "$id": true, + "$ref": true, + + // 字符串验证(Gemini 不支持) + "minLength": true, + "maxLength": true, + "pattern": true, + + // 数字验证(Claude API 通过 Vertex AI 不支持这些字段) + "minimum": true, + "maximum": true, + "exclusiveMinimum": true, + "exclusiveMaximum": true, + "multipleOf": true, + + // 数组验证(Claude API 通过 Vertex AI 不支持这些字段) + "uniqueItems": true, + "minItems": true, + "maxItems": true, + + // 组合 schema(Gemini 不支持) + "oneOf": true, + "anyOf": true, + "allOf": true, + "not": true, + "if": true, + "then": true, + "else": true, + "$defs": true, + "definitions": true, + + // 对象验证(仅保留 properties/required/additionalProperties) + "minProperties": true, + "maxProperties": true, + "patternProperties": true, + "propertyNames": true, + "dependencies": true, + "dependentSchemas": true, + "dependentRequired": true, + + // 其他不支持的字段 + "default": true, + "const": true, + "examples": true, + "deprecated": true, + "readOnly": true, + "writeOnly": true, + "contentMediaType": true, + "contentEncoding": true, + + // Claude 特有字段 + "strict": true, +} + +// cleanSchemaValue 递归清理 schema 值 +func cleanSchemaValue(value any, path string) any { + switch v := value.(type) { + case map[string]any: + result := make(map[string]any) + for k, val := range v { + // 跳过不支持的字段 + if excludedSchemaKeys[k] { + warnSchemaKeyRemovedOnce(k, path) + continue + } + + // 特殊处理 type 字段 + if k == "type" { + result[k] = cleanTypeValue(val) + continue + } + + // 特殊处理 format 字段:只保留 Gemini 支持的 format 值 + if k == "format" { + if formatStr, ok := val.(string); ok { + // Gemini 只支持 date-time, date, time + if formatStr == "date-time" || formatStr == "date" || formatStr == "time" { + result[k] = val + } + // 其他 format 值直接跳过 + } + continue + } + + // 特殊处理 additionalProperties:Claude API 只支持布尔值,不支持 schema 对象 + if k == "additionalProperties" { + if boolVal, ok := val.(bool); 
ok { + result[k] = boolVal + } else { + // 如果是 schema 对象,转换为 false(更安全的默认值) + result[k] = false + } + continue + } + + // 递归清理所有值 + result[k] = cleanSchemaValue(val, path+"."+k) + } + return result + + case []any: + // 递归处理数组中的每个元素 + cleaned := make([]any, 0, len(v)) + for i, item := range v { + cleaned = append(cleaned, cleanSchemaValue(item, fmt.Sprintf("%s[%d]", path, i))) + } + return cleaned + + default: + return value + } +} + +// cleanTypeValue 处理 type 字段,转换为大写 +func cleanTypeValue(value any) any { + switch v := value.(type) { + case string: + return strings.ToUpper(v) + case []any: + // 联合类型 ["string", "null"] -> 取第一个非 null 类型 + for _, t := range v { + if ts, ok := t.(string); ok && ts != "null" { + return strings.ToUpper(ts) + } + } + // 如果只有 null,返回 STRING + return "STRING" + default: + return value + } +} diff --git a/backend/internal/pkg/antigravity/request_transformer_test.go b/backend/internal/pkg/antigravity/request_transformer_test.go new file mode 100644 index 00000000..60ee6f63 --- /dev/null +++ b/backend/internal/pkg/antigravity/request_transformer_test.go @@ -0,0 +1,244 @@ +package antigravity + +import ( + "encoding/json" + "testing" +) + +// TestBuildParts_ThinkingBlockWithoutSignature 测试thinking block无signature时的处理 +func TestBuildParts_ThinkingBlockWithoutSignature(t *testing.T) { + tests := []struct { + name string + content string + allowDummyThought bool + expectedParts int + description string + }{ + { + name: "Claude model - downgrade thinking to text without signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 3, // thinking 内容降级为普通 text part + description: "Claude模型缺少signature时应将thinking降级为text,并在上层禁用thinking mode", + }, + { + name: "Claude model - preserve thinking block with signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": "sig_real_123"}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: false, + expectedParts: 3, + description: "Claude模型应透传带 signature 的 thinking block(用于 Vertex 签名链路)", + }, + { + name: "Gemini model - use dummy signature", + content: `[ + {"type": "text", "text": "Hello"}, + {"type": "thinking", "thinking": "Let me think...", "signature": ""}, + {"type": "text", "text": "World"} + ]`, + allowDummyThought: true, + expectedParts: 3, // 三个block都保留,thinking使用dummy signature + description: "Gemini模型应该为无signature的thinking block使用dummy signature", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + toolIDToName := make(map[string]string) + parts, _, err := buildParts(json.RawMessage(tt.content), toolIDToName, tt.allowDummyThought) + + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + + if len(parts) != tt.expectedParts { + t.Errorf("%s: got %d parts, want %d parts", tt.description, len(parts), tt.expectedParts) + } + + switch tt.name { + case "Claude model - preserve thinking block with signature": + if len(parts) != 3 { + t.Fatalf("expected 3 parts, got %d", len(parts)) + } + if !parts[1].Thought || parts[1].ThoughtSignature != "sig_real_123" { + t.Fatalf("expected thought part with signature sig_real_123, got thought=%v signature=%q", + parts[1].Thought, parts[1].ThoughtSignature) + } + case "Claude model - downgrade thinking to text without signature": + if len(parts) != 3 { + t.Fatalf("expected 3 parts, got %d", len(parts)) + } + if 
parts[1].Thought { + t.Fatalf("expected downgraded text part, got thought=%v signature=%q", + parts[1].Thought, parts[1].ThoughtSignature) + } + if parts[1].Text != "Let me think..." { + t.Fatalf("expected downgraded text %q, got %q", "Let me think...", parts[1].Text) + } + case "Gemini model - use dummy signature": + if len(parts) != 3 { + t.Fatalf("expected 3 parts, got %d", len(parts)) + } + if !parts[1].Thought || parts[1].ThoughtSignature != dummyThoughtSignature { + t.Fatalf("expected dummy thought signature, got thought=%v signature=%q", + parts[1].Thought, parts[1].ThoughtSignature) + } + } + }) + } +} + +func TestBuildParts_ToolUseSignatureHandling(t *testing.T) { + content := `[ + {"type": "tool_use", "id": "t1", "name": "Bash", "input": {"command": "ls"}, "signature": "sig_tool_abc"} + ]` + + t.Run("Gemini uses dummy tool_use signature", func(t *testing.T) { + toolIDToName := make(map[string]string) + parts, _, err := buildParts(json.RawMessage(content), toolIDToName, true) + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + if len(parts) != 1 || parts[0].FunctionCall == nil { + t.Fatalf("expected 1 functionCall part, got %+v", parts) + } + if parts[0].ThoughtSignature != dummyThoughtSignature { + t.Fatalf("expected dummy tool signature %q, got %q", dummyThoughtSignature, parts[0].ThoughtSignature) + } + }) + + t.Run("Claude model - preserve valid signature for tool_use", func(t *testing.T) { + toolIDToName := make(map[string]string) + parts, _, err := buildParts(json.RawMessage(content), toolIDToName, false) + if err != nil { + t.Fatalf("buildParts() error = %v", err) + } + if len(parts) != 1 || parts[0].FunctionCall == nil { + t.Fatalf("expected 1 functionCall part, got %+v", parts) + } + // Claude 模型应透传有效的 signature(Vertex/Google 需要完整签名链路) + if parts[0].ThoughtSignature != "sig_tool_abc" { + t.Fatalf("expected preserved tool signature %q, got %q", "sig_tool_abc", parts[0].ThoughtSignature) + } + }) +} + +// TestBuildTools_CustomTypeTools 测试custom类型工具转换 +func TestBuildTools_CustomTypeTools(t *testing.T) { + tests := []struct { + name string + tools []ClaudeTool + expectedLen int + description string + }{ + { + name: "Standard tool format", + tools: []ClaudeTool{ + { + Name: "get_weather", + Description: "Get weather information", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "location": map[string]any{"type": "string"}, + }, + }, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "mcp_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "MCP tool description", + InputSchema: map[string]any{ + "type": "object", + "properties": map[string]any{ + "param": map[string]any{"type": "string"}, + }, + }, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从Custom字段读取description和input_schema", + }, + { + name: "Mixed standard and custom tools", + tools: []ClaudeTool{ + { + Name: "standard_tool", + Description: "Standard tool", + InputSchema: map[string]any{"type": "object"}, + }, + { + Type: "custom", + Name: "custom_tool", + Custom: &ClaudeCustomToolSpec{ + Description: "Custom tool", + InputSchema: map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, // 返回一个GeminiToolDeclaration,包含2个function declarations + description: "混合标准和custom工具应该都能正确转换", + }, + { + name: "Invalid custom tool - nil Custom field", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + // Custom 为 nil + }, + }, 
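+			// buildTools is expected to log a warning and skip this tool
+			// entirely, so no GeminiToolDeclaration is produced at all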
+ expectedLen: 0, // 应该被跳过 + description: "Custom字段为nil的custom工具应该被跳过", + }, + { + name: "Invalid custom tool - nil InputSchema", + tools: []ClaudeTool{ + { + Type: "custom", + Name: "invalid_custom", + Custom: &ClaudeCustomToolSpec{ + Description: "Invalid", + // InputSchema 为 nil + }, + }, + }, + expectedLen: 0, // 应该被跳过 + description: "InputSchema为nil的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := buildTools(tt.tools) + + if len(result) != tt.expectedLen { + t.Errorf("%s: got %d tool declarations, want %d", tt.description, len(result), tt.expectedLen) + } + + // 验证function declarations存在 + if len(result) > 0 && result[0].FunctionDeclarations != nil { + if len(result[0].FunctionDeclarations) != len(tt.tools) { + t.Errorf("%s: got %d function declarations, want %d", + tt.description, len(result[0].FunctionDeclarations), len(tt.tools)) + } + } + }) + } +} diff --git a/backend/internal/pkg/antigravity/response_transformer.go b/backend/internal/pkg/antigravity/response_transformer.go new file mode 100644 index 00000000..cd7f5f80 --- /dev/null +++ b/backend/internal/pkg/antigravity/response_transformer.go @@ -0,0 +1,273 @@ +package antigravity + +import ( + "encoding/json" + "fmt" +) + +// TransformGeminiToClaude 将 Gemini 响应转换为 Claude 格式(非流式) +func TransformGeminiToClaude(geminiResp []byte, originalModel string) ([]byte, *ClaudeUsage, error) { + // 解包 v1internal 响应 + var v1Resp V1InternalResponse + if err := json.Unmarshal(geminiResp, &v1Resp); err != nil { + // 尝试直接解析为 GeminiResponse + var directResp GeminiResponse + if err2 := json.Unmarshal(geminiResp, &directResp); err2 != nil { + return nil, nil, fmt.Errorf("parse gemini response: %w", err) + } + v1Resp.Response = directResp + v1Resp.ResponseID = directResp.ResponseID + v1Resp.ModelVersion = directResp.ModelVersion + } + + // 使用处理器转换 + processor := NewNonStreamingProcessor() + claudeResp := processor.Process(&v1Resp.Response, v1Resp.ResponseID, originalModel) + + // 序列化 + respBytes, err := json.Marshal(claudeResp) + if err != nil { + return nil, nil, fmt.Errorf("marshal claude response: %w", err) + } + + return respBytes, &claudeResp.Usage, nil +} + +// NonStreamingProcessor 非流式响应处理器 +type NonStreamingProcessor struct { + contentBlocks []ClaudeContentItem + textBuilder string + thinkingBuilder string + thinkingSignature string + trailingSignature string + hasToolCall bool +} + +// NewNonStreamingProcessor 创建非流式响应处理器 +func NewNonStreamingProcessor() *NonStreamingProcessor { + return &NonStreamingProcessor{ + contentBlocks: make([]ClaudeContentItem, 0), + } +} + +// Process 处理 Gemini 响应 +func (p *NonStreamingProcessor) Process(geminiResp *GeminiResponse, responseID, originalModel string) *ClaudeResponse { + // 获取 parts + var parts []GeminiPart + if len(geminiResp.Candidates) > 0 && geminiResp.Candidates[0].Content != nil { + parts = geminiResp.Candidates[0].Content.Parts + } + + // 处理所有 parts + for _, part := range parts { + p.processPart(&part) + } + + // 刷新剩余内容 + p.flushThinking() + p.flushText() + + // 处理 trailingSignature + if p.trailingSignature != "" { + p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{ + Type: "thinking", + Thinking: "", + Signature: p.trailingSignature, + }) + } + + // 构建响应 + return p.buildResponse(geminiResp, responseID, originalModel) +} + +// processPart 处理单个 part +func (p *NonStreamingProcessor) processPart(part *GeminiPart) { + signature := part.ThoughtSignature + + // 1. 
FunctionCall handling
+	if part.FunctionCall != nil {
+		p.flushThinking()
+		p.flushText()
+
+		// Emit any stashed trailing signature as an empty thinking block
+		if p.trailingSignature != "" {
+			p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+				Type:      "thinking",
+				Thinking:  "",
+				Signature: p.trailingSignature,
+			})
+			p.trailingSignature = ""
+		}
+
+		p.hasToolCall = true
+
+		// Generate a tool_use id
+		toolID := part.FunctionCall.ID
+		if toolID == "" {
+			toolID = fmt.Sprintf("%s-%s", part.FunctionCall.Name, generateRandomID())
+		}
+
+		item := ClaudeContentItem{
+			Type:  "tool_use",
+			ID:    toolID,
+			Name:  part.FunctionCall.Name,
+			Input: part.FunctionCall.Args,
+		}
+
+		if signature != "" {
+			item.Signature = signature
+		}
+
+		p.contentBlocks = append(p.contentBlocks, item)
+		return
+	}
+
+	// 2. Text handling (a part carrying only a thoughtSignature must also
+	// enter this branch so the signature can be stashed below)
+	if part.Text != "" || part.Thought || signature != "" {
+		if part.Thought {
+			// Thinking part
+			p.flushText()
+
+			// Emit any stashed trailing signature first
+			if p.trailingSignature != "" {
+				p.flushThinking()
+				p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+					Type:      "thinking",
+					Thinking:  "",
+					Signature: p.trailingSignature,
+				})
+				p.trailingSignature = ""
+			}
+
+			p.thinkingBuilder += part.Text
+			if signature != "" {
+				p.thinkingSignature = signature
+			}
+		} else {
+			// Plain text
+			if part.Text == "" {
+				// Empty text carrying a signature: stash it for later
+				if signature != "" {
+					p.trailingSignature = signature
+				}
+				return
+			}
+
+			p.flushThinking()
+
+			// Emit any previously stashed trailing signature
+			if p.trailingSignature != "" {
+				p.flushText()
+				p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+					Type:      "thinking",
+					Thinking:  "",
+					Signature: p.trailingSignature,
+				})
+				p.trailingSignature = ""
+			}
+
+			p.textBuilder += part.Text
+
+			// Non-empty text carrying a signature: flush now and emit an
+			// empty thinking block to carry the signature
+			if signature != "" {
+				p.flushText()
+				p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+					Type:      "thinking",
+					Thinking:  "",
+					Signature: signature,
+				})
+			}
+		}
+	}
+
+	// 3. InlineData (image) handling
+	if part.InlineData != nil && part.InlineData.Data != "" {
+		p.flushThinking()
+		markdownImg := fmt.Sprintf("![image](data:%s;base64,%s)",
+			part.InlineData.MimeType, part.InlineData.Data)
+		p.textBuilder += markdownImg
+		p.flushText()
+	}
+}
+
+// flushText flushes the buffered text into a text content block
+func (p *NonStreamingProcessor) flushText() {
+	if p.textBuilder == "" {
+		return
+	}
+
+	p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+		Type: "text",
+		Text: p.textBuilder,
+	})
+	p.textBuilder = ""
+}
+
+// flushThinking flushes the buffered thinking content into a thinking block
+func (p *NonStreamingProcessor) flushThinking() {
+	if p.thinkingBuilder == "" && p.thinkingSignature == "" {
+		return
+	}
+
+	p.contentBlocks = append(p.contentBlocks, ClaudeContentItem{
+		Type:      "thinking",
+		Thinking:  p.thinkingBuilder,
+		Signature: p.thinkingSignature,
+	})
+	p.thinkingBuilder = ""
+	p.thinkingSignature = ""
+}
+
+// buildResponse assembles the final Claude response
+func (p *NonStreamingProcessor) buildResponse(geminiResp *GeminiResponse, responseID, originalModel string) *ClaudeResponse {
+	var finishReason string
+	if len(geminiResp.Candidates) > 0 {
+		finishReason = geminiResp.Candidates[0].FinishReason
+	}
+
+	stopReason := "end_turn"
+	if p.hasToolCall {
+		stopReason = "tool_use"
+	} else if finishReason == "MAX_TOKENS" {
+		stopReason = "max_tokens"
+	}
+
+	// Note: Gemini's promptTokenCount includes cachedContentTokenCount,
+	// while Claude's input_tokens excludes cache_read_input_tokens, so the
+	// cached share must be subtracted
+	usage := ClaudeUsage{}
+	if geminiResp.UsageMetadata != nil {
+		cached := geminiResp.UsageMetadata.CachedContentTokenCount
+		usage.InputTokens = geminiResp.UsageMetadata.PromptTokenCount - cached
+		usage.OutputTokens = geminiResp.UsageMetadata.CandidatesTokenCount
+		usage.CacheReadInputTokens = cached
+	}
+
+	// Derive the response ID
+	respID := responseID
+	if respID == "" {
+		respID = geminiResp.ResponseID
+	}
+	if respID == "" {
+		respID = "msg_" + generateRandomID()
+	}
+
+	return &ClaudeResponse{
+		ID:         respID,
+		Type:       "message",
+		Role:       "assistant",
+		Model:      originalModel,
+		Content:    p.contentBlocks,
+		StopReason: stopReason,
+		Usage:      usage,
+	}
+}
+
+// generateRandomID generates a random 12-character ID, drawing from the
+// package-level seeded source so successive calls yield distinct IDs
+func generateRandomID() string {
+	const chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+	result := make([]byte, 12)
+	sessionRandMutex.Lock()
+	for i := range result {
+		result[i] = chars[sessionRand.Intn(len(chars))]
+	}
+	sessionRandMutex.Unlock()
+	return string(result)
+}
diff --git a/backend/internal/pkg/antigravity/stream_transformer.go b/backend/internal/pkg/antigravity/stream_transformer.go
new file mode 100644
index 00000000..9fe68a11
--- /dev/null
+++ b/backend/internal/pkg/antigravity/stream_transformer.go
@@ -0,0 +1,464 @@
+package antigravity
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// BlockType identifies the kind of content block being emitted
+type BlockType int
+
+const (
+	BlockTypeNone BlockType = iota
+	BlockTypeText
+	BlockTypeThinking
+	BlockTypeFunction
+)
+
+// StreamingProcessor converts a Gemini SSE stream into Claude SSE events
+type StreamingProcessor struct {
+	blockType         BlockType
+	blockIndex        int
+	messageStartSent  bool
+	messageStopSent   bool
+	usedTool          bool
+	pendingSignature  string
+	trailingSignature string
+	originalModel     string
+
+	// accumulated usage
+	inputTokens     int
+	outputTokens    int
+	cacheReadTokens int
+}
+
+// NewStreamingProcessor creates a streaming response processor
+func NewStreamingProcessor(originalModel string) *StreamingProcessor {
+	return &StreamingProcessor{
+		blockType:     BlockTypeNone,
+		originalModel: originalModel,
+	}
+}
+
+// ProcessLine consumes one SSE line and returns the resulting Claude SSE events
+func (p *StreamingProcessor) ProcessLine(line string) []byte {
+	line = strings.TrimSpace(line)
+	if line == "" ||
!strings.HasPrefix(line, "data:") { + return nil + } + + data := strings.TrimSpace(strings.TrimPrefix(line, "data:")) + if data == "" || data == "[DONE]" { + return nil + } + + // 解包 v1internal 响应 + var v1Resp V1InternalResponse + if err := json.Unmarshal([]byte(data), &v1Resp); err != nil { + // 尝试直接解析为 GeminiResponse + var directResp GeminiResponse + if err2 := json.Unmarshal([]byte(data), &directResp); err2 != nil { + return nil + } + v1Resp.Response = directResp + v1Resp.ResponseID = directResp.ResponseID + v1Resp.ModelVersion = directResp.ModelVersion + } + + geminiResp := &v1Resp.Response + + var result bytes.Buffer + + // 发送 message_start + if !p.messageStartSent { + _, _ = result.Write(p.emitMessageStart(&v1Resp)) + } + + // 更新 usage + // 注意:Gemini 的 promptTokenCount 包含 cachedContentTokenCount, + // 但 Claude 的 input_tokens 不包含 cache_read_input_tokens,需要减去 + if geminiResp.UsageMetadata != nil { + cached := geminiResp.UsageMetadata.CachedContentTokenCount + p.inputTokens = geminiResp.UsageMetadata.PromptTokenCount - cached + p.outputTokens = geminiResp.UsageMetadata.CandidatesTokenCount + p.cacheReadTokens = cached + } + + // 处理 parts + if len(geminiResp.Candidates) > 0 && geminiResp.Candidates[0].Content != nil { + for _, part := range geminiResp.Candidates[0].Content.Parts { + _, _ = result.Write(p.processPart(&part)) + } + } + + // 检查是否结束 + if len(geminiResp.Candidates) > 0 { + finishReason := geminiResp.Candidates[0].FinishReason + if finishReason != "" { + _, _ = result.Write(p.emitFinish(finishReason)) + } + } + + return result.Bytes() +} + +// Finish 结束处理,返回最终事件和用量 +func (p *StreamingProcessor) Finish() ([]byte, *ClaudeUsage) { + var result bytes.Buffer + + if !p.messageStopSent { + _, _ = result.Write(p.emitFinish("")) + } + + usage := &ClaudeUsage{ + InputTokens: p.inputTokens, + OutputTokens: p.outputTokens, + CacheReadInputTokens: p.cacheReadTokens, + } + + return result.Bytes(), usage +} + +// emitMessageStart 发送 message_start 事件 +func (p *StreamingProcessor) emitMessageStart(v1Resp *V1InternalResponse) []byte { + if p.messageStartSent { + return nil + } + + usage := ClaudeUsage{} + if v1Resp.Response.UsageMetadata != nil { + cached := v1Resp.Response.UsageMetadata.CachedContentTokenCount + usage.InputTokens = v1Resp.Response.UsageMetadata.PromptTokenCount - cached + usage.OutputTokens = v1Resp.Response.UsageMetadata.CandidatesTokenCount + usage.CacheReadInputTokens = cached + } + + responseID := v1Resp.ResponseID + if responseID == "" { + responseID = v1Resp.Response.ResponseID + } + if responseID == "" { + responseID = "msg_" + generateRandomID() + } + + message := map[string]any{ + "id": responseID, + "type": "message", + "role": "assistant", + "content": []any{}, + "model": p.originalModel, + "stop_reason": nil, + "stop_sequence": nil, + "usage": usage, + } + + event := map[string]any{ + "type": "message_start", + "message": message, + } + + p.messageStartSent = true + return p.formatSSE("message_start", event) +} + +// processPart 处理单个 part +func (p *StreamingProcessor) processPart(part *GeminiPart) []byte { + var result bytes.Buffer + signature := part.ThoughtSignature + + // 1. FunctionCall 处理 + if part.FunctionCall != nil { + // 先处理 trailingSignature + if p.trailingSignature != "" { + _, _ = result.Write(p.endBlock()) + _, _ = result.Write(p.emitEmptyThinkingWithSignature(p.trailingSignature)) + p.trailingSignature = "" + } + + _, _ = result.Write(p.processFunctionCall(part.FunctionCall, signature)) + return result.Bytes() + } + + // 2. 
Text handling (parts carrying only a thoughtSignature also enter this
+	// branch so processText can stash the signature)
+	if part.Text != "" || part.Thought || signature != "" {
+		if part.Thought {
+			_, _ = result.Write(p.processThinking(part.Text, signature))
+		} else {
+			_, _ = result.Write(p.processText(part.Text, signature))
+		}
+	}
+
+	// 3. InlineData (image) handling
+	if part.InlineData != nil && part.InlineData.Data != "" {
+		markdownImg := fmt.Sprintf("![image](data:%s;base64,%s)",
+			part.InlineData.MimeType, part.InlineData.Data)
+		_, _ = result.Write(p.processText(markdownImg, ""))
+	}
+
+	return result.Bytes()
+}
+
+// processThinking handles a thinking part
+func (p *StreamingProcessor) processThinking(text, signature string) []byte {
+	var result bytes.Buffer
+
+	// Emit any previously stashed trailing signature
+	if p.trailingSignature != "" {
+		_, _ = result.Write(p.endBlock())
+		_, _ = result.Write(p.emitEmptyThinkingWithSignature(p.trailingSignature))
+		p.trailingSignature = ""
+	}
+
+	// Start or continue the thinking block
+	if p.blockType != BlockTypeThinking {
+		_, _ = result.Write(p.startBlock(BlockTypeThinking, map[string]any{
+			"type":     "thinking",
+			"thinking": "",
+		}))
+	}
+
+	if text != "" {
+		_, _ = result.Write(p.emitDelta("thinking_delta", map[string]any{
+			"thinking": text,
+		}))
+	}
+
+	// Stash the signature until the block is closed
+	if signature != "" {
+		p.pendingSignature = signature
+	}
+
+	return result.Bytes()
+}
+
+// processText handles a plain text part
+func (p *StreamingProcessor) processText(text, signature string) []byte {
+	var result bytes.Buffer
+
+	// Empty text carrying a signature: stash it for later
+	if text == "" {
+		if signature != "" {
+			p.trailingSignature = signature
+		}
+		return nil
+	}
+
+	// Emit any previously stashed trailing signature
+	if p.trailingSignature != "" {
+		_, _ = result.Write(p.endBlock())
+		_, _ = result.Write(p.emitEmptyThinkingWithSignature(p.trailingSignature))
+		p.trailingSignature = ""
+	}
+
+	// Non-empty text carrying a signature is special-cased: emit the text
+	// block, then an empty thinking block carrying the signature
+	if signature != "" {
+		_, _ = result.Write(p.startBlock(BlockTypeText, map[string]any{
+			"type": "text",
+			"text": "",
+		}))
+		_, _ = result.Write(p.emitDelta("text_delta", map[string]any{
+			"text": text,
+		}))
+		_, _ = result.Write(p.endBlock())
+		_, _ = result.Write(p.emitEmptyThinkingWithSignature(signature))
+		return result.Bytes()
+	}
+
+	// Plain text (no signature)
+	if p.blockType != BlockTypeText {
+		_, _ = result.Write(p.startBlock(BlockTypeText, map[string]any{
+			"type": "text",
+			"text": "",
+		}))
+	}
+
+	_, _ = result.Write(p.emitDelta("text_delta", map[string]any{
+		"text": text,
+	}))
+
+	return result.Bytes()
+}
+
+// processFunctionCall handles a function call part
+func (p *StreamingProcessor) processFunctionCall(fc *GeminiFunctionCall, signature string) []byte {
+	var result bytes.Buffer
+
+	p.usedTool = true
+
+	toolID := fc.ID
+	if toolID == "" {
+		toolID = fmt.Sprintf("%s-%s", fc.Name, generateRandomID())
+	}
+
+	toolUse := map[string]any{
+		"type":  "tool_use",
+		"id":    toolID,
+		"name":  fc.Name,
+		"input": map[string]any{},
+	}
+
+	if signature != "" {
+		toolUse["signature"] = signature
+	}
+
+	_, _ = result.Write(p.startBlock(BlockTypeFunction, toolUse))
+
+	// Emit input_json_delta with the full arguments payload
+	if fc.Args != nil {
+		argsJSON, _ := json.Marshal(fc.Args)
+		_, _ = result.Write(p.emitDelta("input_json_delta", map[string]any{
+			"partial_json": string(argsJSON),
+		}))
+	}
+
+	_, _ = result.Write(p.endBlock())
+
+	return result.Bytes()
+}
+
+// startBlock opens a new content block, closing any block still open
+func (p *StreamingProcessor) startBlock(blockType BlockType, contentBlock map[string]any) []byte {
+	var result bytes.Buffer
+
+	if p.blockType != BlockTypeNone {
+		_, _ = result.Write(p.endBlock())
+	}
+
+	event := map[string]any{
+		"type":          "content_block_start",
+		"index":         p.blockIndex,
+		"content_block": contentBlock,
+	}
+
+	_, _ =
+// startBlock opens a new content block.
+func (p *StreamingProcessor) startBlock(blockType BlockType, contentBlock map[string]any) []byte {
+	var result bytes.Buffer
+
+	if p.blockType != BlockTypeNone {
+		_, _ = result.Write(p.endBlock())
+	}
+
+	event := map[string]any{
+		"type":          "content_block_start",
+		"index":         p.blockIndex,
+		"content_block": contentBlock,
+	}
+
+	_, _ = result.Write(p.formatSSE("content_block_start", event))
+	p.blockType = blockType
+
+	return result.Bytes()
+}
+
+// endBlock closes the current content block.
+func (p *StreamingProcessor) endBlock() []byte {
+	if p.blockType == BlockTypeNone {
+		return nil
+	}
+
+	var result bytes.Buffer
+
+	// When a thinking block closes, emit the stashed signature
+	if p.blockType == BlockTypeThinking && p.pendingSignature != "" {
+		_, _ = result.Write(p.emitDelta("signature_delta", map[string]any{
+			"signature": p.pendingSignature,
+		}))
+		p.pendingSignature = ""
+	}
+
+	event := map[string]any{
+		"type":  "content_block_stop",
+		"index": p.blockIndex,
+	}
+
+	_, _ = result.Write(p.formatSSE("content_block_stop", event))
+
+	p.blockIndex++
+	p.blockType = BlockTypeNone
+
+	return result.Bytes()
+}
+
+// emitDelta emits a delta event.
+func (p *StreamingProcessor) emitDelta(deltaType string, deltaContent map[string]any) []byte {
+	delta := map[string]any{
+		"type": deltaType,
+	}
+	for k, v := range deltaContent {
+		delta[k] = v
+	}
+
+	event := map[string]any{
+		"type":  "content_block_delta",
+		"index": p.blockIndex,
+		"delta": delta,
+	}
+
+	return p.formatSSE("content_block_delta", event)
+}
+
+// emitEmptyThinkingWithSignature emits an empty thinking block to carry a signature.
+func (p *StreamingProcessor) emitEmptyThinkingWithSignature(signature string) []byte {
+	var result bytes.Buffer
+
+	_, _ = result.Write(p.startBlock(BlockTypeThinking, map[string]any{
+		"type":     "thinking",
+		"thinking": "",
+	}))
+	_, _ = result.Write(p.emitDelta("thinking_delta", map[string]any{
+		"thinking": "",
+	}))
+	_, _ = result.Write(p.emitDelta("signature_delta", map[string]any{
+		"signature": signature,
+	}))
+	_, _ = result.Write(p.endBlock())
+
+	return result.Bytes()
+}
+
+// emitFinish emits the closing events.
+func (p *StreamingProcessor) emitFinish(finishReason string) []byte {
+	var result bytes.Buffer
+
+	// Close the last open block
+	_, _ = result.Write(p.endBlock())
+
+	// Flush any pending trailingSignature
+	if p.trailingSignature != "" {
+		_, _ = result.Write(p.emitEmptyThinkingWithSignature(p.trailingSignature))
+		p.trailingSignature = ""
+	}
+
+	// Determine the stop_reason
+	stopReason := "end_turn"
+	if p.usedTool {
+		stopReason = "tool_use"
+	} else if finishReason == "MAX_TOKENS" {
+		stopReason = "max_tokens"
+	}
+
+	usage := ClaudeUsage{
+		InputTokens:          p.inputTokens,
+		OutputTokens:         p.outputTokens,
+		CacheReadInputTokens: p.cacheReadTokens,
+	}
+
+	deltaEvent := map[string]any{
+		"type": "message_delta",
+		"delta": map[string]any{
+			"stop_reason":   stopReason,
+			"stop_sequence": nil,
+		},
+		"usage": usage,
+	}
+
+	_, _ = result.Write(p.formatSSE("message_delta", deltaEvent))
+
+	if !p.messageStopSent {
+		stopEvent := map[string]any{
+			"type": "message_stop",
+		}
+		_, _ = result.Write(p.formatSSE("message_stop", stopEvent))
+		p.messageStopSent = true
+	}
+
+	return result.Bytes()
+}
+
+// formatSSE formats an SSE event.
+func (p *StreamingProcessor) formatSSE(eventType string, data any) []byte {
+	jsonData, err := json.Marshal(data)
+	if err != nil {
+		return nil
+	}
+
+	return []byte(fmt.Sprintf("event: %s\ndata: %s\n\n", eventType, string(jsonData)))
+}
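Editor's note: end to end, a plain text-only stream therefore comes out in the standard Anthropic order: message_start, content_block_start, one or more content_block_delta events, content_block_stop, message_delta (carrying stop_reason and usage), message_stop. Worked usage example (numbers invented): if Gemini reports promptTokenCount=1200, cachedContentTokenCount=800, and candidatesTokenCount=50, the emitted Claude usage is input_tokens=400 (1200-800), cache_read_input_tokens=800, output_tokens=50.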
diff --git a/backend/internal/pkg/claude/constants.go b/backend/internal/pkg/claude/constants.go
new file mode 100644
index 00000000..d1a56a84
--- /dev/null
+++ b/backend/internal/pkg/claude/constants.go
@@ -0,0 +1,81 @@
+// Package claude provides constants and helpers for Claude API integration.
+package claude
+
+// Constants for the Claude Code client
+
+// Beta header constants
+const (
+	BetaOAuth                    = "oauth-2025-04-20"
+	BetaClaudeCode               = "claude-code-20250219"
+	BetaInterleavedThinking      = "interleaved-thinking-2025-05-14"
+	BetaFineGrainedToolStreaming = "fine-grained-tool-streaming-2025-05-14"
+)
+
+// DefaultBetaHeader is the default anthropic-beta header of the Claude Code client.
+const DefaultBetaHeader = BetaClaudeCode + "," + BetaOAuth + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming
+
+// HaikuBetaHeader is the anthropic-beta header used for Haiku models (the claude-code beta is not needed).
+const HaikuBetaHeader = BetaOAuth + "," + BetaInterleavedThinking
+
+// APIKeyBetaHeader is the recommended anthropic-beta header for API-key accounts (without oauth).
+const APIKeyBetaHeader = BetaClaudeCode + "," + BetaInterleavedThinking + "," + BetaFineGrainedToolStreaming
+
+// APIKeyHaikuBetaHeader is the anthropic-beta header used for Haiku models on API-key accounts (without oauth / claude-code).
+const APIKeyHaikuBetaHeader = BetaInterleavedThinking
+
+// DefaultHeaders are the default request headers of the Claude Code client.
+var DefaultHeaders = map[string]string{
+	"User-Agent":                  "claude-cli/2.0.62 (external, cli)",
+	"X-Stainless-Lang":            "js",
+	"X-Stainless-Package-Version": "0.52.0",
+	"X-Stainless-OS":              "Linux",
+	"X-Stainless-Arch":            "x64",
+	"X-Stainless-Runtime":         "node",
+	"X-Stainless-Runtime-Version": "v22.14.0",
+	"X-Stainless-Retry-Count":     "0",
+	"X-Stainless-Timeout":         "60",
+	"X-App":                       "cli",
+	"Anthropic-Dangerous-Direct-Browser-Access": "true",
+}
+
+// Model represents a Claude model.
+type Model struct {
+	ID          string `json:"id"`
+	Type        string `json:"type"`
+	DisplayName string `json:"display_name"`
+	CreatedAt   string `json:"created_at"`
+}
+
+// DefaultModels is the default model list supported by the Claude Code client.
+var DefaultModels = []Model{
+	{
+		ID:          "claude-opus-4-5-20251101",
+		Type:        "model",
+		DisplayName: "Claude Opus 4.5",
+		CreatedAt:   "2025-11-01T00:00:00Z",
+	},
+	{
+		ID:          "claude-sonnet-4-5-20250929",
+		Type:        "model",
+		DisplayName: "Claude Sonnet 4.5",
+		CreatedAt:   "2025-09-29T00:00:00Z",
+	},
+	{
+		ID:          "claude-haiku-4-5-20251001",
+		Type:        "model",
+		DisplayName: "Claude Haiku 4.5",
+		CreatedAt:   "2025-10-01T00:00:00Z",
+	},
+}
+
+// DefaultModelIDs returns the IDs of the default models.
+func DefaultModelIDs() []string {
+	ids := make([]string, len(DefaultModels))
+	for i, m := range DefaultModels {
+		ids[i] = m.ID
+	}
+	return ids
+}
+
+// DefaultTestModel is the default model used for testing.
+const DefaultTestModel = "claude-sonnet-4-5-20250929"
diff --git a/backend/internal/pkg/ctxkey/ctxkey.go b/backend/internal/pkg/ctxkey/ctxkey.go
new file mode 100644
index 00000000..27bb5ac5
--- /dev/null
+++ b/backend/internal/pkg/ctxkey/ctxkey.go
@@ -0,0 +1,21 @@
+// Package ctxkey defines type-safe keys for use with context.Value.
+package ctxkey
+
+// Key is the type used for context keys, avoiding the built-in string type (staticcheck SA1029).
+type Key string
+
+const (
+	// ForcePlatform forces the platform (for the /antigravity route); set by middleware.ForcePlatform.
+	ForcePlatform Key = "ctx_force_platform"
+
+	// ClientRequestID uniquely identifies a client request across its whole lifecycle (used for Ops monitoring and troubleshooting).
+	ClientRequestID Key = "ctx_client_request_id"
+
+	// RetryCount is the number of gateway-level retries for the current request (used for Ops records and troubleshooting).
+	RetryCount Key = "ctx_retry_count"
+
+	// IsClaudeCodeClient marks whether the current request comes from the Claude Code client.
+	IsClaudeCodeClient Key = "ctx_is_claude_code_client"
+	// Group is the authenticated group info, set by the API key auth middleware.
+	Group Key = "ctx_group"
+)
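Editor's note: a minimal sketch of the intended use; only the keys come from this package, and the stored int value and the surrounding request handling are assumptions.

    // Illustrative only: set in middleware, read in a handler.
    ctx := context.WithValue(r.Context(), ctxkey.RetryCount, 2) // r is an *http.Request
    if n, ok := ctx.Value(ctxkey.RetryCount).(int); ok {
        log.Printf("gateway retries so far: %d", n) // typed keys avoid SA1029 collisions
    }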
diff --git a/backend/internal/pkg/errors/errors.go b/backend/internal/pkg/errors/errors.go
new file mode 100644
index 00000000..89977f99
--- /dev/null
+++ b/backend/internal/pkg/errors/errors.go
@@ -0,0 +1,158 @@
+package errors
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+)
+
+const (
+	UnknownCode    = http.StatusInternalServerError
+	UnknownReason  = ""
+	UnknownMessage = "internal error"
+)
+
+type Status struct {
+	Code     int32             `json:"code"`
+	Reason   string            `json:"reason,omitempty"`
+	Message  string            `json:"message"`
+	Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// ApplicationError is the standard error type used to control HTTP responses.
+//
+// Code is expected to be an HTTP status code (e.g. 400/401/403/404/409/500).
+type ApplicationError struct {
+	Status
+	cause error
+}
+
+// Error is kept for backwards compatibility within this package.
+type Error = ApplicationError
+
+func (e *ApplicationError) Error() string {
+	if e == nil {
+		return ""
+	}
+	if e.cause == nil {
+		return fmt.Sprintf("error: code=%d reason=%q message=%q metadata=%v", e.Code, e.Reason, e.Message, e.Metadata)
+	}
+	return fmt.Sprintf("error: code=%d reason=%q message=%q metadata=%v cause=%v", e.Code, e.Reason, e.Message, e.Metadata, e.cause)
+}
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (e *ApplicationError) Unwrap() error { return e.cause }
+
+// Is matches each error in the chain with the target value.
+func (e *ApplicationError) Is(err error) bool {
+	if se := new(ApplicationError); errors.As(err, &se) {
+		return se.Code == e.Code && se.Reason == e.Reason
+	}
+	return false
+}
+
+// WithCause attaches the underlying cause of the error.
+func (e *ApplicationError) WithCause(cause error) *ApplicationError {
+	err := Clone(e)
+	err.cause = cause
+	return err
+}
+
+// WithMetadata deep-copies the given metadata map.
+func (e *ApplicationError) WithMetadata(md map[string]string) *ApplicationError {
+	err := Clone(e)
+	if md == nil {
+		err.Metadata = nil
+		return err
+	}
+	err.Metadata = make(map[string]string, len(md))
+	for k, v := range md {
+		err.Metadata[k] = v
+	}
+	return err
+}
+
+// New returns an error object for the given code, reason, and message.
+func New(code int, reason, message string) *ApplicationError {
+	return &ApplicationError{
+		Status: Status{
+			Code:    int32(code),
+			Message: message,
+			Reason:  reason,
+		},
+	}
+}
+
+// Newf is shorthand for New(code, reason, fmt.Sprintf(format, a...)).
+func Newf(code int, reason, format string, a ...any) *ApplicationError {
+	return New(code, reason, fmt.Sprintf(format, a...))
+}
+
+// Errorf returns an error object for the given code, reason, and formatted message.
+func Errorf(code int, reason, format string, a ...any) error {
+	return New(code, reason, fmt.Sprintf(format, a...))
+}
+
+// Code returns the http code for an error.
+// It supports wrapped errors.
+func Code(err error) int {
+	if err == nil {
+		return http.StatusOK
+	}
+	return int(FromError(err).Code)
+}
+
+// Reason returns the reason for a particular error.
+// It supports wrapped errors.
+func Reason(err error) string {
+	if err == nil {
+		return UnknownReason
+	}
+	return FromError(err).Reason
+}
+
+// Message returns the message for a particular error.
+// It supports wrapped errors.
+func Message(err error) string {
+	if err == nil {
+		return ""
+	}
+	return FromError(err).Message
+}
+
+// Clone deep-copies an error into a new error.
+func Clone(err *ApplicationError) *ApplicationError { + if err == nil { + return nil + } + var metadata map[string]string + if err.Metadata != nil { + metadata = make(map[string]string, len(err.Metadata)) + for k, v := range err.Metadata { + metadata[k] = v + } + } + return &ApplicationError{ + cause: err.cause, + Status: Status{ + Code: err.Code, + Reason: err.Reason, + Message: err.Message, + Metadata: metadata, + }, + } +} + +// FromError tries to convert an error to *ApplicationError. +// It supports wrapped errors. +func FromError(err error) *ApplicationError { + if err == nil { + return nil + } + if se := new(ApplicationError); errors.As(err, &se) { + return se + } + + // Fall back to a generic internal error. + return New(UnknownCode, UnknownReason, UnknownMessage).WithCause(err) +} diff --git a/backend/internal/pkg/errors/errors_test.go b/backend/internal/pkg/errors/errors_test.go new file mode 100644 index 00000000..1a1c842e --- /dev/null +++ b/backend/internal/pkg/errors/errors_test.go @@ -0,0 +1,168 @@ +//go:build unit + +package errors + +import ( + stderrors "errors" + "fmt" + "io" + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestApplicationError_Basics(t *testing.T) { + tests := []struct { + name string + err *ApplicationError + want Status + wantIs bool + target error + wrapped error + }{ + { + name: "new", + err: New(400, "BAD_REQUEST", "invalid input"), + want: Status{ + Code: 400, + Reason: "BAD_REQUEST", + Message: "invalid input", + }, + }, + { + name: "is_matches_code_and_reason", + err: New(401, "UNAUTHORIZED", "nope"), + want: Status{Code: 401, Reason: "UNAUTHORIZED", Message: "nope"}, + target: New(401, "UNAUTHORIZED", "ignored message"), + wantIs: true, + }, + { + name: "is_does_not_match_reason", + err: New(401, "UNAUTHORIZED", "nope"), + want: Status{Code: 401, Reason: "UNAUTHORIZED", Message: "nope"}, + target: New(401, "DIFFERENT", "ignored message"), + wantIs: false, + }, + { + name: "from_error_unwraps_wrapped_application_error", + err: New(404, "NOT_FOUND", "missing"), + wrapped: fmt.Errorf("wrap: %w", New(404, "NOT_FOUND", "missing")), + want: Status{ + Code: 404, + Reason: "NOT_FOUND", + Message: "missing", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.err != nil { + require.Equal(t, tt.want, tt.err.Status) + } + + if tt.target != nil { + require.Equal(t, tt.wantIs, stderrors.Is(tt.err, tt.target)) + } + + if tt.wrapped != nil { + got := FromError(tt.wrapped) + require.Equal(t, tt.want, got.Status) + } + }) + } +} + +func TestApplicationError_WithMetadataDeepCopy(t *testing.T) { + tests := []struct { + name string + md map[string]string + }{ + {name: "non_nil", md: map[string]string{"a": "1"}}, + {name: "nil", md: nil}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + appErr := BadRequest("BAD_REQUEST", "invalid input").WithMetadata(tt.md) + + if tt.md == nil { + require.Nil(t, appErr.Metadata) + return + } + + tt.md["a"] = "changed" + require.Equal(t, "1", appErr.Metadata["a"]) + }) + } +} + +func TestFromError_Generic(t *testing.T) { + tests := []struct { + name string + err error + wantCode int32 + wantReason string + wantMsg string + }{ + { + name: "plain_error", + err: stderrors.New("boom"), + wantCode: UnknownCode, + wantReason: UnknownReason, + wantMsg: UnknownMessage, + }, + { + name: "wrapped_plain_error", + err: fmt.Errorf("wrap: %w", io.EOF), + wantCode: UnknownCode, + wantReason: UnknownReason, + wantMsg: UnknownMessage, + }, + } + + for _, 
tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := FromError(tt.err) + require.Equal(t, tt.wantCode, got.Code) + require.Equal(t, tt.wantReason, got.Reason) + require.Equal(t, tt.wantMsg, got.Message) + require.Equal(t, tt.err, got.Unwrap()) + }) + } +} + +func TestToHTTP(t *testing.T) { + tests := []struct { + name string + err error + wantStatusCode int + wantBody Status + }{ + { + name: "nil_error", + err: nil, + wantStatusCode: http.StatusOK, + wantBody: Status{Code: int32(http.StatusOK)}, + }, + { + name: "application_error", + err: Forbidden("FORBIDDEN", "no access"), + wantStatusCode: http.StatusForbidden, + wantBody: Status{ + Code: int32(http.StatusForbidden), + Reason: "FORBIDDEN", + Message: "no access", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + code, body := ToHTTP(tt.err) + require.Equal(t, tt.wantStatusCode, code) + require.Equal(t, tt.wantBody, body) + }) + } +} diff --git a/backend/internal/pkg/errors/http.go b/backend/internal/pkg/errors/http.go new file mode 100644 index 00000000..7b5560e3 --- /dev/null +++ b/backend/internal/pkg/errors/http.go @@ -0,0 +1,21 @@ +package errors + +import "net/http" + +// ToHTTP converts an error into an HTTP status code and a JSON-serializable body. +// +// The returned body matches the project's Status shape: +// { code, reason, message, metadata }. +func ToHTTP(err error) (statusCode int, body Status) { + if err == nil { + return http.StatusOK, Status{Code: int32(http.StatusOK)} + } + + appErr := FromError(err) + if appErr == nil { + return http.StatusOK, Status{Code: int32(http.StatusOK)} + } + + cloned := Clone(appErr) + return int(cloned.Code), cloned.Status +} diff --git a/backend/internal/pkg/errors/types.go b/backend/internal/pkg/errors/types.go new file mode 100644 index 00000000..21dfbeb8 --- /dev/null +++ b/backend/internal/pkg/errors/types.go @@ -0,0 +1,115 @@ +// Package errors provides application error types and helpers. +// nolint:mnd +package errors + +import "net/http" + +// BadRequest new BadRequest error that is mapped to a 400 response. +func BadRequest(reason, message string) *ApplicationError { + return New(http.StatusBadRequest, reason, message) +} + +// IsBadRequest determines if err is an error which indicates a BadRequest error. +// It supports wrapped errors. +func IsBadRequest(err error) bool { + return Code(err) == http.StatusBadRequest +} + +// TooManyRequests new TooManyRequests error that is mapped to a 429 response. +func TooManyRequests(reason, message string) *ApplicationError { + return New(http.StatusTooManyRequests, reason, message) +} + +// IsTooManyRequests determines if err is an error which indicates a TooManyRequests error. +// It supports wrapped errors. +func IsTooManyRequests(err error) bool { + return Code(err) == http.StatusTooManyRequests +} + +// Unauthorized new Unauthorized error that is mapped to a 401 response. +func Unauthorized(reason, message string) *ApplicationError { + return New(http.StatusUnauthorized, reason, message) +} + +// IsUnauthorized determines if err is an error which indicates an Unauthorized error. +// It supports wrapped errors. +func IsUnauthorized(err error) bool { + return Code(err) == http.StatusUnauthorized +} + +// Forbidden new Forbidden error that is mapped to a 403 response. +func Forbidden(reason, message string) *ApplicationError { + return New(http.StatusForbidden, reason, message) +} + +// IsForbidden determines if err is an error which indicates a Forbidden error. 
+// It supports wrapped errors.
+func IsForbidden(err error) bool {
+	return Code(err) == http.StatusForbidden
+}
+
+// NotFound new NotFound error that is mapped to a 404 response.
+func NotFound(reason, message string) *ApplicationError {
+	return New(http.StatusNotFound, reason, message)
+}
+
+// IsNotFound determines if err is an error which indicates a NotFound error.
+// It supports wrapped errors.
+func IsNotFound(err error) bool {
+	return Code(err) == http.StatusNotFound
+}
+
+// Conflict new Conflict error that is mapped to a 409 response.
+func Conflict(reason, message string) *ApplicationError {
+	return New(http.StatusConflict, reason, message)
+}
+
+// IsConflict determines if err is an error which indicates a Conflict error.
+// It supports wrapped errors.
+func IsConflict(err error) bool {
+	return Code(err) == http.StatusConflict
+}
+
+// InternalServer new InternalServer error that is mapped to a 500 response.
+func InternalServer(reason, message string) *ApplicationError {
+	return New(http.StatusInternalServerError, reason, message)
+}
+
+// IsInternalServer determines if err is an error which indicates an Internal error.
+// It supports wrapped errors.
+func IsInternalServer(err error) bool {
+	return Code(err) == http.StatusInternalServerError
+}
+
+// ServiceUnavailable new ServiceUnavailable error that is mapped to an HTTP 503 response.
+func ServiceUnavailable(reason, message string) *ApplicationError {
+	return New(http.StatusServiceUnavailable, reason, message)
+}
+
+// IsServiceUnavailable determines if err is an error which indicates an Unavailable error.
+// It supports wrapped errors.
+func IsServiceUnavailable(err error) bool {
+	return Code(err) == http.StatusServiceUnavailable
+}
+
+// GatewayTimeout new GatewayTimeout error that is mapped to an HTTP 504 response.
+func GatewayTimeout(reason, message string) *ApplicationError {
+	return New(http.StatusGatewayTimeout, reason, message)
+}
+
+// IsGatewayTimeout determines if err is an error which indicates a GatewayTimeout error.
+// It supports wrapped errors.
+func IsGatewayTimeout(err error) bool {
+	return Code(err) == http.StatusGatewayTimeout
+}
+
+// ClientClosed new ClientClosed error that is mapped to an HTTP 499 response.
+func ClientClosed(reason, message string) *ApplicationError {
+	return New(499, reason, message)
+}
+
+// IsClientClosed determines if err is an error which indicates a ClientClosed error.
+// It supports wrapped errors.
+func IsClientClosed(err error) bool {
+	return Code(err) == 499
+}
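Editor's note: a short sketch of the intended call pattern; the lookup function is invented, while BadRequest, NotFound, WithMetadata, and ToHTTP are the package's own API.

    // Illustrative only.
    func findAccount(id string) error {
        if id == "" {
            return errors.BadRequest("INVALID_ID", "account id is required")
        }
        return errors.NotFound("ACCOUNT_NOT_FOUND", "no such account").
            WithMetadata(map[string]string{"id": id})
    }

    // At the HTTP boundary:
    status, body := errors.ToHTTP(findAccount("")) // status == 400, body.Reason == "INVALID_ID"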
diff --git a/backend/internal/pkg/gemini/models.go b/backend/internal/pkg/gemini/models.go
new file mode 100644
index 00000000..e251c8d8
--- /dev/null
+++ b/backend/internal/pkg/gemini/models.go
@@ -0,0 +1,43 @@
+// Package gemini provides minimal fallback model metadata for Gemini native endpoints.
+// It is used when upstream model listing is unavailable (e.g. OAuth token missing AI Studio scopes).
+package gemini
+
+type Model struct {
+	Name                       string   `json:"name"`
+	DisplayName                string   `json:"displayName,omitempty"`
+	Description                string   `json:"description,omitempty"`
+	SupportedGenerationMethods []string `json:"supportedGenerationMethods,omitempty"`
+}
+
+type ModelsListResponse struct {
+	Models []Model `json:"models"`
+}
+
+func DefaultModels() []Model {
+	methods := []string{"generateContent", "streamGenerateContent"}
+	return []Model{
+		{Name: "models/gemini-3-pro-preview", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-3-flash-preview", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-2.5-pro", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-2.5-flash", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-2.0-flash", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-1.5-pro", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-1.5-flash", SupportedGenerationMethods: methods},
+		{Name: "models/gemini-1.5-flash-8b", SupportedGenerationMethods: methods},
+	}
+}
+
+func FallbackModelsList() ModelsListResponse {
+	return ModelsListResponse{Models: DefaultModels()}
+}
+
+func FallbackModel(model string) Model {
+	methods := []string{"generateContent", "streamGenerateContent"}
+	if model == "" {
+		return Model{Name: "models/unknown", SupportedGenerationMethods: methods}
+	}
+	if len(model) >= 7 && model[:7] == "models/" {
+		return Model{Name: model, SupportedGenerationMethods: methods}
+	}
+	return Model{Name: "models/" + model, SupportedGenerationMethods: methods}
+}
diff --git a/backend/internal/pkg/geminicli/codeassist_types.go b/backend/internal/pkg/geminicli/codeassist_types.go
new file mode 100644
index 00000000..dbc11b9e
--- /dev/null
+++ b/backend/internal/pkg/geminicli/codeassist_types.go
@@ -0,0 +1,82 @@
+package geminicli
+
+import (
+	"bytes"
+	"encoding/json"
+)
+
+// LoadCodeAssistRequest matches done-hub's internal Code Assist call.
+type LoadCodeAssistRequest struct {
+	Metadata LoadCodeAssistMetadata `json:"metadata"`
+}
+
+type LoadCodeAssistMetadata struct {
+	IDEType    string `json:"ideType"`
+	Platform   string `json:"platform"`
+	PluginType string `json:"pluginType"`
+}
+
+type TierInfo struct {
+	ID string `json:"id"`
+}
+
+// UnmarshalJSON supports both legacy string tiers and object tiers.
+func (t *TierInfo) UnmarshalJSON(data []byte) error { + data = bytes.TrimSpace(data) + if len(data) == 0 || string(data) == "null" { + return nil + } + if data[0] == '"' { + var id string + if err := json.Unmarshal(data, &id); err != nil { + return err + } + t.ID = id + return nil + } + type alias TierInfo + var decoded alias + if err := json.Unmarshal(data, &decoded); err != nil { + return err + } + *t = TierInfo(decoded) + return nil +} + +type LoadCodeAssistResponse struct { + CurrentTier *TierInfo `json:"currentTier,omitempty"` + PaidTier *TierInfo `json:"paidTier,omitempty"` + CloudAICompanionProject string `json:"cloudaicompanionProject,omitempty"` + AllowedTiers []AllowedTier `json:"allowedTiers,omitempty"` +} + +// GetTier extracts tier ID, prioritizing paidTier over currentTier +func (r *LoadCodeAssistResponse) GetTier() string { + if r.PaidTier != nil && r.PaidTier.ID != "" { + return r.PaidTier.ID + } + if r.CurrentTier != nil { + return r.CurrentTier.ID + } + return "" +} + +type AllowedTier struct { + ID string `json:"id"` + IsDefault bool `json:"isDefault,omitempty"` +} + +type OnboardUserRequest struct { + TierID string `json:"tierId"` + Metadata LoadCodeAssistMetadata `json:"metadata"` +} + +type OnboardUserResponse struct { + Done bool `json:"done"` + Response *OnboardUserResultData `json:"response,omitempty"` + Name string `json:"name,omitempty"` +} + +type OnboardUserResultData struct { + CloudAICompanionProject any `json:"cloudaicompanionProject,omitempty"` +} diff --git a/backend/internal/pkg/geminicli/constants.go b/backend/internal/pkg/geminicli/constants.go new file mode 100644 index 00000000..d4d52116 --- /dev/null +++ b/backend/internal/pkg/geminicli/constants.go @@ -0,0 +1,48 @@ +// Package geminicli provides helpers for interacting with Gemini CLI tools. +package geminicli + +import "time" + +const ( + AIStudioBaseURL = "https://generativelanguage.googleapis.com" + GeminiCliBaseURL = "https://cloudcode-pa.googleapis.com" + + AuthorizeURL = "https://accounts.google.com/o/oauth2/v2/auth" + TokenURL = "https://oauth2.googleapis.com/token" + + // AIStudioOAuthRedirectURI is the default redirect URI used for AI Studio OAuth. + // This matches the "copy/paste callback URL" flow used by OpenAI OAuth in this project. + // Note: You still need to register this redirect URI in your Google OAuth client + // unless you use an OAuth client type that permits localhost redirect URIs. + AIStudioOAuthRedirectURI = "http://localhost:1455/auth/callback" + + // DefaultScopes for Code Assist (includes cloud-platform for API access plus userinfo scopes) + // Required by Google's Code Assist API. + DefaultCodeAssistScopes = "https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile" + + // DefaultScopes for AI Studio (uses generativelanguage API with OAuth) + // Reference: https://ai.google.dev/gemini-api/docs/oauth + // For regular Google accounts, supports API calls to generativelanguage.googleapis.com + // Note: Google Auth platform currently documents the OAuth scope as + // https://www.googleapis.com/auth/generative-language.retriever (often with cloud-platform). + DefaultAIStudioScopes = "https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/generative-language.retriever" + + // DefaultGoogleOneScopes (DEPRECATED, no longer used) + // Google One now always uses the built-in Gemini CLI client with DefaultCodeAssistScopes. 
+ // This constant is kept for backward compatibility but is not actively used. + DefaultGoogleOneScopes = "https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/generative-language.retriever https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile" + + // GeminiCLIRedirectURI is the redirect URI used by Gemini CLI for Code Assist OAuth. + GeminiCLIRedirectURI = "https://codeassist.google.com/authcode" + + // GeminiCLIOAuthClientID/Secret are the public OAuth client credentials used by Google Gemini CLI. + // They enable the "login without creating your own OAuth client" experience, but Google may + // restrict which scopes are allowed for this client. + GeminiCLIOAuthClientID = "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com" + GeminiCLIOAuthClientSecret = "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" + + SessionTTL = 30 * time.Minute + + // GeminiCLIUserAgent mimics Gemini CLI to maximize compatibility with internal endpoints. + GeminiCLIUserAgent = "GeminiCLI/0.1.5 (Windows; AMD64)" +) diff --git a/backend/internal/pkg/geminicli/drive_client.go b/backend/internal/pkg/geminicli/drive_client.go new file mode 100644 index 00000000..a6cbc3ab --- /dev/null +++ b/backend/internal/pkg/geminicli/drive_client.go @@ -0,0 +1,157 @@ +package geminicli + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" +) + +// DriveStorageInfo represents Google Drive storage quota information +type DriveStorageInfo struct { + Limit int64 `json:"limit"` // Storage limit in bytes + Usage int64 `json:"usage"` // Current usage in bytes +} + +// DriveClient interface for Google Drive API operations +type DriveClient interface { + GetStorageQuota(ctx context.Context, accessToken, proxyURL string) (*DriveStorageInfo, error) +} + +type driveClient struct{} + +// NewDriveClient creates a new Drive API client +func NewDriveClient() DriveClient { + return &driveClient{} +} + +// GetStorageQuota fetches storage quota from Google Drive API +func (c *driveClient) GetStorageQuota(ctx context.Context, accessToken, proxyURL string) (*DriveStorageInfo, error) { + const driveAPIURL = "https://www.googleapis.com/drive/v3/about?fields=storageQuota" + + req, err := http.NewRequestWithContext(ctx, "GET", driveAPIURL, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Get HTTP client with proxy support + client, err := httpclient.GetClient(httpclient.Options{ + ProxyURL: proxyURL, + Timeout: 10 * time.Second, + }) + if err != nil { + return nil, fmt.Errorf("failed to create HTTP client: %w", err) + } + + sleepWithContext := func(d time.Duration) error { + timer := time.NewTimer(d) + defer timer.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + return nil + } + } + + // Retry logic with exponential backoff (+ jitter) for rate limits and transient failures + var resp *http.Response + maxRetries := 3 + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + for attempt := 0; attempt < maxRetries; attempt++ { + if ctx.Err() != nil { + return nil, fmt.Errorf("request cancelled: %w", ctx.Err()) + } + + resp, err = client.Do(req) + if err != nil { + // Network error retry + if attempt < maxRetries-1 { + backoff := time.Duration(1< SessionTTL { + return nil, false + } + return session, 
true +} + +func (s *SessionStore) Delete(sessionID string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.sessions, sessionID) +} + +func (s *SessionStore) Stop() { + select { + case <-s.stopCh: + return + default: + close(s.stopCh) + } +} + +func (s *SessionStore) cleanup() { + ticker := time.NewTicker(5 * time.Minute) + defer ticker.Stop() + for { + select { + case <-s.stopCh: + return + case <-ticker.C: + s.mu.Lock() + for id, session := range s.sessions { + if time.Since(session.CreatedAt) > SessionTTL { + delete(s.sessions, id) + } + } + s.mu.Unlock() + } + } +} + +func GenerateRandomBytes(n int) ([]byte, error) { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + return nil, err + } + return b, nil +} + +func GenerateState() (string, error) { + bytes, err := GenerateRandomBytes(32) + if err != nil { + return "", err + } + return base64URLEncode(bytes), nil +} + +func GenerateSessionID() (string, error) { + bytes, err := GenerateRandomBytes(16) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateCodeVerifier returns an RFC 7636 compatible code verifier (43+ chars). +func GenerateCodeVerifier() (string, error) { + bytes, err := GenerateRandomBytes(32) + if err != nil { + return "", err + } + return base64URLEncode(bytes), nil +} + +func GenerateCodeChallenge(verifier string) string { + hash := sha256.Sum256([]byte(verifier)) + return base64URLEncode(hash[:]) +} + +func base64URLEncode(data []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(data), "=") +} + +// EffectiveOAuthConfig returns the effective OAuth configuration. +// oauthType: "code_assist" or "ai_studio" (defaults to "code_assist" if empty). +// +// If ClientID/ClientSecret is not provided, this falls back to the built-in Gemini CLI OAuth client. +// +// Note: The built-in Gemini CLI OAuth client is restricted and may reject some scopes (e.g. +// https://www.googleapis.com/auth/generative-language), which will surface as +// "restricted_client" / "Unregistered scope(s)" errors during browser authorization. +func EffectiveOAuthConfig(cfg OAuthConfig, oauthType string) (OAuthConfig, error) { + effective := OAuthConfig{ + ClientID: strings.TrimSpace(cfg.ClientID), + ClientSecret: strings.TrimSpace(cfg.ClientSecret), + Scopes: strings.TrimSpace(cfg.Scopes), + } + + // Normalize scopes: allow comma-separated input but send space-delimited scopes to Google. + if effective.Scopes != "" { + effective.Scopes = strings.Join(strings.Fields(strings.ReplaceAll(effective.Scopes, ",", " ")), " ") + } + + // Fall back to built-in Gemini CLI OAuth client when not configured. + if effective.ClientID == "" && effective.ClientSecret == "" { + effective.ClientID = GeminiCLIOAuthClientID + effective.ClientSecret = GeminiCLIOAuthClientSecret + } else if effective.ClientID == "" || effective.ClientSecret == "" { + return OAuthConfig{}, fmt.Errorf("OAuth client not configured: please set both client_id and client_secret (or leave both empty to use the built-in Gemini CLI client)") + } + + isBuiltinClient := effective.ClientID == GeminiCLIOAuthClientID && + effective.ClientSecret == GeminiCLIOAuthClientSecret + + if effective.Scopes == "" { + // Use different default scopes based on OAuth type + switch oauthType { + case "ai_studio": + // Built-in client can't request some AI Studio scopes (notably generative-language). 
+ if isBuiltinClient { + effective.Scopes = DefaultCodeAssistScopes + } else { + effective.Scopes = DefaultAIStudioScopes + } + case "google_one": + // Google One always uses built-in Gemini CLI client (same as code_assist) + // Built-in client can't request restricted scopes like generative-language.retriever or drive.readonly + effective.Scopes = DefaultCodeAssistScopes + default: + // Default to Code Assist scopes + effective.Scopes = DefaultCodeAssistScopes + } + } else if (oauthType == "ai_studio" || oauthType == "google_one") && isBuiltinClient { + // If user overrides scopes while still using the built-in client, strip restricted scopes. + parts := strings.Fields(effective.Scopes) + filtered := make([]string, 0, len(parts)) + for _, s := range parts { + if hasRestrictedScope(s) { + continue + } + filtered = append(filtered, s) + } + if len(filtered) == 0 { + effective.Scopes = DefaultCodeAssistScopes + } else { + effective.Scopes = strings.Join(filtered, " ") + } + } + + // Backward compatibility: normalize older AI Studio scope to the currently documented one. + if oauthType == "ai_studio" && effective.Scopes != "" { + parts := strings.Fields(effective.Scopes) + for i := range parts { + if parts[i] == "https://www.googleapis.com/auth/generative-language" { + parts[i] = "https://www.googleapis.com/auth/generative-language.retriever" + } + } + effective.Scopes = strings.Join(parts, " ") + } + + return effective, nil +} + +func hasRestrictedScope(scope string) bool { + return strings.HasPrefix(scope, "https://www.googleapis.com/auth/generative-language") || + strings.HasPrefix(scope, "https://www.googleapis.com/auth/drive") +} + +func BuildAuthorizationURL(cfg OAuthConfig, state, codeChallenge, redirectURI, projectID, oauthType string) (string, error) { + effectiveCfg, err := EffectiveOAuthConfig(cfg, oauthType) + if err != nil { + return "", err + } + redirectURI = strings.TrimSpace(redirectURI) + if redirectURI == "" { + return "", fmt.Errorf("redirect_uri is required") + } + + params := url.Values{} + params.Set("response_type", "code") + params.Set("client_id", effectiveCfg.ClientID) + params.Set("redirect_uri", redirectURI) + params.Set("scope", effectiveCfg.Scopes) + params.Set("state", state) + params.Set("code_challenge", codeChallenge) + params.Set("code_challenge_method", "S256") + params.Set("access_type", "offline") + params.Set("prompt", "consent") + params.Set("include_granted_scopes", "true") + if strings.TrimSpace(projectID) != "" { + params.Set("project_id", strings.TrimSpace(projectID)) + } + + return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()), nil +} diff --git a/backend/internal/pkg/geminicli/oauth_test.go b/backend/internal/pkg/geminicli/oauth_test.go new file mode 100644 index 00000000..0770730a --- /dev/null +++ b/backend/internal/pkg/geminicli/oauth_test.go @@ -0,0 +1,113 @@ +package geminicli + +import ( + "strings" + "testing" +) + +func TestEffectiveOAuthConfig_GoogleOne(t *testing.T) { + tests := []struct { + name string + input OAuthConfig + oauthType string + wantClientID string + wantScopes string + wantErr bool + }{ + { + name: "Google One with built-in client (empty config)", + input: OAuthConfig{}, + oauthType: "google_one", + wantClientID: GeminiCLIOAuthClientID, + wantScopes: DefaultCodeAssistScopes, + wantErr: false, + }, + { + name: "Google One always uses built-in client (even if custom credentials passed)", + input: OAuthConfig{ + ClientID: "custom-client-id", + ClientSecret: "custom-client-secret", + }, + oauthType: "google_one", + 
wantClientID: "custom-client-id", + wantScopes: DefaultCodeAssistScopes, // Uses code assist scopes even with custom client + wantErr: false, + }, + { + name: "Google One with built-in client and custom scopes (should filter restricted scopes)", + input: OAuthConfig{ + Scopes: "https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/generative-language.retriever https://www.googleapis.com/auth/drive.readonly", + }, + oauthType: "google_one", + wantClientID: GeminiCLIOAuthClientID, + wantScopes: "https://www.googleapis.com/auth/cloud-platform", + wantErr: false, + }, + { + name: "Google One with built-in client and only restricted scopes (should fallback to default)", + input: OAuthConfig{ + Scopes: "https://www.googleapis.com/auth/generative-language.retriever https://www.googleapis.com/auth/drive.readonly", + }, + oauthType: "google_one", + wantClientID: GeminiCLIOAuthClientID, + wantScopes: DefaultCodeAssistScopes, + wantErr: false, + }, + { + name: "Code Assist with built-in client", + input: OAuthConfig{}, + oauthType: "code_assist", + wantClientID: GeminiCLIOAuthClientID, + wantScopes: DefaultCodeAssistScopes, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := EffectiveOAuthConfig(tt.input, tt.oauthType) + if (err != nil) != tt.wantErr { + t.Errorf("EffectiveOAuthConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err != nil { + return + } + if got.ClientID != tt.wantClientID { + t.Errorf("EffectiveOAuthConfig() ClientID = %v, want %v", got.ClientID, tt.wantClientID) + } + if got.Scopes != tt.wantScopes { + t.Errorf("EffectiveOAuthConfig() Scopes = %v, want %v", got.Scopes, tt.wantScopes) + } + }) + } +} + +func TestEffectiveOAuthConfig_ScopeFiltering(t *testing.T) { + // Test that Google One with built-in client filters out restricted scopes + cfg, err := EffectiveOAuthConfig(OAuthConfig{ + Scopes: "https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/generative-language.retriever https://www.googleapis.com/auth/drive.readonly https://www.googleapis.com/auth/userinfo.profile", + }, "google_one") + + if err != nil { + t.Fatalf("EffectiveOAuthConfig() error = %v", err) + } + + // Should only contain cloud-platform, userinfo.email, and userinfo.profile + // Should NOT contain generative-language or drive scopes + if strings.Contains(cfg.Scopes, "generative-language") { + t.Errorf("Scopes should not contain generative-language when using built-in client, got: %v", cfg.Scopes) + } + if strings.Contains(cfg.Scopes, "drive") { + t.Errorf("Scopes should not contain drive when using built-in client, got: %v", cfg.Scopes) + } + if !strings.Contains(cfg.Scopes, "cloud-platform") { + t.Errorf("Scopes should contain cloud-platform, got: %v", cfg.Scopes) + } + if !strings.Contains(cfg.Scopes, "userinfo.email") { + t.Errorf("Scopes should contain userinfo.email, got: %v", cfg.Scopes) + } + if !strings.Contains(cfg.Scopes, "userinfo.profile") { + t.Errorf("Scopes should contain userinfo.profile, got: %v", cfg.Scopes) + } +} diff --git a/backend/internal/pkg/geminicli/sanitize.go b/backend/internal/pkg/geminicli/sanitize.go new file mode 100644 index 00000000..f5c407e4 --- /dev/null +++ b/backend/internal/pkg/geminicli/sanitize.go @@ -0,0 +1,46 @@ +package geminicli + +import "strings" + +const maxLogBodyLen = 2048 + +func SanitizeBodyForLogs(body string) string { + body = truncateBase64InMessage(body) + if len(body) > 
maxLogBodyLen {
+		body = body[:maxLogBodyLen] + "...[truncated]"
+	}
+	return body
+}
+
+func truncateBase64InMessage(message string) string {
+	const maxBase64Length = 50
+
+	result := message
+	offset := 0
+	for {
+		idx := strings.Index(result[offset:], ";base64,")
+		if idx == -1 {
+			break
+		}
+		actualIdx := offset + idx
+		start := actualIdx + len(";base64,")
+
+		end := start
+		for end < len(result) && isBase64Char(result[end]) {
+			end++
+		}
+
+		if end-start > maxBase64Length {
+			result = result[:start+maxBase64Length] + "...[truncated]" + result[end:]
+			offset = start + maxBase64Length + len("...[truncated]")
+			continue
+		}
+		offset = end
+	}
+
+	return result
+}
+
+func isBase64Char(c byte) bool {
+	return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '+' || c == '/' || c == '='
+}
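Editor's note: a hedged example of the effect (the input is invented):

    // Illustrative only: long base64 payloads are clipped to 50 chars,
    // and the whole body is capped at 2048 bytes.
    in := `{"img":"data:image/png;base64,` + strings.Repeat("A", 200) + `"}`
    out := geminicli.SanitizeBodyForLogs(in)
    // out keeps `;base64,` plus the first 50 "A"s, then "...[truncated]" and the JSON tail.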
diff --git a/backend/internal/pkg/geminicli/token_types.go b/backend/internal/pkg/geminicli/token_types.go
new file mode 100644
index 00000000..f3cfbaed
--- /dev/null
+++ b/backend/internal/pkg/geminicli/token_types.go
@@ -0,0 +1,9 @@
+package geminicli
+
+type TokenResponse struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token,omitempty"`
+	TokenType    string `json:"token_type"`
+	ExpiresIn    int64  `json:"expires_in"`
+	Scope        string `json:"scope,omitempty"`
+}
diff --git a/backend/internal/pkg/googleapi/status.go b/backend/internal/pkg/googleapi/status.go
new file mode 100644
index 00000000..5eb0c54a
--- /dev/null
+++ b/backend/internal/pkg/googleapi/status.go
@@ -0,0 +1,25 @@
+// Package googleapi provides helpers for Google-style API responses.
+package googleapi
+
+import "net/http"
+
+// HTTPStatusToGoogleStatus maps HTTP status codes to Google-style error status strings.
+func HTTPStatusToGoogleStatus(status int) string {
+	switch status {
+	case http.StatusBadRequest:
+		return "INVALID_ARGUMENT"
+	case http.StatusUnauthorized:
+		return "UNAUTHENTICATED"
+	case http.StatusForbidden:
+		return "PERMISSION_DENIED"
+	case http.StatusNotFound:
+		return "NOT_FOUND"
+	case http.StatusTooManyRequests:
+		return "RESOURCE_EXHAUSTED"
+	default:
+		if status >= 500 {
+			return "INTERNAL"
+		}
+		return "UNKNOWN"
+	}
+}
diff --git a/backend/internal/pkg/httpclient/pool.go b/backend/internal/pkg/httpclient/pool.go
new file mode 100644
index 00000000..76b7aa91
--- /dev/null
+++ b/backend/internal/pkg/httpclient/pool.go
@@ -0,0 +1,165 @@
+// Package httpclient provides a shared HTTP client pool.
+//
+// Performance notes:
+// The previous implementation created a fresh http.Client in several services:
+//  1. proxy_probe_service.go: a new client per probe
+//  2. pricing_service.go: a new client per request
+//  3. turnstile_service.go: a new client per verification
+//  4. github_release_service.go: a new client per request
+//  5. claude_usage_service.go: a new client per request
+//
+// The new implementation uses a unified client pool:
+//  1. Identical configurations reuse the same http.Client instance
+//  2. Transport connection pools are reused, cutting TCP/TLS handshake overhead
+//  3. HTTP/HTTPS/SOCKS5/SOCKS5H proxies are supported
+//  4. If proxy configuration fails, an error is returned instead of falling back to a direct connection (avoiding IP-correlation risk)
+package httpclient
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/proxyutil"
+	"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
+)
+
+// Default Transport connection-pool settings
+const (
+	defaultMaxIdleConns        = 100              // max idle connections
+	defaultMaxIdleConnsPerHost = 10               // max idle connections per host
+	defaultIdleConnTimeout     = 90 * time.Second // idle connection timeout (keep below upstream LB timeouts)
+)
+
+// Options defines the parameters for building a shared HTTP client.
+type Options struct {
+	ProxyURL              string        // proxy URL (http/https/socks5/socks5h)
+	Timeout               time.Duration // total request timeout
+	ResponseHeaderTimeout time.Duration // timeout for waiting on response headers
+	InsecureSkipVerify    bool          // whether to skip TLS certificate verification (disabled; may not be set to true)
+	ProxyStrict           bool          // strict proxy mode: return an error instead of falling back when the proxy fails
+	ValidateResolvedIP    bool          // validate resolved IPs (prevents DNS rebinding)
+	AllowPrivateHosts     bool          // allow private addresses to resolve (used together with ValidateResolvedIP)
+
+	// Optional connection-pool parameters (defaults apply when unset)
+	MaxIdleConns        int // max idle connections overall (default 100)
+	MaxIdleConnsPerHost int // max idle connections per host (default 10)
+	MaxConnsPerHost     int // max connections per host (default 0, unlimited)
+}
+
+// sharedClients caches http.Client instances keyed by configuration.
+var sharedClients sync.Map
+
+// GetClient returns a shared HTTP client instance.
+// Performance: identical configurations reuse the same client, avoiding repeated Transport creation.
+// Security: if proxy configuration fails, an error is returned instead of falling back to a direct connection, avoiding IP-correlation risk.
+func GetClient(opts Options) (*http.Client, error) {
+	key := buildClientKey(opts)
+	if cached, ok := sharedClients.Load(key); ok {
+		if client, ok := cached.(*http.Client); ok {
+			return client, nil
+		}
+	}
+
+	client, err := buildClient(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	actual, _ := sharedClients.LoadOrStore(key, client)
+	if c, ok := actual.(*http.Client); ok {
+		return c, nil
+	}
+	return client, nil
+}
+
+func buildClient(opts Options) (*http.Client, error) {
+	transport, err := buildTransport(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	var rt http.RoundTripper = transport
+	if opts.ValidateResolvedIP && !opts.AllowPrivateHosts {
+		rt = &validatedTransport{base: transport}
+	}
+	return &http.Client{
+		Transport: rt,
+		Timeout:   opts.Timeout,
+	}, nil
+}
+
+func buildTransport(opts Options) (*http.Transport, error) {
+	// Use custom values or fall back to defaults
+	maxIdleConns := opts.MaxIdleConns
+	if maxIdleConns <= 0 {
+		maxIdleConns = defaultMaxIdleConns
+	}
+	maxIdleConnsPerHost := opts.MaxIdleConnsPerHost
+	if maxIdleConnsPerHost <= 0 {
+		maxIdleConnsPerHost = defaultMaxIdleConnsPerHost
+	}
+
+	transport := &http.Transport{
+		MaxIdleConns:          maxIdleConns,
+		MaxIdleConnsPerHost:   maxIdleConnsPerHost,
+		MaxConnsPerHost:       opts.MaxConnsPerHost, // 0 means unlimited
+		IdleConnTimeout:       defaultIdleConnTimeout,
+		ResponseHeaderTimeout: opts.ResponseHeaderTimeout,
+	}
+
+	if opts.InsecureSkipVerify {
+		// Security requirement: skipping certificate verification is forbidden, to prevent man-in-the-middle attacks.
+		return nil, fmt.Errorf("insecure_skip_verify is not allowed; install a trusted certificate instead")
+	}
+
+	proxyURL := strings.TrimSpace(opts.ProxyURL)
+	if proxyURL == "" {
+		return transport, nil
+	}
+
+	parsed, err := url.Parse(proxyURL)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := proxyutil.ConfigureTransportProxy(transport, parsed); err != nil {
+		return nil, err
+	}
+
+	return transport, nil
+}
+
+func buildClientKey(opts Options) string {
+	return fmt.Sprintf("%s|%s|%s|%t|%t|%t|%t|%d|%d|%d",
+		strings.TrimSpace(opts.ProxyURL),
+		opts.Timeout.String(),
+		opts.ResponseHeaderTimeout.String(),
+		opts.InsecureSkipVerify,
+		opts.ProxyStrict,
+		opts.ValidateResolvedIP,
+		opts.AllowPrivateHosts,
+		opts.MaxIdleConns,
+		opts.MaxIdleConnsPerHost,
+		opts.MaxConnsPerHost,
+	)
+}
+
+type validatedTransport struct {
+	base http.RoundTripper
+}
+
+func (t *validatedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if req != nil && req.URL != nil {
+		host := strings.TrimSpace(req.URL.Hostname())
+		if host != "" {
+			if err := urlvalidator.ValidateResolvedIP(host); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return t.base.RoundTrip(req)
+}
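Editor's note: a minimal usage sketch; the proxy address is an example value and `req` is assumed to be an existing *http.Request.

    // Illustrative only: fetch through the shared pool with a SOCKS5 proxy.
    client, err := httpclient.GetClient(httpclient.Options{
        ProxyURL: "socks5://127.0.0.1:1080", // example proxy, not a real default
        Timeout:  15 * time.Second,
    })
    if err != nil {
        return err // e.g. invalid proxy URL; there is no silent direct-connection fallback
    }
    resp, err := client.Do(req) // later calls with identical Options reuse this client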
diff --git a/backend/internal/pkg/ip/ip.go b/backend/internal/pkg/ip/ip.go
new file mode 100644
index 00000000..97109c0c
--- /dev/null
+++ b/backend/internal/pkg/ip/ip.go
@@ -0,0 +1,168 @@
+// Package ip provides helpers for extracting client IP addresses.
+package ip
+
+import (
+	"net"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+
+// GetClientIP extracts the real client IP address from the Gin context.
+// Headers are checked in the following priority order:
+//  1. CF-Connecting-IP (Cloudflare)
+//  2. X-Real-IP (Nginx)
+//  3. X-Forwarded-For (first non-private IP)
+//  4. c.ClientIP() (Gin's built-in method)
+func GetClientIP(c *gin.Context) string {
+	// 1. Cloudflare
+	if ip := c.GetHeader("CF-Connecting-IP"); ip != "" {
+		return normalizeIP(ip)
+	}
+
+	// 2. Nginx X-Real-IP
+	if ip := c.GetHeader("X-Real-IP"); ip != "" {
+		return normalizeIP(ip)
+	}
+
+	// 3. X-Forwarded-For (with multiple IPs, take the first public one)
+	if xff := c.GetHeader("X-Forwarded-For"); xff != "" {
+		ips := strings.Split(xff, ",")
+		for _, ip := range ips {
+			ip = strings.TrimSpace(ip)
+			if ip != "" && !isPrivateIP(ip) {
+				return normalizeIP(ip)
+			}
+		}
+		// If all of them are private IPs, return the first one
+		if len(ips) > 0 {
+			return normalizeIP(strings.TrimSpace(ips[0]))
+		}
+	}
+
+	// 4. Gin's built-in method
+	return normalizeIP(c.ClientIP())
+}
+
+// normalizeIP normalizes an IP address, stripping the port and whitespace.
+func normalizeIP(ip string) string {
+	ip = strings.TrimSpace(ip)
+	// Strip the port (e.g. "192.168.1.1:8080" -> "192.168.1.1")
+	if host, _, err := net.SplitHostPort(ip); err == nil {
+		return host
+	}
+	return ip
+}
+
+// isPrivateIP reports whether the IP is a private address.
+func isPrivateIP(ipStr string) bool {
+	ip := net.ParseIP(ipStr)
+	if ip == nil {
+		return false
+	}
+
+	// Private IP ranges
+	privateBlocks := []string{
+		"10.0.0.0/8",
+		"172.16.0.0/12",
+		"192.168.0.0/16",
+		"127.0.0.0/8",
+		"::1/128",
+		"fc00::/7",
+	}
+
+	for _, block := range privateBlocks {
+		_, cidr, err := net.ParseCIDR(block)
+		if err != nil {
+			continue
+		}
+		if cidr.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}
+
+// MatchesPattern reports whether the IP matches the given pattern (single IP or CIDR).
+// pattern may be:
+//   - a single IP: "192.168.1.100"
+//   - a CIDR range: "192.168.1.0/24"
+func MatchesPattern(clientIP, pattern string) bool {
+	ip := net.ParseIP(clientIP)
+	if ip == nil {
+		return false
+	}
+
+	// Try to parse as CIDR
+	if strings.Contains(pattern, "/") {
+		_, cidr, err := net.ParseCIDR(pattern)
+		if err != nil {
+			return false
+		}
+		return cidr.Contains(ip)
+	}
+
+	// Treat as a single IP
+	patternIP := net.ParseIP(pattern)
+	if patternIP == nil {
+		return false
+	}
+	return ip.Equal(patternIP)
+}
+
+// MatchesAnyPattern reports whether the IP matches any of the patterns.
+func MatchesAnyPattern(clientIP string, patterns []string) bool {
+	for _, pattern := range patterns {
+		if MatchesPattern(clientIP, pattern) {
+			return true
+		}
+	}
+	return false
+}
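Editor's note: a few worked matches (addresses invented), using only the functions defined above.

    // Illustrative only: CIDR and exact-IP matching.
    ip.MatchesPattern("192.168.1.57", "192.168.1.0/24")                     // true (inside the /24)
    ip.MatchesPattern("192.168.2.57", "192.168.1.0/24")                     // false
    ip.MatchesAnyPattern("10.0.0.8", []string{"10.0.0.0/8", "203.0.113.7"}) // true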
+// CheckIPRestriction checks whether an IP is allowed by the API key's IP restrictions.
+// Returns (allowed, denial reason).
+// Logic:
+//  1. Check the blacklist first; an IP on the blacklist is rejected outright.
+//  2. If the whitelist is non-empty, the IP must be on it.
+//  3. If the whitelist is empty, access is allowed (unless the blacklist rejects it).
+func CheckIPRestriction(clientIP string, whitelist, blacklist []string) (bool, string) {
+	// Normalize the IP
+	clientIP = normalizeIP(clientIP)
+	if clientIP == "" {
+		return false, "access denied"
+	}
+
+	// 1. Check the blacklist
+	if len(blacklist) > 0 && MatchesAnyPattern(clientIP, blacklist) {
+		return false, "access denied"
+	}
+
+	// 2. Check the whitelist (when one is configured, the IP must be on it)
+	if len(whitelist) > 0 && !MatchesAnyPattern(clientIP, whitelist) {
+		return false, "access denied"
+	}
+
+	return true, ""
+}
+
+// ValidateIPPattern reports whether an IP or CIDR pattern is valid.
+func ValidateIPPattern(pattern string) bool {
+	if strings.Contains(pattern, "/") {
+		_, _, err := net.ParseCIDR(pattern)
+		return err == nil
+	}
+	return net.ParseIP(pattern) != nil
+}
+
+// ValidateIPPatterns validates multiple IP or CIDR patterns.
+// It returns the list of invalid patterns.
+func ValidateIPPatterns(patterns []string) []string {
+	var invalid []string
+	for _, p := range patterns {
+		if !ValidateIPPattern(p) {
+			invalid = append(invalid, p)
+		}
+	}
+	return invalid
+}
diff --git a/backend/internal/pkg/oauth/oauth.go b/backend/internal/pkg/oauth/oauth.go
new file mode 100644
index 00000000..d29c2422
--- /dev/null
+++ b/backend/internal/pkg/oauth/oauth.go
@@ -0,0 +1,237 @@
+// Package oauth provides helpers for OAuth flows used by this service.
+package oauth
+
+import (
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Claude OAuth Constants (from CRS project)
+const (
+	// OAuth Client ID for Claude
+	ClientID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
+
+	// OAuth endpoints
+	AuthorizeURL = "https://claude.ai/oauth/authorize"
+	TokenURL     = "https://console.anthropic.com/v1/oauth/token"
+	RedirectURI  = "https://console.anthropic.com/oauth/code/callback"
+
+	// Scopes
+	ScopeProfile   = "user:profile"
+	ScopeInference = "user:inference"
+
+	// Session TTL
+	SessionTTL = 30 * time.Minute
+)
+
+// OAuthSession stores OAuth flow state
+type OAuthSession struct {
+	State        string    `json:"state"`
+	CodeVerifier string    `json:"code_verifier"`
+	Scope        string    `json:"scope"`
+	ProxyURL     string    `json:"proxy_url,omitempty"`
+	CreatedAt    time.Time `json:"created_at"`
+}
+
+// SessionStore manages OAuth sessions in memory
+type SessionStore struct {
+	mu       sync.RWMutex
+	sessions map[string]*OAuthSession
+	stopCh   chan struct{}
+}
+
+// NewSessionStore creates a new session store
+func NewSessionStore() *SessionStore {
+	store := &SessionStore{
+		sessions: make(map[string]*OAuthSession),
+		stopCh:   make(chan struct{}),
+	}
+	// Start cleanup goroutine
+	go store.cleanup()
+	return store
+}
+
+// Stop stops the cleanup goroutine
+func (s *SessionStore) Stop() {
+	close(s.stopCh)
+}
+
+// Set stores a session
+func (s *SessionStore) Set(sessionID string, session *OAuthSession) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.sessions[sessionID] = session
+}
+
+// Get retrieves a session
+func (s *SessionStore) Get(sessionID string) (*OAuthSession, bool) {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	session, ok := s.sessions[sessionID]
+	if !ok {
+		return nil, false
+	}
+	// Check if expired
+	if time.Since(session.CreatedAt) > SessionTTL {
+		return nil, false
+	}
+	return session, true
+}
+
+// Delete removes a session
+func (s *SessionStore) Delete(sessionID string) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	delete(s.sessions, sessionID)
+}
+
+// cleanup removes expired sessions periodically
+func (s *SessionStore) cleanup() {
+	ticker := time.NewTicker(5 * time.Minute)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-s.stopCh:
+			return
+		case <-ticker.C:
+			s.mu.Lock()
+			for id, session := range s.sessions {
+				if time.Since(session.CreatedAt) > SessionTTL {
+					delete(s.sessions, id)
+				}
+			}
+			s.mu.Unlock()
+		}
+	}
+}
+
+// GenerateRandomBytes generates cryptographically secure random bytes
+func GenerateRandomBytes(n int) ([]byte, error) {
+	b := make([]byte, n)
+	_, err := rand.Read(b)
if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// GenerateState generates a random state string for OAuth
+func GenerateState() (string, error) {
+	bytes, err := GenerateRandomBytes(32)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(bytes), nil
+}
+
+// GenerateSessionID generates a unique session ID
+func GenerateSessionID() (string, error) {
+	bytes, err := GenerateRandomBytes(16)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(bytes), nil
+}
+
+// GenerateCodeVerifier generates a PKCE code verifier (32 bytes -> base64url)
+func GenerateCodeVerifier() (string, error) {
+	bytes, err := GenerateRandomBytes(32)
+	if err != nil {
+		return "", err
+	}
+	return base64URLEncode(bytes), nil
+}
+
+// GenerateCodeChallenge generates a PKCE code challenge using S256 method
+func GenerateCodeChallenge(verifier string) string {
+	hash := sha256.Sum256([]byte(verifier))
+	return base64URLEncode(hash[:])
+}
+
+// base64URLEncode encodes bytes to base64url without padding
+func base64URLEncode(data []byte) string {
+	encoded := base64.URLEncoding.EncodeToString(data)
+	// Remove padding
+	return strings.TrimRight(encoded, "=")
+}
+
+// BuildAuthorizationURL builds the OAuth authorization URL
+func BuildAuthorizationURL(state, codeChallenge, scope string) string {
+	params := url.Values{}
+	params.Set("response_type", "code")
+	params.Set("client_id", ClientID)
+	params.Set("redirect_uri", RedirectURI)
+	params.Set("scope", scope)
+	params.Set("state", state)
+	params.Set("code_challenge", codeChallenge)
+	params.Set("code_challenge_method", "S256")
+
+	return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode())
+}
+
+// TokenRequest represents the token exchange request body
+type TokenRequest struct {
+	GrantType    string `json:"grant_type"`
+	ClientID     string `json:"client_id"`
+	Code         string `json:"code"`
+	RedirectURI  string `json:"redirect_uri"`
+	CodeVerifier string `json:"code_verifier"`
+	State        string `json:"state"`
+}
+
+// TokenResponse represents the token response from OAuth provider
+type TokenResponse struct {
+	AccessToken  string `json:"access_token"`
+	TokenType    string `json:"token_type"`
+	ExpiresIn    int64  `json:"expires_in"`
+	RefreshToken string `json:"refresh_token,omitempty"`
+	Scope        string `json:"scope,omitempty"`
+	// Organization and Account info from OAuth response
+	Organization *OrgInfo     `json:"organization,omitempty"`
+	Account      *AccountInfo `json:"account,omitempty"`
+}
+
+// OrgInfo represents organization info from OAuth response
+type OrgInfo struct {
+	UUID string `json:"uuid"`
+}
+
+// AccountInfo represents account info from OAuth response
+type AccountInfo struct {
+	UUID string `json:"uuid"`
+}
+
+// RefreshTokenRequest represents the refresh token request
+type RefreshTokenRequest struct {
+	GrantType    string `json:"grant_type"`
+	RefreshToken string `json:"refresh_token"`
+	ClientID     string `json:"client_id"`
+}
+
+// BuildTokenRequest creates a token exchange request
+func BuildTokenRequest(code, codeVerifier, state string) *TokenRequest {
+	return &TokenRequest{
+		GrantType:    "authorization_code",
+		ClientID:     ClientID,
+		Code:         code,
+		RedirectURI:  RedirectURI,
+		CodeVerifier: codeVerifier,
+		State:        state,
+	}
+}
+
+// BuildRefreshTokenRequest creates a refresh token request
+func BuildRefreshTokenRequest(refreshToken string) *RefreshTokenRequest {
+	return &RefreshTokenRequest{
+		GrantType:    "refresh_token",
+		RefreshToken: refreshToken,
+		ClientID:     ClientID,
+	}
+}
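Editor's note: a sketch of how the PKCE helpers chain together; `code` is assumed to come from the callback, and error handling is elided for brevity.

    // Illustrative only: wiring the PKCE helpers into an authorize URL.
    state, _ := oauth.GenerateState()
    verifier, _ := oauth.GenerateCodeVerifier()
    authURL := oauth.BuildAuthorizationURL(state, oauth.GenerateCodeChallenge(verifier),
        oauth.ScopeProfile+" "+oauth.ScopeInference)
    // After the user completes authorization and the code is pasted back:
    tokenReq := oauth.BuildTokenRequest(code, verifier, state) // code comes from the callback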
diff --git a/backend/internal/pkg/openai/constants.go b/backend/internal/pkg/openai/constants.go
new file mode 100644
index 00000000..4fab3359
--- /dev/null
+++ b/backend/internal/pkg/openai/constants.go
@@ -0,0 +1,43 @@
+// Package openai provides helpers and types for OpenAI API integration.
+package openai
+
+import _ "embed"
+
+// Model represents an OpenAI model
+type Model struct {
+	ID          string `json:"id"`
+	Object      string `json:"object"`
+	Created     int64  `json:"created"`
+	OwnedBy     string `json:"owned_by"`
+	Type        string `json:"type"`
+	DisplayName string `json:"display_name"`
+}
+
+// DefaultModels OpenAI models list
+var DefaultModels = []Model{
+	{ID: "gpt-5.2", Object: "model", Created: 1733875200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2"},
+	{ID: "gpt-5.2-codex", Object: "model", Created: 1733011200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2 Codex"},
+	{ID: "gpt-5.1-codex-max", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex Max"},
+	{ID: "gpt-5.1-codex", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex"},
+	{ID: "gpt-5.1", Object: "model", Created: 1731456000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1"},
+	{ID: "gpt-5.1-codex-mini", Object: "model", Created: 1730419200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.1 Codex Mini"},
+	{ID: "gpt-5", Object: "model", Created: 1722988800, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5"},
+}
+
+// DefaultModelIDs returns the default model ID list
+func DefaultModelIDs() []string {
+	ids := make([]string, len(DefaultModels))
+	for i, m := range DefaultModels {
+		ids[i] = m.ID
+	}
+	return ids
+}
+
+// DefaultTestModel default model for testing OpenAI accounts
+const DefaultTestModel = "gpt-5.1-codex"
+
+// DefaultInstructions default instructions for non-Codex CLI requests
+// Content loaded from instructions.txt at compile time
+//
+//go:embed instructions.txt
+var DefaultInstructions string
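Editor's note: a small sketch of what the embed provides at runtime; the handler-side check is invented.

    // Illustrative only: the file content is compiled into the binary via //go:embed.
    instructions := openai.DefaultInstructions // full text of instructions.txt
    if !isCodexCLIRequest(req) {               // hypothetical check
        payload.Instructions = instructions    // hypothetical request field
    }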
+  * NEVER revert existing changes you did not make unless explicitly requested, since these changes were made by the user.
+  * If asked to make a commit or code edits and there are unrelated changes to your work or changes that you didn't make in those files, don't revert those changes.
+  * If the changes are in files you've touched recently, you should read carefully and understand how you can work with the changes rather than reverting them.
+  * If the changes are in unrelated files, just ignore them and don't revert them.
+- Do not amend a commit unless explicitly requested to do so.
+- While you are working, you might notice unexpected changes that you didn't make. If this happens, STOP IMMEDIATELY and ask the user how they would like to proceed.
+- **NEVER** use destructive commands like `git reset --hard` or `git checkout --` unless specifically requested or approved by the user.
+
+## Plan tool
+
+When using the planning tool:
+- Skip using the planning tool for straightforward tasks (roughly the easiest 25%).
+- Do not make single-step plans.
+- When you have made a plan, update it after performing one of the sub-tasks that you shared on the plan.
+
+## Codex CLI harness, sandboxing, and approvals
+
+The Codex CLI harness supports several different configurations for sandboxing and escalation approvals that the user can choose from.
+
+Filesystem sandboxing defines which files can be read or written. The options for `sandbox_mode` are:
+- **read-only**: The sandbox only permits reading files.
+- **workspace-write**: The sandbox permits reading files, and editing files in `cwd` and `writable_roots`. Editing files in other directories requires approval.
+- **danger-full-access**: No filesystem sandboxing - all commands are permitted.
+
+Network sandboxing defines whether network can be accessed without approval. Options for `network_access` are:
+- **restricted**: Requires approval
+- **enabled**: No approval needed
+
+Approvals are your mechanism to get user consent to run shell commands without the sandbox. Possible configuration options for `approval_policy` are:
+- **untrusted**: The harness will escalate most commands for user approval, apart from a limited allowlist of safe "read" commands.
+- **on-failure**: The harness will allow all commands to run in the sandbox (if enabled), and failures will be escalated to the user for approval to run again without the sandbox.
+- **on-request**: Commands will be run in the sandbox by default, and you can specify in your tool call if you want to escalate a command to run without sandboxing. (Note that this mode is not always available. If it is, you'll see parameters for it in the `shell` command description.)
+- **never**: This is a non-interactive mode where you may NEVER ask the user for approval to run commands. Instead, you must always persist and work around constraints to solve the task for the user. You MUST do your utmost best to finish the task and validate your work before yielding. If this mode is paired with `danger-full-access`, take advantage of it to deliver the best outcome for the user. Further, in this mode, your default testing philosophy is overridden: Even if you don't see local patterns for testing, you may add tests and scripts to validate your work. Just remove them before yielding.
+
+When you are running with `approval_policy == on-request`, and sandboxing enabled, here are scenarios where you'll need to request approval:
+- You need to run a command that writes to a directory that requires it (e.g. running tests that write to /var)
+- You need to run a GUI app (e.g., open/xdg-open/osascript) to open browsers or files.
+- You are running sandboxed and need to run a command that requires network access (e.g. installing packages)
+- If you run a command that is important to solving the user's query, but it fails because of sandboxing, rerun the command with approval. ALWAYS proceed to use the `sandbox_permissions` and `justification` parameters - do not message the user before requesting approval for the command.
+- You are about to take a potentially destructive action such as an `rm` or `git reset` that the user did not explicitly ask for
+- (for all of these, you should weigh alternative paths that do not require approval)
+
+When `sandbox_mode` is set to read-only, you'll need to request approval for any command that isn't a read.
+
+You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
+
+Although approvals introduce friction because your work is paused until the user responds, you should leverage them when necessary to accomplish important work. If completing the task requires escalated permissions, do not let these settings or the sandbox deter you from attempting to accomplish the user's task, unless the approval policy is set to "never", in which case never ask for approvals.
+
+When requesting approval to execute a command that will require escalated privileges:
+  - Provide the `sandbox_permissions` parameter with the value `"require_escalated"`
+  - Include a short, 1 sentence explanation for why you need escalated permissions in the justification parameter
+
+## Special user requests
+
+- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as `date`), you should do so.
+- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
+
+## Frontend tasks
+When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
+Aim for interfaces that feel intentional, bold, and a bit surprising.
+- Typography: Use expressive, purposeful fonts and avoid default stacks (Inter, Roboto, Arial, system).
+- Color & Look: Choose a clear visual direction; define CSS variables; avoid purple-on-white defaults. No purple bias or dark mode bias.
+- Motion: Use a few meaningful animations (page-load, staggered reveals) instead of generic micro-motions.
+- Background: Don't rely on flat, single-color backgrounds; use gradients, shapes, or subtle patterns to build atmosphere.
+- Overall: Avoid boilerplate layouts and interchangeable UI patterns. Vary themes, type families, and visual languages across outputs.
+- Ensure the page loads properly on both desktop and mobile
+
+Exception: If working within an existing website or design system, preserve the established patterns, structure, and visual language.
+
+## Presenting your work and final message
+
+You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
+
+- Default: be very concise; friendly coding teammate tone.
+- Ask only when needed; suggest ideas; mirror the user's style.
+- For substantial work, summarize clearly; follow final-answer formatting.
+- Skip heavy formatting for simple confirmations.
+- Don't dump large files you've written; reference paths only.
+- No "save/copy this file" - User is on the same machine.
+- Offer logical next steps (tests, commits, build) briefly; add verify steps if you couldn't do something.
+- For code changes:
+  * Lead with a quick explanation of the change, and then give more details on the context covering where and why a change was made. Do not start this explanation with "summary", just jump right in.
+  * If there are natural next steps the user may want to take, suggest them at the end of your response. Do not make suggestions if there are no natural next steps.
+  * When suggesting multiple options, use numeric lists for the suggestions so the user can quickly respond with a single number.
+- The user does not see command execution outputs. When asked to show the output of a command (e.g. `git show`), relay the important details in your answer or summarize the key lines so the user understands the result.
+
+### Final answer structure and style guidelines
+
+- Plain text; CLI handles styling. Use structure only when it helps scanability.
+- Headers: optional; short Title Case (1-3 words) wrapped in **…**; no blank line before the first bullet; add only if they truly help.
+- Bullets: use - ; merge related points; keep to one line when possible; 4–6 per list ordered by importance; keep phrasing consistent.
+- Monospace: backticks for commands/paths/env vars/code ids and inline examples; use for literal keyword bullets; never combine with **.
+- Code samples or multi-line snippets should be wrapped in fenced code blocks; include an info string as often as possible.
+- Structure: group related bullets; order sections general → specific → supporting; for subsections, start with a bolded keyword bullet, then items; match complexity to the task.
+- Tone: collaborative, concise, factual; present tense, active voice; self-contained; no "above/below"; parallel wording.
+- Don'ts: no nested bullets/hierarchies; no ANSI codes; don't cram unrelated keywords; keep keyword lists short and wrap/reformat if long; avoid naming formatting styles in answers.
+- Adaptation: code explanations → precise, structured with code refs; simple tasks → lead with outcome; big changes → logical walkthrough + rationale + next actions; casual one-offs → plain sentences, no headers/bullets.
+- File References: When referencing files in your response follow the below rules:
+  * Use inline code to make file paths clickable.
+  * Each reference should have a stand alone path. Even if it's the same file.
+  * Accepted: absolute, workspace-relative, a/ or b/ diff prefixes, or bare filename/suffix.
+  * Optionally include line/column (1-based): :line[:column] or #Lline[Ccolumn] (column defaults to 1).
+  * Do not use URIs like file://, vscode://, or https://.
+  * Do not provide range of lines
+  * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5
\ No newline at end of file
diff --git a/backend/internal/pkg/openai/oauth.go b/backend/internal/pkg/openai/oauth.go
new file mode 100644
index 00000000..df972a13
--- /dev/null
+++ b/backend/internal/pkg/openai/oauth.go
@@ -0,0 +1,366 @@
+package openai
+
+import (
+    "crypto/rand"
+    "crypto/sha256"
+    "encoding/base64"
+    "encoding/hex"
+    "encoding/json"
+    "fmt"
+    "net/url"
+    "strings"
+    "sync"
+    "time"
+)
+
+// OpenAI OAuth constants (from the CRS project - Codex CLI client)
+const (
+    // OAuth Client ID for OpenAI (Codex CLI official)
+    ClientID = "app_EMoamEEZ73f0CkXaXp7hrann"
+
+    // OAuth endpoints
+    AuthorizeURL = "https://auth.openai.com/oauth/authorize"
+    TokenURL     = "https://auth.openai.com/oauth/token"
+
+    // Default redirect URI (can be customized)
+    DefaultRedirectURI = "http://localhost:1455/auth/callback"
+
+    // Scopes
+    DefaultScopes = "openid profile email offline_access"
+    // RefreshScopes - scope for token refresh (without offline_access, aligned with the CRS project)
+    RefreshScopes = "openid profile email"
+
+    // Session TTL
+    SessionTTL = 30 * time.Minute
+)
+
+// OAuthSession stores OAuth flow state for OpenAI
+type OAuthSession struct {
+    State        string    `json:"state"`
+    CodeVerifier string    `json:"code_verifier"`
+    ProxyURL     string    `json:"proxy_url,omitempty"`
+    RedirectURI  string    `json:"redirect_uri"`
+    CreatedAt    time.Time `json:"created_at"`
+}
+
+// SessionStore manages OAuth sessions in memory
+type SessionStore struct {
+    mu       sync.RWMutex
+    sessions map[string]*OAuthSession
+    stopCh   chan struct{}
+}
+
+// NewSessionStore creates a new session store
+func NewSessionStore() *SessionStore {
+    store := &SessionStore{
+        sessions: make(map[string]*OAuthSession),
+        stopCh:   make(chan struct{}),
+    }
+    // Start cleanup goroutine
+    go store.cleanup()
+    return store
+}
+
+// Set stores a session
+func (s *SessionStore) Set(sessionID string, session *OAuthSession) {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    s.sessions[sessionID] = session
+}
+
+// Get retrieves a session
+func (s *SessionStore) Get(sessionID string) (*OAuthSession, bool) {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+    session, ok := s.sessions[sessionID]
+    if !ok {
+        return nil, false
+    }
+    // Check if expired
+    if time.Since(session.CreatedAt) > SessionTTL {
+        return nil, false
+    }
+    return session, true
+}
+
+// Delete removes a session
+func (s *SessionStore) Delete(sessionID string) {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    delete(s.sessions, sessionID)
+}
+
+// Stop stops the cleanup goroutine
+func (s *SessionStore) Stop() {
+    close(s.stopCh)
+}
+
+// cleanup removes expired sessions periodically
+func (s *SessionStore) cleanup() {
+    ticker := time.NewTicker(5 * time.Minute)
+    defer ticker.Stop()
+    for {
+        select {
+        case <-s.stopCh:
+            return
+        case <-ticker.C:
+            s.mu.Lock()
+            for id, session := range s.sessions {
+                if time.Since(session.CreatedAt) > SessionTTL {
+                    delete(s.sessions, id)
+                }
+            }
+            s.mu.Unlock()
+        }
+    }
+}
+
+// GenerateRandomBytes generates cryptographically secure random bytes
+func GenerateRandomBytes(n int) ([]byte, error) {
+    b := make([]byte, n)
+    _, err := rand.Read(b)
+    if err != nil {
+        return nil, err
+    }
+    return b, nil
+}
+
+// GenerateState generates a random state string for OAuth
+func GenerateState() (string, error) {
+    bytes, err := GenerateRandomBytes(32)
+    if err != nil {
+        return "", err
+    }
+    return hex.EncodeToString(bytes), nil
+}
+
+// GenerateSessionID
generates a unique session ID +func GenerateSessionID() (string, error) { + bytes, err := GenerateRandomBytes(16) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateCodeVerifier generates a PKCE code verifier (64 bytes -> hex for OpenAI) +// OpenAI uses hex encoding instead of base64url +func GenerateCodeVerifier() (string, error) { + bytes, err := GenerateRandomBytes(64) + if err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} + +// GenerateCodeChallenge generates a PKCE code challenge using S256 method +// Uses base64url encoding as per RFC 7636 +func GenerateCodeChallenge(verifier string) string { + hash := sha256.Sum256([]byte(verifier)) + return base64URLEncode(hash[:]) +} + +// base64URLEncode encodes bytes to base64url without padding +func base64URLEncode(data []byte) string { + encoded := base64.URLEncoding.EncodeToString(data) + // Remove padding + return strings.TrimRight(encoded, "=") +} + +// BuildAuthorizationURL builds the OpenAI OAuth authorization URL +func BuildAuthorizationURL(state, codeChallenge, redirectURI string) string { + if redirectURI == "" { + redirectURI = DefaultRedirectURI + } + + params := url.Values{} + params.Set("response_type", "code") + params.Set("client_id", ClientID) + params.Set("redirect_uri", redirectURI) + params.Set("scope", DefaultScopes) + params.Set("state", state) + params.Set("code_challenge", codeChallenge) + params.Set("code_challenge_method", "S256") + // OpenAI specific parameters + params.Set("id_token_add_organizations", "true") + params.Set("codex_cli_simplified_flow", "true") + + return fmt.Sprintf("%s?%s", AuthorizeURL, params.Encode()) +} + +// TokenRequest represents the token exchange request body +type TokenRequest struct { + GrantType string `json:"grant_type"` + ClientID string `json:"client_id"` + Code string `json:"code"` + RedirectURI string `json:"redirect_uri"` + CodeVerifier string `json:"code_verifier"` +} + +// TokenResponse represents the token response from OpenAI OAuth +type TokenResponse struct { + AccessToken string `json:"access_token"` + IDToken string `json:"id_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + RefreshToken string `json:"refresh_token,omitempty"` + Scope string `json:"scope,omitempty"` +} + +// RefreshTokenRequest represents the refresh token request +type RefreshTokenRequest struct { + GrantType string `json:"grant_type"` + RefreshToken string `json:"refresh_token"` + ClientID string `json:"client_id"` + Scope string `json:"scope"` +} + +// IDTokenClaims represents the claims from OpenAI ID Token +type IDTokenClaims struct { + // Standard claims + Sub string `json:"sub"` + Email string `json:"email"` + EmailVerified bool `json:"email_verified"` + Iss string `json:"iss"` + Aud []string `json:"aud"` // OpenAI returns aud as an array + Exp int64 `json:"exp"` + Iat int64 `json:"iat"` + + // OpenAI specific claims (nested under https://api.openai.com/auth) + OpenAIAuth *OpenAIAuthClaims `json:"https://api.openai.com/auth,omitempty"` +} + +// OpenAIAuthClaims represents the OpenAI specific auth claims +type OpenAIAuthClaims struct { + ChatGPTAccountID string `json:"chatgpt_account_id"` + ChatGPTUserID string `json:"chatgpt_user_id"` + UserID string `json:"user_id"` + Organizations []OrganizationClaim `json:"organizations"` +} + +// OrganizationClaim represents an organization in the ID Token +type OrganizationClaim struct { + ID string `json:"id"` + Role string `json:"role"` + Title string 
`json:"title"` + IsDefault bool `json:"is_default"` +} + +// BuildTokenRequest creates a token exchange request for OpenAI +func BuildTokenRequest(code, codeVerifier, redirectURI string) *TokenRequest { + if redirectURI == "" { + redirectURI = DefaultRedirectURI + } + return &TokenRequest{ + GrantType: "authorization_code", + ClientID: ClientID, + Code: code, + RedirectURI: redirectURI, + CodeVerifier: codeVerifier, + } +} + +// BuildRefreshTokenRequest creates a refresh token request for OpenAI +func BuildRefreshTokenRequest(refreshToken string) *RefreshTokenRequest { + return &RefreshTokenRequest{ + GrantType: "refresh_token", + RefreshToken: refreshToken, + ClientID: ClientID, + Scope: RefreshScopes, + } +} + +// ToFormData converts TokenRequest to URL-encoded form data +func (r *TokenRequest) ToFormData() string { + params := url.Values{} + params.Set("grant_type", r.GrantType) + params.Set("client_id", r.ClientID) + params.Set("code", r.Code) + params.Set("redirect_uri", r.RedirectURI) + params.Set("code_verifier", r.CodeVerifier) + return params.Encode() +} + +// ToFormData converts RefreshTokenRequest to URL-encoded form data +func (r *RefreshTokenRequest) ToFormData() string { + params := url.Values{} + params.Set("grant_type", r.GrantType) + params.Set("client_id", r.ClientID) + params.Set("refresh_token", r.RefreshToken) + params.Set("scope", r.Scope) + return params.Encode() +} + +// ParseIDToken parses the ID Token JWT and extracts claims +// Note: This does NOT verify the signature - it only decodes the payload +// For production, you should verify the token signature using OpenAI's public keys +func ParseIDToken(idToken string) (*IDTokenClaims, error) { + parts := strings.Split(idToken, ".") + if len(parts) != 3 { + return nil, fmt.Errorf("invalid JWT format: expected 3 parts, got %d", len(parts)) + } + + // Decode payload (second part) + payload := parts[1] + // Add padding if necessary + switch len(payload) % 4 { + case 2: + payload += "==" + case 3: + payload += "=" + } + + decoded, err := base64.URLEncoding.DecodeString(payload) + if err != nil { + // Try standard encoding + decoded, err = base64.StdEncoding.DecodeString(payload) + if err != nil { + return nil, fmt.Errorf("failed to decode JWT payload: %w", err) + } + } + + var claims IDTokenClaims + if err := json.Unmarshal(decoded, &claims); err != nil { + return nil, fmt.Errorf("failed to parse JWT claims: %w", err) + } + + return &claims, nil +} + +// UserInfo represents user information extracted from ID Token claims. 
+type UserInfo struct { + Email string + ChatGPTAccountID string + ChatGPTUserID string + UserID string + OrganizationID string + Organizations []OrganizationClaim +} + +// GetUserInfo extracts user info from ID Token claims +func (c *IDTokenClaims) GetUserInfo() *UserInfo { + info := &UserInfo{ + Email: c.Email, + } + + if c.OpenAIAuth != nil { + info.ChatGPTAccountID = c.OpenAIAuth.ChatGPTAccountID + info.ChatGPTUserID = c.OpenAIAuth.ChatGPTUserID + info.UserID = c.OpenAIAuth.UserID + info.Organizations = c.OpenAIAuth.Organizations + + // Get default organization ID + for _, org := range c.OpenAIAuth.Organizations { + if org.IsDefault { + info.OrganizationID = org.ID + break + } + } + // If no default, use first org + if info.OrganizationID == "" && len(c.OpenAIAuth.Organizations) > 0 { + info.OrganizationID = c.OpenAIAuth.Organizations[0].ID + } + } + + return info +} diff --git a/backend/internal/pkg/openai/request.go b/backend/internal/pkg/openai/request.go new file mode 100644 index 00000000..5b049ddc --- /dev/null +++ b/backend/internal/pkg/openai/request.go @@ -0,0 +1,18 @@ +package openai + +// CodexCLIUserAgentPrefixes matches Codex CLI User-Agent patterns +// Examples: "codex_vscode/1.0.0", "codex_cli_rs/0.1.2" +var CodexCLIUserAgentPrefixes = []string{ + "codex_vscode/", + "codex_cli_rs/", +} + +// IsCodexCLIRequest checks if the User-Agent indicates a Codex CLI request +func IsCodexCLIRequest(userAgent string) bool { + for _, prefix := range CodexCLIUserAgentPrefixes { + if len(userAgent) >= len(prefix) && userAgent[:len(prefix)] == prefix { + return true + } + } + return false +} diff --git a/backend/internal/pkg/pagination/pagination.go b/backend/internal/pkg/pagination/pagination.go new file mode 100644 index 00000000..c162588a --- /dev/null +++ b/backend/internal/pkg/pagination/pagination.go @@ -0,0 +1,43 @@ +// Package pagination provides types and helpers for paginated responses. 
+package pagination
+
+// PaginationParams holds pagination parameters
+type PaginationParams struct {
+    Page     int
+    PageSize int
+}
+
+// PaginationResult holds pagination result metadata
+type PaginationResult struct {
+    Total    int64
+    Page     int
+    PageSize int
+    Pages    int
+}
+
+// DefaultPagination returns the default pagination parameters
+func DefaultPagination() PaginationParams {
+    return PaginationParams{
+        Page:     1,
+        PageSize: 20,
+    }
+}
+
+// Offset computes the query offset
+func (p PaginationParams) Offset() int {
+    if p.Page < 1 {
+        p.Page = 1
+    }
+    return (p.Page - 1) * p.PageSize
+}
+
+// Limit returns the page size, capped to 100 and defaulting to 20
+func (p PaginationParams) Limit() int {
+    if p.PageSize < 1 {
+        return 20
+    }
+    if p.PageSize > 100 {
+        return 100
+    }
+    return p.PageSize
+}
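To make the intended composition concrete, here is a minimal sketch of these helpers feeding a SQL query; the table, `db`, and `ctx` are illustrative, not part of this package:

```go
// Sketch: applying PaginationParams to a query. Only Offset/Limit come
// from the pagination package above; the query itself is hypothetical.
params := pagination.PaginationParams{Page: 3, PageSize: 50}
rows, err := db.QueryContext(ctx,
    "SELECT id, name FROM accounts ORDER BY id LIMIT $1 OFFSET $2",
    params.Limit(),  // capped to [1, 100], defaults to 20 when unset
    params.Offset(), // (page-1)*page_size = 100 here
)
```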
diff --git a/backend/internal/pkg/proxyutil/dialer.go b/backend/internal/pkg/proxyutil/dialer.go
new file mode 100644
index 00000000..91b224a2
--- /dev/null
+++ b/backend/internal/pkg/proxyutil/dialer.go
@@ -0,0 +1,62 @@
+// Package proxyutil provides unified proxy configuration helpers.
+//
+// Supported proxy protocols:
+//   - HTTP/HTTPS: configured via Transport.Proxy
+//   - SOCKS5/SOCKS5H: configured via Transport.DialContext (DNS is resolved by the proxy server)
+package proxyutil
+
+import (
+    "context"
+    "fmt"
+    "net"
+    "net/http"
+    "net/url"
+    "strings"
+
+    "golang.org/x/net/proxy"
+)
+
+// ConfigureTransportProxy configures a Transport according to the proxy URL.
+//
+// Supported schemes:
+//   - http/https: sets transport.Proxy
+//   - socks5/socks5h: sets transport.DialContext (DNS is resolved by the proxy server)
+//
+// Parameters:
+//   - transport: the http.Transport to configure
+//   - proxyURL: the proxy address; nil means a direct connection
+//
+// Returns:
+//   - error: proxy configuration error (unsupported scheme or dialer creation failure)
+func ConfigureTransportProxy(transport *http.Transport, proxyURL *url.URL) error {
+    if proxyURL == nil {
+        return nil
+    }
+
+    scheme := strings.ToLower(proxyURL.Scheme)
+    switch scheme {
+    case "http", "https":
+        transport.Proxy = http.ProxyURL(proxyURL)
+        return nil
+
+    case "socks5", "socks5h":
+        dialer, err := proxy.FromURL(proxyURL, proxy.Direct)
+        if err != nil {
+            return fmt.Errorf("create socks5 dialer: %w", err)
+        }
+        // Prefer a context-aware DialContext so request cancellation and timeouts work
+        if contextDialer, ok := dialer.(proxy.ContextDialer); ok {
+            transport.DialContext = contextDialer.DialContext
+        } else {
+            // Fallback: wrap a plain Dial when the dialer does not implement ContextDialer.
+            // Note: this fallback does not honor request cancellation or timeouts.
+            transport.DialContext = func(_ context.Context, network, addr string) (net.Conn, error) {
+                return dialer.Dial(network, addr)
+            }
+        }
+        return nil
+
+    default:
+        return fmt.Errorf("unsupported proxy scheme: %s", scheme)
+    }
+}
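As a usage sketch (the endpoint and credentials are placeholders), a caller would wire this into an `http.Client` like so:

```go
// Sketch: building an HTTP client that tunnels through a SOCKS5 proxy,
// using only the exported ConfigureTransportProxy above.
proxyURL, err := url.Parse("socks5h://user:pass@127.0.0.1:1080") // placeholder address
if err != nil {
    log.Fatal(err)
}
transport := &http.Transport{}
if err := proxyutil.ConfigureTransportProxy(transport, proxyURL); err != nil {
    log.Fatal(err) // e.g. "unsupported proxy scheme: ftp"
}
client := &http.Client{Transport: transport, Timeout: 30 * time.Second}
```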
diff --git a/backend/internal/pkg/proxyutil/dialer_test.go b/backend/internal/pkg/proxyutil/dialer_test.go
new file mode 100644
index 00000000..f153cc9f
--- /dev/null
+++ b/backend/internal/pkg/proxyutil/dialer_test.go
@@ -0,0 +1,204 @@
+package proxyutil
+
+import (
+    "net/http"
+    "net/url"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestConfigureTransportProxy_Nil(t *testing.T) {
+    transport := &http.Transport{}
+    err := ConfigureTransportProxy(transport, nil)
+
+    require.NoError(t, err)
+    assert.Nil(t, transport.Proxy, "nil proxy should not set Proxy")
+    assert.Nil(t, transport.DialContext, "nil proxy should not set DialContext")
+}
+
+func TestConfigureTransportProxy_HTTP(t *testing.T) {
+    transport := &http.Transport{}
+    proxyURL, _ := url.Parse("http://proxy.example.com:8080")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.NotNil(t, transport.Proxy, "HTTP proxy should set Proxy")
+    assert.Nil(t, transport.DialContext, "HTTP proxy should not set DialContext")
+}
+
+func TestConfigureTransportProxy_HTTPS(t *testing.T) {
+    transport := &http.Transport{}
+    proxyURL, _ := url.Parse("https://secure-proxy.example.com:8443")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.NotNil(t, transport.Proxy, "HTTPS proxy should set Proxy")
+    assert.Nil(t, transport.DialContext, "HTTPS proxy should not set DialContext")
+}
+
+func TestConfigureTransportProxy_SOCKS5(t *testing.T) {
+    transport := &http.Transport{}
+    proxyURL, _ := url.Parse("socks5://socks.example.com:1080")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.Nil(t, transport.Proxy, "SOCKS5 proxy should not set Proxy")
+    assert.NotNil(t, transport.DialContext, "SOCKS5 proxy should set DialContext")
+}
+
+func TestConfigureTransportProxy_SOCKS5H(t *testing.T) {
+    transport := &http.Transport{}
+    proxyURL, _ := url.Parse("socks5h://socks.example.com:1080")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.Nil(t, transport.Proxy, "SOCKS5H proxy should not set Proxy")
+    assert.NotNil(t, transport.DialContext, "SOCKS5H proxy should set DialContext")
+}
+
+func TestConfigureTransportProxy_CaseInsensitive(t *testing.T) {
+    testCases := []struct {
+        scheme   string
+        useProxy bool // true = uses Transport.Proxy, false = uses DialContext
+    }{
+        {"HTTP://proxy.example.com:8080", true},
+        {"Http://proxy.example.com:8080", true},
+        {"HTTPS://proxy.example.com:8443", true},
+        {"Https://proxy.example.com:8443", true},
+        {"SOCKS5://socks.example.com:1080", false},
+        {"Socks5://socks.example.com:1080", false},
+        {"SOCKS5H://socks.example.com:1080", false},
+        {"Socks5h://socks.example.com:1080", false},
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.scheme, func(t *testing.T) {
+            transport := &http.Transport{}
+            proxyURL, _ := url.Parse(tc.scheme)
+
+            err := ConfigureTransportProxy(transport, proxyURL)
+
+            require.NoError(t, err)
+            if tc.useProxy {
+                assert.NotNil(t, transport.Proxy)
+                assert.Nil(t, transport.DialContext)
+            } else {
+                assert.Nil(t, transport.Proxy)
+                assert.NotNil(t, transport.DialContext)
+            }
+        })
+    }
+}
+
+func TestConfigureTransportProxy_Unsupported(t *testing.T) {
+    testCases := []string{
+        "ftp://ftp.example.com",
+        "file:///path/to/file",
+        "unknown://example.com",
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc, func(t *testing.T) {
+            transport := &http.Transport{}
+            proxyURL, _ := url.Parse(tc)
+
+            err := ConfigureTransportProxy(transport, proxyURL)
+
+            require.Error(t, err)
+            assert.Contains(t, err.Error(), "unsupported proxy scheme")
+        })
+    }
+}
+
+func TestConfigureTransportProxy_WithAuth(t *testing.T) {
+    transport := &http.Transport{}
+    proxyURL, _ := url.Parse("socks5://user:password@socks.example.com:1080")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.NotNil(t, transport.DialContext, "SOCKS5 with auth should set DialContext")
+}
+
+func TestConfigureTransportProxy_EmptyScheme(t *testing.T) {
+    transport := &http.Transport{}
+    // URL with an empty scheme
+    proxyURL := &url.URL{Host: "proxy.example.com:8080"}
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.Error(t, err)
+    assert.Contains(t, err.Error(), "unsupported proxy scheme")
+}
+
+func TestConfigureTransportProxy_PreservesExistingConfig(t *testing.T) {
+    // Verify that proxy configuration does not clobber other Transport settings
+    transport := &http.Transport{
+        MaxIdleConns:        100,
+        MaxIdleConnsPerHost: 10,
+    }
+    proxyURL, _ := url.Parse("socks5://socks.example.com:1080")
+
+    err := ConfigureTransportProxy(transport, proxyURL)
+
+    require.NoError(t, err)
+    assert.Equal(t, 100, transport.MaxIdleConns, "MaxIdleConns should be preserved")
+    assert.Equal(t, 10, transport.MaxIdleConnsPerHost, "MaxIdleConnsPerHost should be preserved")
+    assert.NotNil(t, transport.DialContext, "DialContext should be set")
+}
+
+func TestConfigureTransportProxy_IPv6(t *testing.T) {
+    testCases := []struct {
+        name     string
+        proxyURL string
+    }{
+        {"SOCKS5H with IPv6 loopback", "socks5h://[::1]:1080"},
+        {"SOCKS5 with full IPv6", "socks5://[2001:db8::1]:1080"},
+        {"HTTP with IPv6", "http://[::1]:8080"},
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            transport := &http.Transport{}
+            proxyURL, err := url.Parse(tc.proxyURL)
+            require.NoError(t, err, "URL should be parseable")
+
+            err = ConfigureTransportProxy(transport, proxyURL)
+            require.NoError(t, err)
+        })
+    }
+}
+
+func TestConfigureTransportProxy_SpecialCharsInPassword(t *testing.T) {
+    testCases := []struct {
+        name     string
+        proxyURL string
+    }{
+        // password containing @ (URL-encoded as %40)
+        {"password with @", "socks5://user:p%40ssword@proxy.example.com:1080"},
+        // password containing : (URL-encoded as %3A)
+        {"password with :", "socks5://user:pass%3Aword@proxy.example.com:1080"},
+        // password containing / (URL-encoded as %2F)
+        {"password with /", "socks5://user:pass%2Fword@proxy.example.com:1080"},
+        // complex password
+        {"complex password", "socks5h://admin:P%40ss%3Aw0rd%2F123@proxy.example.com:1080"},
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            transport := &http.Transport{}
+            proxyURL, err := url.Parse(tc.proxyURL)
+            require.NoError(t, err, "URL should be parseable")
+
+            err = ConfigureTransportProxy(transport, proxyURL)
+            require.NoError(t, err)
+            assert.NotNil(t, transport.DialContext, "SOCKS5 should set DialContext")
+        })
+    }
+}
diff --git a/backend/internal/pkg/response/response.go b/backend/internal/pkg/response/response.go
new file mode 100644
index 00000000..a92ff9e8
--- /dev/null
+++ b/backend/internal/pkg/response/response.go
@@ -0,0 +1,186 @@
+// Package response provides standardized HTTP response helpers.
+package response
+
+import (
+    "math"
+    "net/http"
+    "strconv"
+
+    infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+    "github.com/gin-gonic/gin"
+)
+
+// Response is the standard API response envelope
+type Response struct {
+    Code     int               `json:"code"`
+    Message  string            `json:"message"`
+    Reason   string            `json:"reason,omitempty"`
+    Metadata map[string]string `json:"metadata,omitempty"`
+    Data     any               `json:"data,omitempty"`
+}
+
+// PaginatedData is the paginated payload shape (matches what the frontend expects)
+type PaginatedData struct {
+    Items    any   `json:"items"`
+    Total    int64 `json:"total"`
+    Page     int   `json:"page"`
+    PageSize int   `json:"page_size"`
+    Pages    int   `json:"pages"`
+}
+
+// Success writes a success response
+func Success(c *gin.Context, data any) {
+    c.JSON(http.StatusOK, Response{
+        Code:    0,
+        Message: "success",
+        Data:    data,
+    })
+}
+
+// Created writes a resource-created response
+func Created(c *gin.Context, data any) {
+    c.JSON(http.StatusCreated, Response{
+        Code:    0,
+        Message: "success",
+        Data:    data,
+    })
+}
+
+// Error writes an error response
+func Error(c *gin.Context, statusCode int, message string) {
+    c.JSON(statusCode, Response{
+        Code:     statusCode,
+        Message:  message,
+        Reason:   "",
+        Metadata: nil,
+    })
+}
+
+// ErrorWithDetails returns an error response compatible with the existing envelope while
+// optionally providing structured error fields (reason/metadata).
+func ErrorWithDetails(c *gin.Context, statusCode int, message, reason string, metadata map[string]string) {
+    c.JSON(statusCode, Response{
+        Code:     statusCode,
+        Message:  message,
+        Reason:   reason,
+        Metadata: metadata,
+    })
+}
+
+// ErrorFrom converts an ApplicationError (or any error) into the envelope-compatible error response.
+// It returns true if an error was written.
+func ErrorFrom(c *gin.Context, err error) bool {
+    if err == nil {
+        return false
+    }
+
+    statusCode, status := infraerrors.ToHTTP(err)
+    ErrorWithDetails(c, statusCode, status.Message, status.Reason, status.Metadata)
+    return true
+}
+
+// BadRequest writes a 400 error
+func BadRequest(c *gin.Context, message string) {
+    Error(c, http.StatusBadRequest, message)
+}
+
+// Unauthorized writes a 401 error
+func Unauthorized(c *gin.Context, message string) {
+    Error(c, http.StatusUnauthorized, message)
+}
+
+// Forbidden writes a 403 error
+func Forbidden(c *gin.Context, message string) {
+    Error(c, http.StatusForbidden, message)
+}
+
+// NotFound writes a 404 error
+func NotFound(c *gin.Context, message string) {
+    Error(c, http.StatusNotFound, message)
+}
+
+// InternalError writes a 500 error
+func InternalError(c *gin.Context, message string) {
+    Error(c, http.StatusInternalServerError, message)
+}
+
+// Paginated writes a paginated payload
+func Paginated(c *gin.Context, items any, total int64, page, pageSize int) {
+    pages := int(math.Ceil(float64(total) / float64(pageSize)))
+    if pages < 1 {
+        pages = 1
+    }
+
+    Success(c, PaginatedData{
+        Items:    items,
+        Total:    total,
+        Page:     page,
+        PageSize: pageSize,
+        Pages:    pages,
+    })
+}
+
+// PaginationResult mirrors pagination.PaginationResult
+type PaginationResult struct {
+    Total    int64
+    Page     int
+    PageSize int
+    Pages    int
+}
+
+// PaginatedWithResult writes paginated data from a PaginationResult
+func PaginatedWithResult(c *gin.Context, items any, pagination *PaginationResult) {
+    if pagination == nil {
+        Success(c, PaginatedData{
+            Items:    items,
+            Total:    0,
+            Page:     1,
+            PageSize: 20,
+            Pages:    1,
+        })
+        return
+    }
+
+    Success(c, PaginatedData{
+        Items:    items,
+        Total:    pagination.Total,
+        Page:     pagination.Page,
+        PageSize: pagination.PageSize,
+        Pages:    pagination.Pages,
+    })
+}
+
+// ParsePagination parses pagination query parameters
+func ParsePagination(c *gin.Context) (page, pageSize int) {
+    page = 1
+    pageSize = 20
+
+    if p := c.Query("page"); p != "" {
+        if val, err := parseInt(p); err == nil && val > 0 {
+            page = val
+        }
+    }
+
+    // Accept both page_size and limit as parameter names
+    if ps := c.Query("page_size"); ps != "" {
+        if val, err := parseInt(ps); err == nil && val > 0 && val <= 100 {
+            pageSize = val
+        }
+    } else if l := c.Query("limit"); l != "" {
+        if val, err := parseInt(l); err == nil && val > 0 && val <= 100 {
+            pageSize = val
+        }
+    }
+
+    return page, pageSize
+}
+
+// parseInt parses a decimal integer, returning an error for non-numeric input
+// so that invalid values are never silently treated as valid zeroes.
+func parseInt(s string) (int, error) {
+    return strconv.Atoi(s)
+}
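A minimal handler sketch showing the intended call pattern; `UserHandler` and its service method are hypothetical, only the `response` helpers are from this package:

```go
// Sketch: ErrorFrom maps an ApplicationError to its HTTP status and
// envelope; Success wraps the payload as {"code":0,"message":"success",...}.
func (h *UserHandler) Get(c *gin.Context) {
    user, err := h.svc.GetUser(c.Request.Context(), c.Param("id")) // hypothetical service call
    if response.ErrorFrom(c, err) {
        return // error already written
    }
    response.Success(c, user)
}
```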
diff --git a/backend/internal/pkg/response/response_test.go b/backend/internal/pkg/response/response_test.go
new file mode 100644
index 00000000..ef31ca3c
--- /dev/null
+++ b/backend/internal/pkg/response/response_test.go
@@ -0,0 +1,171 @@
+//go:build unit
+
+package response
+
+import (
+    "encoding/json"
+    "errors"
+    "net/http"
+    "net/http/httptest"
+    "testing"
+
+    errors2 "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+    "github.com/gin-gonic/gin"
+    "github.com/stretchr/testify/require"
+)
+
+func TestErrorWithDetails(t *testing.T) {
+    gin.SetMode(gin.TestMode)
+
+    tests := []struct {
+        name       string
+        statusCode int
+        message    string
+        reason     string
+        metadata   map[string]string
+        want       Response
+    }{
+        {
+            name:       "plain_error",
+            statusCode: http.StatusBadRequest,
+            message:    "invalid request",
+            want: Response{
+                Code:    http.StatusBadRequest,
+                Message: "invalid request",
+            },
+        },
+        {
+            name:       "structured_error",
+            statusCode: http.StatusForbidden,
+            message:    "no access",
+            reason:     "FORBIDDEN",
+            metadata:   map[string]string{"k": "v"},
+            want: Response{
+                Code:     http.StatusForbidden,
+                Message:  "no access",
+                Reason:   "FORBIDDEN",
+                Metadata: map[string]string{"k": "v"},
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            w := httptest.NewRecorder()
+            c, _ := gin.CreateTestContext(w)
+
+            ErrorWithDetails(c, tt.statusCode, tt.message, tt.reason, tt.metadata)
+
+            require.Equal(t, tt.statusCode, w.Code)
+
+            var got Response
+            require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got))
+            require.Equal(t, tt.want, got)
+        })
+    }
+}
+
+func TestErrorFrom(t *testing.T) {
+    gin.SetMode(gin.TestMode)
+
+    tests := []struct {
+        name         string
+        err          error
+        wantWritten  bool
+        wantHTTPCode int
+        wantBody     Response
+    }{
+        {
+            name:        "nil_error",
+            err:         nil,
+            wantWritten: false,
+        },
+        {
+            name:         "application_error",
+            err:          errors2.Forbidden("FORBIDDEN", "no access").WithMetadata(map[string]string{"scope": "admin"}),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusForbidden,
+            wantBody: Response{
+                Code:     http.StatusForbidden,
+                Message:  "no access",
+                Reason:   "FORBIDDEN",
+                Metadata: map[string]string{"scope": "admin"},
+            },
+        },
+        {
+            name:         "bad_request_error",
+            err:          errors2.BadRequest("INVALID_REQUEST", "invalid request"),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusBadRequest,
+            wantBody: Response{
+                Code:    http.StatusBadRequest,
+                Message: "invalid request",
+                Reason:  "INVALID_REQUEST",
+            },
+        },
+        {
+            name:         "unauthorized_error",
+            err:          errors2.Unauthorized("UNAUTHORIZED", "unauthorized"),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusUnauthorized,
+            wantBody: Response{
+                Code:    http.StatusUnauthorized,
+                Message: "unauthorized",
+                Reason:  "UNAUTHORIZED",
+            },
+        },
+        {
+            name:         "not_found_error",
+            err:          errors2.NotFound("NOT_FOUND", "not found"),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusNotFound,
+            wantBody: Response{
+                Code:    http.StatusNotFound,
+                Message: "not found",
+                Reason:  "NOT_FOUND",
+            },
+        },
+        {
+            name:         "conflict_error",
+            err:          errors2.Conflict("CONFLICT", "conflict"),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusConflict,
+            wantBody: Response{
+                Code:    http.StatusConflict,
+                Message: "conflict",
+                Reason:  "CONFLICT",
+            },
+        },
+        {
+            name:         "unknown_error_defaults_to_500",
+            err:          errors.New("boom"),
+            wantWritten:  true,
+            wantHTTPCode: http.StatusInternalServerError,
+            wantBody: Response{
+                Code:    http.StatusInternalServerError,
+                Message: errors2.UnknownMessage,
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            w := httptest.NewRecorder()
+            c, _ := gin.CreateTestContext(w)
+
+            written := ErrorFrom(c, tt.err)
+            require.Equal(t, tt.wantWritten, written)
+
+            if !tt.wantWritten {
+                require.Equal(t, 200, w.Code)
+                require.Empty(t, w.Body.String())
+                return
+            }
+
+            require.Equal(t, tt.wantHTTPCode, w.Code)
+            var got Response
+            require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got))
+            require.Equal(t, tt.wantBody, got)
+        })
+    }
+}
diff --git a/backend/internal/pkg/sysutil/restart.go b/backend/internal/pkg/sysutil/restart.go
new file mode 100644
index 00000000..2146596f
--- /dev/null
+++ b/backend/internal/pkg/sysutil/restart.go
@@ -0,0 +1,48 @@
+// Package sysutil provides system-level utilities for process management.
+package sysutil
+
+import (
+    "log"
+    "os"
+    "runtime"
+    "time"
+)
+
+// RestartService triggers a service restart by gracefully exiting.
+//
+// This relies on systemd's Restart=always configuration to automatically
+// restart the service after it exits. This is the industry-standard approach:
+//   - Simple and reliable
+//   - No sudo permissions needed
+//   - No complex process management
+//   - Leverages systemd's native restart capability
+//
+// Prerequisites:
+//   - Linux OS with systemd
+//   - Service configured with Restart=always in the systemd unit file
+func RestartService() error {
+    if runtime.GOOS != "linux" {
+        log.Println("Service restart via exit only works on Linux with systemd")
+        return nil
+    }
+
+    log.Println("Initiating service restart by graceful exit...")
+    log.Println("systemd will automatically restart the service (Restart=always)")
+
+    // Give a moment for logs to flush and the response to be sent
+    go func() {
+        time.Sleep(100 * time.Millisecond)
+        os.Exit(0)
+    }()
+
+    return nil
+}
+
+// RestartServiceAsync is a fire-and-forget version of RestartService.
+// It logs errors instead of returning them, suitable for goroutine usage.
+func RestartServiceAsync() {
+    if err := RestartService(); err != nil {
+        log.Printf("Service restart failed: %v", err)
+        log.Println("Please restart the service manually: sudo systemctl restart sub2api")
+    }
+}
diff --git a/backend/internal/pkg/timezone/timezone.go b/backend/internal/pkg/timezone/timezone.go
new file mode 100644
index 00000000..40f6e38f
--- /dev/null
+++ b/backend/internal/pkg/timezone/timezone.go
@@ -0,0 +1,161 @@
+// Package timezone provides global timezone management for the application.
+// Similar to PHP's date_default_timezone_set, this package allows setting
+// a global timezone that affects all time.Now() calls.
+package timezone
+
+import (
+    "fmt"
+    "log"
+    "time"
+)
+
+var (
+    // location is the global timezone location
+    location *time.Location
+    // tzName stores the timezone name for logging/debugging
+    tzName string
+)
+
+// Init initializes the global timezone setting.
+// This should be called once at application startup.
+// Example timezone values: "Asia/Shanghai", "America/New_York", "UTC"
+func Init(tz string) error {
+    if tz == "" {
+        tz = "Asia/Shanghai" // Default timezone
+    }
+
+    loc, err := time.LoadLocation(tz)
+    if err != nil {
+        return fmt.Errorf("invalid timezone %q: %w", tz, err)
+    }
+
+    // Set the global Go time.Local to our timezone
+    // This affects time.Now() throughout the application
+    time.Local = loc
+    location = loc
+    tzName = tz
+
+    log.Printf("Timezone initialized: %s (UTC offset: %s)", tz, getUTCOffset(loc))
+    return nil
+}
+
+// getUTCOffset returns the current UTC offset for a location.
+// The sign is derived from the full offset so that negative offsets of
+// less than one hour are still rendered with a leading minus.
+func getUTCOffset(loc *time.Location) string {
+    _, offset := time.Now().In(loc).Zone()
+    sign := "+"
+    if offset < 0 {
+        sign = "-"
+        offset = -offset
+    }
+    hours := offset / 3600
+    minutes := (offset % 3600) / 60
+    return fmt.Sprintf("%s%02d:%02d", sign, hours, minutes)
+}
+
+// Now returns the current time in the configured timezone.
+// This is equivalent to time.Now() after Init() is called,
+// but provided for explicit timezone-aware code.
+func Now() time.Time {
+    if location == nil {
+        return time.Now()
+    }
+    return time.Now().In(location)
+}
+
+// Location returns the configured timezone location.
+func Location() *time.Location { + if location == nil { + return time.Local + } + return location +} + +// Name returns the configured timezone name. +func Name() string { + if tzName == "" { + return "Local" + } + return tzName +} + +// StartOfDay returns the start of the given day (00:00:00) in the configured timezone. +func StartOfDay(t time.Time) time.Time { + loc := Location() + t = t.In(loc) + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc) +} + +// Today returns the start of today (00:00:00) in the configured timezone. +func Today() time.Time { + return StartOfDay(Now()) +} + +// EndOfDay returns the end of the given day (23:59:59.999999999) in the configured timezone. +func EndOfDay(t time.Time) time.Time { + loc := Location() + t = t.In(loc) + return time.Date(t.Year(), t.Month(), t.Day(), 23, 59, 59, 999999999, loc) +} + +// StartOfWeek returns the start of the week (Monday 00:00:00) for the given time. +func StartOfWeek(t time.Time) time.Time { + loc := Location() + t = t.In(loc) + weekday := int(t.Weekday()) + if weekday == 0 { + weekday = 7 // Sunday is day 7 + } + return time.Date(t.Year(), t.Month(), t.Day()-weekday+1, 0, 0, 0, 0, loc) +} + +// StartOfMonth returns the start of the month (1st day 00:00:00) for the given time. +func StartOfMonth(t time.Time) time.Time { + loc := Location() + t = t.In(loc) + return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc) +} + +// ParseInLocation parses a time string in the configured timezone. +func ParseInLocation(layout, value string) (time.Time, error) { + return time.ParseInLocation(layout, value, Location()) +} + +// ParseInUserLocation parses a time string in the user's timezone. +// If userTZ is empty or invalid, falls back to the configured server timezone. +func ParseInUserLocation(layout, value, userTZ string) (time.Time, error) { + loc := Location() // default to server timezone + if userTZ != "" { + if userLoc, err := time.LoadLocation(userTZ); err == nil { + loc = userLoc + } + } + return time.ParseInLocation(layout, value, loc) +} + +// NowInUserLocation returns the current time in the user's timezone. +// If userTZ is empty or invalid, falls back to the configured server timezone. +func NowInUserLocation(userTZ string) time.Time { + if userTZ == "" { + return Now() + } + if userLoc, err := time.LoadLocation(userTZ); err == nil { + return time.Now().In(userLoc) + } + return Now() +} + +// StartOfDayInUserLocation returns the start of the given day in the user's timezone. +// If userTZ is empty or invalid, falls back to the configured server timezone. 
+func StartOfDayInUserLocation(t time.Time, userTZ string) time.Time { + loc := Location() + if userTZ != "" { + if userLoc, err := time.LoadLocation(userTZ); err == nil { + loc = userLoc + } + } + t = t.In(loc) + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc) +} diff --git a/backend/internal/pkg/timezone/timezone_test.go b/backend/internal/pkg/timezone/timezone_test.go new file mode 100644 index 00000000..ac9cdde6 --- /dev/null +++ b/backend/internal/pkg/timezone/timezone_test.go @@ -0,0 +1,137 @@ +package timezone + +import ( + "testing" + "time" +) + +func TestInit(t *testing.T) { + // Test with valid timezone + err := Init("Asia/Shanghai") + if err != nil { + t.Fatalf("Init failed with valid timezone: %v", err) + } + + // Verify time.Local was set + if time.Local.String() != "Asia/Shanghai" { + t.Errorf("time.Local not set correctly, got %s", time.Local.String()) + } + + // Verify our location variable + if Location().String() != "Asia/Shanghai" { + t.Errorf("Location() not set correctly, got %s", Location().String()) + } + + // Test Name() + if Name() != "Asia/Shanghai" { + t.Errorf("Name() not set correctly, got %s", Name()) + } +} + +func TestInitInvalidTimezone(t *testing.T) { + err := Init("Invalid/Timezone") + if err == nil { + t.Error("Init should fail with invalid timezone") + } +} + +func TestTimeNowAffected(t *testing.T) { + // Reset to UTC first + if err := Init("UTC"); err != nil { + t.Fatalf("Init failed with UTC: %v", err) + } + utcNow := time.Now() + + // Switch to Shanghai (UTC+8) + if err := Init("Asia/Shanghai"); err != nil { + t.Fatalf("Init failed with Asia/Shanghai: %v", err) + } + shanghaiNow := time.Now() + + // The times should be the same instant, but different timezone representation + // Shanghai should be 8 hours ahead in display + _, utcOffset := utcNow.Zone() + _, shanghaiOffset := shanghaiNow.Zone() + + expectedDiff := 8 * 3600 // 8 hours in seconds + actualDiff := shanghaiOffset - utcOffset + + if actualDiff != expectedDiff { + t.Errorf("Timezone offset difference incorrect: expected %d, got %d", expectedDiff, actualDiff) + } +} + +func TestToday(t *testing.T) { + if err := Init("Asia/Shanghai"); err != nil { + t.Fatalf("Init failed with Asia/Shanghai: %v", err) + } + + today := Today() + now := Now() + + // Today should be at 00:00:00 + if today.Hour() != 0 || today.Minute() != 0 || today.Second() != 0 { + t.Errorf("Today() not at start of day: %v", today) + } + + // Today should be same date as now + if today.Year() != now.Year() || today.Month() != now.Month() || today.Day() != now.Day() { + t.Errorf("Today() date mismatch: today=%v, now=%v", today, now) + } +} + +func TestStartOfDay(t *testing.T) { + if err := Init("Asia/Shanghai"); err != nil { + t.Fatalf("Init failed with Asia/Shanghai: %v", err) + } + + // Create a time at 15:30:45 + testTime := time.Date(2024, 6, 15, 15, 30, 45, 123456789, Location()) + startOfDay := StartOfDay(testTime) + + expected := time.Date(2024, 6, 15, 0, 0, 0, 0, Location()) + if !startOfDay.Equal(expected) { + t.Errorf("StartOfDay incorrect: expected %v, got %v", expected, startOfDay) + } +} + +func TestTruncateVsStartOfDay(t *testing.T) { + // This test demonstrates why Truncate(24*time.Hour) can be problematic + // and why StartOfDay is more reliable for timezone-aware code + + if err := Init("Asia/Shanghai"); err != nil { + t.Fatalf("Init failed with Asia/Shanghai: %v", err) + } + + now := Now() + + // Truncate operates on UTC, not local time + truncated := now.Truncate(24 * time.Hour) + + // 
StartOfDay operates on local time
+    startOfDay := StartOfDay(now)
+
+    // These will likely be different for non-UTC timezones
+    t.Logf("Now: %v", now)
+    t.Logf("Truncate(24h): %v", truncated)
+    t.Logf("StartOfDay: %v", startOfDay)
+
+    // The truncated time may not be at local midnight
+    // StartOfDay is always at local midnight
+    if startOfDay.Hour() != 0 {
+        t.Errorf("StartOfDay should be at hour 0, got %d", startOfDay.Hour())
+    }
+}
+
+func TestDSTAwareness(t *testing.T) {
+    // Test with a timezone that has DST (America/New_York)
+    err := Init("America/New_York")
+    if err != nil {
+        t.Skipf("America/New_York timezone not available: %v", err)
+    }
+
+    // Just verify it doesn't crash
+    _ = Today()
+    _ = Now()
+    _ = StartOfDay(Now())
+}
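For context, a minimal startup sketch using this package; the `cfg.Timezone` field and the query helper are assumptions for illustration, not code from this commit:

```go
// Sketch: initialize the global timezone once at startup, then use the
// day-boundary helpers for timezone-correct reporting windows.
if err := timezone.Init(cfg.Timezone); err != nil { // e.g. "Asia/Shanghai"
    log.Fatalf("timezone: %v", err)
}
// After Init, time.Now() already reports in the configured zone, and
// StartOfDay gives local midnight rather than a UTC truncation:
since := timezone.StartOfDay(timezone.Now().AddDate(0, 0, -7)) // local midnight, 7 days ago
logs := queryUsageSince(since)                                 // hypothetical query helper
```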
diff --git a/backend/internal/pkg/usagestats/account_stats.go b/backend/internal/pkg/usagestats/account_stats.go
new file mode 100644
index 00000000..9ac49625
--- /dev/null
+++ b/backend/internal/pkg/usagestats/account_stats.go
@@ -0,0 +1,14 @@
+package usagestats
+
+// AccountStats holds per-account usage statistics.
+//
+// cost: account-level cost (total_cost * account_rate_multiplier)
+// standard_cost: standard cost (total_cost, without any multiplier)
+// user_cost: user/API-key-level cost (actual_cost, affected by the group multiplier)
+type AccountStats struct {
+    Requests     int64   `json:"requests"`
+    Tokens       int64   `json:"tokens"`
+    Cost         float64 `json:"cost"`
+    StandardCost float64 `json:"standard_cost"`
+    UserCost     float64 `json:"user_cost"`
+}
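A worked example of the three cost views, with illustrative numbers rather than real billing data:

```go
// Sketch: a request with a standard price of $1.00, an account rate
// multiplier of 1.2, and a group multiplier of 0.8 (so actual_cost = 0.80)
// would be reported as:
stats := usagestats.AccountStats{
    Requests:     1,
    Tokens:       1200,
    Cost:         1.00 * 1.2, // account-level: total_cost * account_rate_multiplier = 1.20
    StandardCost: 1.00,       // total_cost, no multiplier
    UserCost:     0.80,       // actual_cost, after the group multiplier
}
```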
diff --git a/backend/internal/pkg/usagestats/usage_log_types.go b/backend/internal/pkg/usagestats/usage_log_types.go
new file mode 100644
index 00000000..2f6c7fe0
--- /dev/null
+++ b/backend/internal/pkg/usagestats/usage_log_types.go
@@ -0,0 +1,228 @@
+// Package usagestats provides types for usage statistics and reporting.
+package usagestats
+
+import "time"
+
+// DashboardStats holds the admin dashboard statistics
+type DashboardStats struct {
+    // User statistics
+    TotalUsers    int64 `json:"total_users"`
+    TodayNewUsers int64 `json:"today_new_users"` // users registered today
+    ActiveUsers   int64 `json:"active_users"`    // users with requests today
+    // Active users in the current hour (UTC)
+    HourlyActiveUsers int64 `json:"hourly_active_users"`
+
+    // Pre-aggregation freshness
+    StatsUpdatedAt string `json:"stats_updated_at"`
+    StatsStale     bool   `json:"stats_stale"`
+
+    // API key statistics
+    TotalAPIKeys  int64 `json:"total_api_keys"`
+    ActiveAPIKeys int64 `json:"active_api_keys"` // API keys whose status is active
+
+    // Account statistics
+    TotalAccounts     int64 `json:"total_accounts"`
+    NormalAccounts    int64 `json:"normal_accounts"`    // healthy accounts (schedulable=true, status=active)
+    ErrorAccounts     int64 `json:"error_accounts"`     // accounts in error state (status=error)
+    RateLimitAccounts int64 `json:"ratelimit_accounts"` // rate-limited accounts
+    OverloadAccounts  int64 `json:"overload_accounts"`  // overloaded accounts
+
+    // Cumulative token usage statistics
+    TotalRequests            int64   `json:"total_requests"`
+    TotalInputTokens         int64   `json:"total_input_tokens"`
+    TotalOutputTokens        int64   `json:"total_output_tokens"`
+    TotalCacheCreationTokens int64   `json:"total_cache_creation_tokens"`
+    TotalCacheReadTokens     int64   `json:"total_cache_read_tokens"`
+    TotalTokens              int64   `json:"total_tokens"`
+    TotalCost                float64 `json:"total_cost"`        // cumulative standard billing
+    TotalActualCost          float64 `json:"total_actual_cost"` // cumulative actual deductions
+
+    // Today's token usage statistics
+    TodayRequests            int64   `json:"today_requests"`
+    TodayInputTokens         int64   `json:"today_input_tokens"`
+    TodayOutputTokens        int64   `json:"today_output_tokens"`
+    TodayCacheCreationTokens int64   `json:"today_cache_creation_tokens"`
+    TodayCacheReadTokens     int64   `json:"today_cache_read_tokens"`
+    TodayTokens              int64   `json:"today_tokens"`
+    TodayCost                float64 `json:"today_cost"`        // today's standard billing
+    TodayActualCost          float64 `json:"today_actual_cost"` // today's actual deductions
+
+    // System runtime statistics
+    AverageDurationMs float64 `json:"average_duration_ms"` // average response time
+
+    // Performance metrics
+    Rpm int64 `json:"rpm"` // average requests per minute over the last 5 minutes
+    Tpm int64 `json:"tpm"` // average tokens per minute over the last 5 minutes
+}
+
+// TrendDataPoint represents a single point in trend data
+type TrendDataPoint struct {
+    Date         string  `json:"date"`
+    Requests     int64   `json:"requests"`
+    InputTokens  int64   `json:"input_tokens"`
+    OutputTokens int64   `json:"output_tokens"`
+    CacheTokens  int64   `json:"cache_tokens"`
+    TotalTokens  int64   `json:"total_tokens"`
+    Cost         float64 `json:"cost"`        // standard billing
+    ActualCost   float64 `json:"actual_cost"` // actual deductions
+}
+
+// ModelStat represents usage statistics for a single model
+type ModelStat struct {
+    Model        string  `json:"model"`
+    Requests     int64   `json:"requests"`
+    InputTokens  int64   `json:"input_tokens"`
+    OutputTokens int64   `json:"output_tokens"`
+    TotalTokens  int64   `json:"total_tokens"`
+    Cost         float64 `json:"cost"`        // standard billing
+    ActualCost   float64 `json:"actual_cost"` // actual deductions
+}
+
+// UserUsageTrendPoint represents user usage trend data point
+type UserUsageTrendPoint struct {
+    Date       string  `json:"date"`
+    UserID     int64   `json:"user_id"`
+    Email      string  `json:"email"`
+    Requests   int64   `json:"requests"`
+    Tokens     int64   `json:"tokens"`
+    Cost       float64 `json:"cost"`        // standard billing
+    ActualCost float64 `json:"actual_cost"` // actual deductions
+}
+
+// APIKeyUsageTrendPoint represents API key usage trend data point
+type APIKeyUsageTrendPoint struct {
+    Date     string `json:"date"`
+    APIKeyID int64  `json:"api_key_id"`
+    KeyName  string `json:"key_name"`
+    Requests int64  `json:"requests"`
+    Tokens   int64  `json:"tokens"`
+}
+
+// UserDashboardStats holds the per-user dashboard statistics
+type UserDashboardStats struct {
+    // API key statistics
+    TotalAPIKeys  int64 `json:"total_api_keys"`
+    ActiveAPIKeys int64 `json:"active_api_keys"`
+
+    // Cumulative token usage statistics
+    TotalRequests            int64   `json:"total_requests"`
+    TotalInputTokens         int64   `json:"total_input_tokens"`
+    TotalOutputTokens        int64   `json:"total_output_tokens"`
+    TotalCacheCreationTokens int64   `json:"total_cache_creation_tokens"`
+    TotalCacheReadTokens     int64   `json:"total_cache_read_tokens"`
+    TotalTokens              int64   `json:"total_tokens"`
+    TotalCost                float64 `json:"total_cost"`        // cumulative standard billing
+    TotalActualCost          float64 `json:"total_actual_cost"` // cumulative actual deductions
+
+    // Today's token usage statistics
+    TodayRequests            int64   `json:"today_requests"`
+    TodayInputTokens         int64   `json:"today_input_tokens"`
+    TodayOutputTokens        int64   `json:"today_output_tokens"`
+    TodayCacheCreationTokens int64   `json:"today_cache_creation_tokens"`
+    TodayCacheReadTokens     int64   `json:"today_cache_read_tokens"`
+    TodayTokens              int64   `json:"today_tokens"`
+    TodayCost                float64 `json:"today_cost"`        // today's standard billing
+    TodayActualCost          float64 `json:"today_actual_cost"` // today's actual deductions
+
+    // Performance statistics
+    AverageDurationMs float64 `json:"average_duration_ms"`
+
+    // Performance metrics
+    Rpm int64 `json:"rpm"` // average requests per minute over the last 5 minutes
+    Tpm int64 `json:"tpm"` // average tokens per minute over the last 5 minutes
+}
+
+// UsageLogFilters represents filters for usage log queries
+type UsageLogFilters struct {
+    UserID      int64
+    APIKeyID    int64
+    AccountID   int64
+    GroupID     int64
+    Model       string
+    Stream      *bool
+    BillingType *int8
+    StartTime   *time.Time
+    EndTime     *time.Time
+}
+
+// UsageStats represents usage statistics
+type UsageStats struct {
+    TotalRequests     int64    `json:"total_requests"`
+    TotalInputTokens  int64    `json:"total_input_tokens"`
+    TotalOutputTokens int64    `json:"total_output_tokens"`
+    TotalCacheTokens  int64    `json:"total_cache_tokens"`
+    TotalTokens       int64    `json:"total_tokens"`
+    TotalCost         float64  `json:"total_cost"`
+    TotalActualCost   float64  `json:"total_actual_cost"`
+    TotalAccountCost  *float64 `json:"total_account_cost,omitempty"`
+    AverageDurationMs float64  `json:"average_duration_ms"`
+}
+
+// BatchUserUsageStats represents usage stats for a single user
+type BatchUserUsageStats struct {
+    UserID          int64   `json:"user_id"`
+    TodayActualCost float64 `json:"today_actual_cost"`
+    TotalActualCost float64 `json:"total_actual_cost"`
+}
+
+// BatchAPIKeyUsageStats represents usage stats for a single API key
+type BatchAPIKeyUsageStats struct {
+    APIKeyID        int64   `json:"api_key_id"`
+    TodayActualCost float64 `json:"today_actual_cost"`
+    TotalActualCost float64 `json:"total_actual_cost"`
+}
+
+// AccountUsageHistory represents daily usage history for an account
+type AccountUsageHistory struct {
+    Date       string  `json:"date"`
+    Label      string  `json:"label"`
+    Requests   int64   `json:"requests"`
+    Tokens     int64   `json:"tokens"`
+    Cost       float64 `json:"cost"`        // standard billing (total_cost)
+    ActualCost float64 `json:"actual_cost"` // account-level cost (total_cost * account_rate_multiplier)
+    UserCost   float64 `json:"user_cost"`   // user-level cost (actual_cost, affected by the group multiplier)
+}
+
+// AccountUsageSummary represents summary statistics for an account
+type AccountUsageSummary struct {
+    Days              int     `json:"days"`
+    ActualDaysUsed    int     `json:"actual_days_used"`
+    TotalCost         float64 `json:"total_cost"`      // account-level cost
+    TotalUserCost     float64 `json:"total_user_cost"` // user-level cost
+    TotalStandardCost float64 `json:"total_standard_cost"`
+    TotalRequests     int64   `json:"total_requests"`
+    TotalTokens       int64   `json:"total_tokens"`
+    AvgDailyCost      float64 `json:"avg_daily_cost"` // account-level daily average
+    AvgDailyUserCost  float64 `json:"avg_daily_user_cost"`
+    AvgDailyRequests  float64 `json:"avg_daily_requests"`
+    AvgDailyTokens    float64 `json:"avg_daily_tokens"`
+    AvgDurationMs     float64 `json:"avg_duration_ms"`
+    Today             *struct {
+        Date     string  `json:"date"`
+        Cost     float64 `json:"cost"`
+        UserCost float64 `json:"user_cost"`
+        Requests int64   `json:"requests"`
+        Tokens   int64   `json:"tokens"`
+    } `json:"today"`
+    HighestCostDay *struct {
+        Date     string  `json:"date"`
+        Label    string  `json:"label"`
+        Cost     float64 `json:"cost"`
+        UserCost float64 `json:"user_cost"`
+        Requests int64   `json:"requests"`
+    } `json:"highest_cost_day"`
+    HighestRequestDay *struct {
+        Date     string  `json:"date"`
+        Label    string  `json:"label"`
+        Requests int64   `json:"requests"`
+        Cost     float64 `json:"cost"`
+        UserCost float64 `json:"user_cost"`
+    } `json:"highest_request_day"`
+}
+
+// AccountUsageStatsResponse represents the full usage statistics response for an account
+type AccountUsageStatsResponse struct {
+    History []AccountUsageHistory `json:"history"`
+    Summary AccountUsageSummary   `json:"summary"`
+    Models  []ModelStat           `json:"models"`
+}
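The `Rpm`/`Tpm` fields above are documented as five-minute averages; a small sketch of that arithmetic (the counter source is hypothetical, only the averaging is the point):

```go
// Sketch: deriving Rpm/Tpm from raw counts over the last 5 minutes.
requests5m, tokens5m := countSince(time.Now().Add(-5 * time.Minute)) // hypothetical counter query
var stats usagestats.DashboardStats
stats.Rpm = requests5m / 5 // average requests per minute over the last 5 minutes
stats.Tpm = tokens5m / 5   // average tokens per minute over the last 5 minutes
```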
dbpredicate "github.com/Wei-Shaw/sub2api/ent/predicate" + dbproxy "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" + + entsql "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqljson" +) + +// accountRepository 实现 service.AccountRepository 接口。 +// 提供 AI API 账户的完整数据访问功能。 +// +// 设计说明: +// - client: Ent 客户端,用于类型安全的 ORM 操作 +// - sql: 原生 SQL 执行器,用于复杂查询和批量操作 +type accountRepository struct { + client *dbent.Client // Ent ORM 客户端 + sql sqlExecutor // 原生 SQL 执行接口 +} + +type tempUnschedSnapshot struct { + until *time.Time + reason string +} + +// NewAccountRepository 创建账户仓储实例。 +// 这是对外暴露的构造函数,返回接口类型以便于依赖注入。 +func NewAccountRepository(client *dbent.Client, sqlDB *sql.DB) service.AccountRepository { + return newAccountRepositoryWithSQL(client, sqlDB) +} + +// newAccountRepositoryWithSQL 是内部构造函数,支持依赖注入 SQL 执行器。 +// 这种设计便于单元测试时注入 mock 对象。 +func newAccountRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *accountRepository { + return &accountRepository{client: client, sql: sqlq} +} + +func (r *accountRepository) Create(ctx context.Context, account *service.Account) error { + if account == nil { + return service.ErrAccountNilInput + } + + builder := r.client.Account.Create(). + SetName(account.Name). + SetNillableNotes(account.Notes). + SetPlatform(account.Platform). + SetType(account.Type). + SetCredentials(normalizeJSONMap(account.Credentials)). + SetExtra(normalizeJSONMap(account.Extra)). + SetConcurrency(account.Concurrency). + SetPriority(account.Priority). + SetStatus(account.Status). + SetErrorMessage(account.ErrorMessage). + SetSchedulable(account.Schedulable). + SetAutoPauseOnExpired(account.AutoPauseOnExpired) + + if account.RateMultiplier != nil { + builder.SetRateMultiplier(*account.RateMultiplier) + } + + if account.ProxyID != nil { + builder.SetProxyID(*account.ProxyID) + } + if account.LastUsedAt != nil { + builder.SetLastUsedAt(*account.LastUsedAt) + } + if account.ExpiresAt != nil { + builder.SetExpiresAt(*account.ExpiresAt) + } + if account.RateLimitedAt != nil { + builder.SetRateLimitedAt(*account.RateLimitedAt) + } + if account.RateLimitResetAt != nil { + builder.SetRateLimitResetAt(*account.RateLimitResetAt) + } + if account.OverloadUntil != nil { + builder.SetOverloadUntil(*account.OverloadUntil) + } + if account.SessionWindowStart != nil { + builder.SetSessionWindowStart(*account.SessionWindowStart) + } + if account.SessionWindowEnd != nil { + builder.SetSessionWindowEnd(*account.SessionWindowEnd) + } + if account.SessionWindowStatus != "" { + builder.SetSessionWindowStatus(account.SessionWindowStatus) + } + + created, err := builder.Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrAccountNotFound, nil) + } + + account.ID = created.ID + account.CreatedAt = created.CreatedAt + account.UpdatedAt = created.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil { + log.Printf("[SchedulerOutbox] enqueue account create failed: account=%d err=%v", account.ID, err) + } + return nil +} + +func (r *accountRepository) GetByID(ctx context.Context, id int64) (*service.Account, error) { + m, err := r.client.Account.Query().Where(dbaccount.IDEQ(id)).Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAccountNotFound, nil) + } + + accounts, err := r.accountsToService(ctx, 
+
+func (r *accountRepository) GetByID(ctx context.Context, id int64) (*service.Account, error) {
+	m, err := r.client.Account.Query().Where(dbaccount.IDEQ(id)).Only(ctx)
+	if err != nil {
+		return nil, translatePersistenceError(err, service.ErrAccountNotFound, nil)
+	}
+
+	accounts, err := r.accountsToService(ctx, []*dbent.Account{m})
+	if err != nil {
+		return nil, err
+	}
+	if len(accounts) == 0 {
+		return nil, service.ErrAccountNotFound
+	}
+	return &accounts[0], nil
+}
+
+func (r *accountRepository) GetByIDs(ctx context.Context, ids []int64) ([]*service.Account, error) {
+	if len(ids) == 0 {
+		return []*service.Account{}, nil
+	}
+
+	// De-duplicate while preserving order of first occurrence.
+	uniqueIDs := make([]int64, 0, len(ids))
+	seen := make(map[int64]struct{}, len(ids))
+	for _, id := range ids {
+		if id <= 0 {
+			continue
+		}
+		if _, ok := seen[id]; ok {
+			continue
+		}
+		seen[id] = struct{}{}
+		uniqueIDs = append(uniqueIDs, id)
+	}
+	if len(uniqueIDs) == 0 {
+		return []*service.Account{}, nil
+	}
+
+	entAccounts, err := r.client.Account.
+		Query().
+		Where(dbaccount.IDIn(uniqueIDs...)).
+		WithProxy().
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if len(entAccounts) == 0 {
+		return []*service.Account{}, nil
+	}
+
+	accountIDs := make([]int64, 0, len(entAccounts))
+	entByID := make(map[int64]*dbent.Account, len(entAccounts))
+	for _, acc := range entAccounts {
+		entByID[acc.ID] = acc
+		accountIDs = append(accountIDs, acc.ID)
+	}
+
+	tempUnschedMap, err := r.loadTempUnschedStates(ctx, accountIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	groupsByAccount, groupIDsByAccount, accountGroupsByAccount, err := r.loadAccountGroups(ctx, accountIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	outByID := make(map[int64]*service.Account, len(entAccounts))
+	for _, entAcc := range entAccounts {
+		out := accountEntityToService(entAcc)
+		if out == nil {
+			continue
+		}
+
+		// Prefer the preloaded proxy edge when available.
+		if entAcc.Edges.Proxy != nil {
+			out.Proxy = proxyEntityToService(entAcc.Edges.Proxy)
+		}
+
+		if groups, ok := groupsByAccount[entAcc.ID]; ok {
+			out.Groups = groups
+		}
+		if groupIDs, ok := groupIDsByAccount[entAcc.ID]; ok {
+			out.GroupIDs = groupIDs
+		}
+		if ags, ok := accountGroupsByAccount[entAcc.ID]; ok {
+			out.AccountGroups = ags
+		}
+		if snap, ok := tempUnschedMap[entAcc.ID]; ok {
+			out.TempUnschedulableUntil = snap.until
+			out.TempUnschedulableReason = snap.reason
+		}
+		outByID[entAcc.ID] = out
+	}
+
+	// Preserve input order (first occurrence), and ignore missing IDs.
+	out := make([]*service.Account, 0, len(uniqueIDs))
+	for _, id := range uniqueIDs {
+		if _, ok := entByID[id]; !ok {
+			continue
+		}
+		if acc, ok := outByID[id]; ok && acc != nil {
+			out = append(out, acc)
+		}
+	}
+
+	return out, nil
+}
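+
+// Illustrative semantics (hypothetical IDs): GetByIDs de-duplicates its input,
+// preserves first-occurrence order, and silently skips non-positive or unknown IDs:
+//
+//	accs, _ := repo.GetByIDs(ctx, []int64{7, 3, 7, -1})
+//	// => accounts 7 and 3, in that order (assuming both exist)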
+
+// ExistsByID reports whether an account with the given ID exists.
+// It is cheaper than GetByID because it:
+//   - uses Exist(), which issues a SELECT EXISTS query and returns only a boolean
+//   - does not load the full account entity or its associations (Groups, Proxy, etc.)
+//   - suits presence checks, e.g., verifying existence before a delete
+func (r *accountRepository) ExistsByID(ctx context.Context, id int64) (bool, error) {
+	exists, err := r.client.Account.Query().Where(dbaccount.IDEQ(id)).Exist(ctx)
+	if err != nil {
+		return false, err
+	}
+	return exists, nil
+}
+
+func (r *accountRepository) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*service.Account, error) {
+	if crsAccountID == "" {
+		return nil, nil
+	}
+
+	// Use sqljson.ValueEQ to build the JSON path filter; hand-written SQL fragments risk dialect-compatibility issues.
+	m, err := r.client.Account.Query().
+		Where(func(s *entsql.Selector) {
+			s.Where(sqljson.ValueEQ(dbaccount.FieldExtra, crsAccountID, sqljson.Path("crs_account_id")))
+		}).
+		Only(ctx)
+	if err != nil {
+		if dbent.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	accounts, err := r.accountsToService(ctx, []*dbent.Account{m})
+	if err != nil {
+		return nil, err
+	}
+	if len(accounts) == 0 {
+		return nil, nil
+	}
+	return &accounts[0], nil
+}
+
+func (r *accountRepository) Update(ctx context.Context, account *service.Account) error {
+	if account == nil {
+		return nil
+	}
+
+	builder := r.client.Account.UpdateOneID(account.ID).
+		SetName(account.Name).
+		SetNillableNotes(account.Notes).
+		SetPlatform(account.Platform).
+		SetType(account.Type).
+		SetCredentials(normalizeJSONMap(account.Credentials)).
+		SetExtra(normalizeJSONMap(account.Extra)).
+		SetConcurrency(account.Concurrency).
+		SetPriority(account.Priority).
+		SetStatus(account.Status).
+		SetErrorMessage(account.ErrorMessage).
+		SetSchedulable(account.Schedulable).
+		SetAutoPauseOnExpired(account.AutoPauseOnExpired)
+
+	if account.RateMultiplier != nil {
+		builder.SetRateMultiplier(*account.RateMultiplier)
+	}
+
+	if account.ProxyID != nil {
+		builder.SetProxyID(*account.ProxyID)
+	} else {
+		builder.ClearProxyID()
+	}
+	if account.LastUsedAt != nil {
+		builder.SetLastUsedAt(*account.LastUsedAt)
+	} else {
+		builder.ClearLastUsedAt()
+	}
+	if account.ExpiresAt != nil {
+		builder.SetExpiresAt(*account.ExpiresAt)
+	} else {
+		builder.ClearExpiresAt()
+	}
+	if account.RateLimitedAt != nil {
+		builder.SetRateLimitedAt(*account.RateLimitedAt)
+	} else {
+		builder.ClearRateLimitedAt()
+	}
+	if account.RateLimitResetAt != nil {
+		builder.SetRateLimitResetAt(*account.RateLimitResetAt)
+	} else {
+		builder.ClearRateLimitResetAt()
+	}
+	if account.OverloadUntil != nil {
+		builder.SetOverloadUntil(*account.OverloadUntil)
+	} else {
+		builder.ClearOverloadUntil()
+	}
+	if account.SessionWindowStart != nil {
+		builder.SetSessionWindowStart(*account.SessionWindowStart)
+	} else {
+		builder.ClearSessionWindowStart()
+	}
+	if account.SessionWindowEnd != nil {
+		builder.SetSessionWindowEnd(*account.SessionWindowEnd)
+	} else {
+		builder.ClearSessionWindowEnd()
+	}
+	if account.SessionWindowStatus != "" {
+		builder.SetSessionWindowStatus(account.SessionWindowStatus)
+	} else {
+		builder.ClearSessionWindowStatus()
+	}
+	if account.Notes == nil {
+		builder.ClearNotes()
+	}
+
+	updated, err := builder.Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrAccountNotFound, nil)
+	}
+	account.UpdatedAt = updated.UpdatedAt
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue account update failed: account=%d err=%v", account.ID, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) Delete(ctx context.Context, id int64) error {
+	groupIDs, err := r.loadAccountGroupIDs(ctx, id)
+	if err != nil {
+		return err
+	}
+	// Use a transaction so the account and its group bindings are deleted atomically
+	tx, err := r.client.Tx(ctx)
+	if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
+		return err
+	}
+
+	var txClient *dbent.Client
+	if err == nil {
+		defer func() { _ = tx.Rollback() }()
+		txClient = tx.Client()
+	} else {
+		// Already inside an external transaction (ErrTxStarted); reuse the current client
+		txClient = r.client
+	}
+
+	if _, err := txClient.AccountGroup.Delete().Where(dbaccountgroup.AccountIDEQ(id)).Exec(ctx); err != nil {
+		return err
+	}
+	if _, err := txClient.Account.Delete().Where(dbaccount.IDEQ(id)).Exec(ctx); err != nil {
+		return err
+	}
+
+	if tx != nil {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, buildSchedulerGroupPayload(groupIDs)); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue account delete failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
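+
+// Note on transaction reuse: Delete (and BindGroups below) call r.client.Tx(ctx).
+// When r.client is already transaction-bound (e.g., built from tx.Client(), as the
+// integration tests do), Ent returns ErrTxStarted and the method reuses the current
+// client, leaving Commit/Rollback to the transaction owner. Sketch (assumed caller):
+//
+//	tx, _ := entClient.Tx(ctx)
+//	repo := NewAccountRepository(tx.Client(), sqlDB)
+//	_ = repo.Delete(ctx, id) // joins the caller's transaction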
+
+func (r *accountRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Account, *pagination.PaginationResult, error) {
+	return r.ListWithFilters(ctx, params, "", "", "", "")
+}
+
+func (r *accountRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]service.Account, *pagination.PaginationResult, error) {
+	q := r.client.Account.Query()
+
+	if platform != "" {
+		q = q.Where(dbaccount.PlatformEQ(platform))
+	}
+	if accountType != "" {
+		q = q.Where(dbaccount.TypeEQ(accountType))
+	}
+	if status != "" {
+		q = q.Where(dbaccount.StatusEQ(status))
+	}
+	if search != "" {
+		q = q.Where(dbaccount.NameContainsFold(search))
+	}
+
+	total, err := q.Count(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	accounts, err := q.
+		Offset(params.Offset()).
+		Limit(params.Limit()).
+		Order(dbent.Desc(dbaccount.FieldID)).
+		All(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	outAccounts, err := r.accountsToService(ctx, accounts)
+	if err != nil {
+		return nil, nil, err
+	}
+	return outAccounts, paginationResultFromTotal(int64(total), params), nil
+}
+
+func (r *accountRepository) ListByGroup(ctx context.Context, groupID int64) ([]service.Account, error) {
+	accounts, err := r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{
+		status: service.StatusActive,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return accounts, nil
+}
+
+func (r *accountRepository) ListActive(ctx context.Context) ([]service.Account, error) {
+	accounts, err := r.client.Account.Query().
+		Where(dbaccount.StatusEQ(service.StatusActive)).
+		Order(dbent.Asc(dbaccount.FieldPriority)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) ListByPlatform(ctx context.Context, platform string) ([]service.Account, error) {
+	accounts, err := r.client.Account.Query().
+		Where(
+			dbaccount.PlatformEQ(platform),
+			dbaccount.StatusEQ(service.StatusActive),
+		).
+		Order(dbent.Asc(dbaccount.FieldPriority)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error {
+	now := time.Now()
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetLastUsedAt(now).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	payload := map[string]any{
+		"last_used": map[string]int64{
+			strconv.FormatInt(id, 10): now.Unix(),
+		},
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, &id, nil, payload); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue last used failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
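+
+// The outbox payload for last-used events maps account IDs (as decimal strings)
+// to Unix timestamps; for example, UpdateLastUsed for account 42 enqueues roughly:
+//
+//	{"last_used": {"42": 1736899200}}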
+
+func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error {
+	if len(updates) == 0 {
+		return nil
+	}
+
+	ids := make([]int64, 0, len(updates))
+	args := make([]any, 0, len(updates)*2+1)
+	caseSQL := "UPDATE accounts SET last_used_at = CASE id"
+
+	idx := 1
+	for id, ts := range updates {
+		caseSQL += " WHEN $" + itoa(idx) + " THEN $" + itoa(idx+1) + "::timestamptz"
+		args = append(args, id, ts)
+		ids = append(ids, id)
+		idx += 2
+	}
+
+	caseSQL += " END, updated_at = NOW() WHERE id = ANY($" + itoa(idx) + ") AND deleted_at IS NULL"
+	args = append(args, pq.Array(ids))
+
+	_, err := r.sql.ExecContext(ctx, caseSQL, args...)
+	if err != nil {
+		return err
+	}
+	lastUsedPayload := make(map[string]int64, len(updates))
+	for id, ts := range updates {
+		lastUsedPayload[strconv.FormatInt(id, 10)] = ts.Unix()
+	}
+	payload := map[string]any{"last_used": lastUsedPayload}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, nil, nil, payload); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue batch last used failed: err=%v", err)
+	}
+	return nil
+}
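+
+// For illustration, BatchUpdateLastUsed with two entries builds SQL shaped like:
+//
+//	UPDATE accounts SET last_used_at = CASE id
+//	    WHEN $1 THEN $2::timestamptz
+//	    WHEN $3 THEN $4::timestamptz
+//	END, updated_at = NOW()
+//	WHERE id = ANY($5) AND deleted_at IS NULL
+//
+// with $5 bound to a pq.Array of the affected IDs.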
+
+func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg string) error {
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetStatus(service.StatusError).
+		SetErrorMessage(errorMsg).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue set error failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID int64, priority int) error {
+	_, err := r.client.AccountGroup.Create().
+		SetAccountID(accountID).
+		SetGroupID(groupID).
+		SetPriority(priority).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	payload := buildSchedulerGroupPayload([]int64{groupID})
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue add to group failed: account=%d group=%d err=%v", accountID, groupID, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, groupID int64) error {
+	_, err := r.client.AccountGroup.Delete().
+		Where(
+			dbaccountgroup.AccountIDEQ(accountID),
+			dbaccountgroup.GroupIDEQ(groupID),
+		).
+		Exec(ctx)
+	if err != nil {
+		return err
+	}
+	payload := buildSchedulerGroupPayload([]int64{groupID})
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue remove from group failed: account=%d group=%d err=%v", accountID, groupID, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]service.Group, error) {
+	groups, err := r.client.Group.Query().
+		Where(
+			dbgroup.HasAccountsWith(dbaccount.IDEQ(accountID)),
+		).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	outGroups := make([]service.Group, 0, len(groups))
+	for i := range groups {
+		outGroups = append(outGroups, *groupEntityToService(groups[i]))
+	}
+	return outGroups, nil
+}
+
+func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error {
+	existingGroupIDs, err := r.loadAccountGroupIDs(ctx, accountID)
+	if err != nil {
+		return err
+	}
+	// Use a transaction so deleting old bindings and creating new ones is atomic
+	tx, err := r.client.Tx(ctx)
+	if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
+		return err
+	}
+
+	var txClient *dbent.Client
+	if err == nil {
+		defer func() { _ = tx.Rollback() }()
+		txClient = tx.Client()
+	} else {
+		// Already inside an external transaction (ErrTxStarted); reuse the current client
+		txClient = r.client
+	}
+
+	if _, err := txClient.AccountGroup.Delete().Where(dbaccountgroup.AccountIDEQ(accountID)).Exec(ctx); err != nil {
+		return err
+	}
+
+	if len(groupIDs) == 0 {
+		if tx != nil {
+			return tx.Commit()
+		}
+		return nil
+	}
+
+	builders := make([]*dbent.AccountGroupCreate, 0, len(groupIDs))
+	for i, groupID := range groupIDs {
+		builders = append(builders, txClient.AccountGroup.Create().
+			SetAccountID(accountID).
+			SetGroupID(groupID).
+			SetPriority(i+1),
+		)
+	}
+
+	if _, err := txClient.AccountGroup.CreateBulk(builders...).Save(ctx); err != nil {
+		return err
+	}
+
+	if tx != nil {
+		if err := tx.Commit(); err != nil {
+			return err
+		}
+	}
+	payload := buildSchedulerGroupPayload(mergeGroupIDs(existingGroupIDs, groupIDs))
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue bind groups failed: account=%d err=%v", accountID, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) ListSchedulable(ctx context.Context) ([]service.Account, error) {
+	now := time.Now()
+	accounts, err := r.client.Account.Query().
+		Where(
+			dbaccount.StatusEQ(service.StatusActive),
+			dbaccount.SchedulableEQ(true),
+			tempUnschedulablePredicate(),
+			notExpiredPredicate(now),
+			dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)),
+			dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)),
+		).
+		Order(dbent.Asc(dbaccount.FieldPriority)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]service.Account, error) {
+	return r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{
+		status: service.StatusActive,
+		schedulable: true,
+	})
+}
+
+func (r *accountRepository) ListSchedulableByPlatform(ctx context.Context, platform string) ([]service.Account, error) {
+	now := time.Now()
+	accounts, err := r.client.Account.Query().
+		Where(
+			dbaccount.PlatformEQ(platform),
+			dbaccount.StatusEQ(service.StatusActive),
+			dbaccount.SchedulableEQ(true),
+			tempUnschedulablePredicate(),
+			notExpiredPredicate(now),
+			dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)),
+			dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)),
+		).
+		Order(dbent.Asc(dbaccount.FieldPriority)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]service.Account, error) {
+	// The single-platform query reuses the multi-platform logic so filtering and ordering stay consistent.
+	return r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{
+		status: service.StatusActive,
+		schedulable: true,
+		platforms: []string{platform},
+	})
+}
+
+func (r *accountRepository) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]service.Account, error) {
+	if len(platforms) == 0 {
+		return nil, nil
+	}
+	// Return only schedulable active accounts, excluding those inside an overload/rate-limit window.
+	// Proxy and group data are batch-loaded in accountsToService to avoid N+1 queries.
+	now := time.Now()
+	accounts, err := r.client.Account.Query().
+		Where(
+			dbaccount.PlatformIn(platforms...),
+			dbaccount.StatusEQ(service.StatusActive),
+			dbaccount.SchedulableEQ(true),
+			tempUnschedulablePredicate(),
+			notExpiredPredicate(now),
+			dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)),
+			dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)),
+		).
+		Order(dbent.Asc(dbaccount.FieldPriority)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]service.Account, error) {
+	if len(platforms) == 0 {
+		return nil, nil
+	}
+	// Reuse the group query logic so ordering and filtering by group priority + account priority stay consistent.
+	return r.queryAccountsByGroup(ctx, groupID, accountGroupQueryOptions{
+		status: service.StatusActive,
+		schedulable: true,
+		platforms: platforms,
+	})
+}
+
+func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error {
+	now := time.Now()
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetRateLimitedAt(now).
+		SetRateLimitResetAt(resetAt).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue rate limit failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error {
+	now := time.Now().UTC()
+	payload := map[string]string{
+		"rate_limited_at": now.Format(time.RFC3339),
+		"rate_limit_reset_at": resetAt.UTC().Format(time.RFC3339),
+	}
+	raw, err := json.Marshal(payload)
+	if err != nil {
+		return err
+	}
+
+	path := "{antigravity_quota_scopes," + string(scope) + "}"
+	client := clientFromContext(ctx, r.client)
+	result, err := client.ExecContext(
+		ctx,
+		"UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL",
+		path,
+		raw,
+		id,
+	)
+	if err != nil {
+		return err
+	}
+
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if affected == 0 {
+		return service.ErrAccountNotFound
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue quota scope failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) SetOverloaded(ctx context.Context, id int64, until time.Time) error {
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetOverloadUntil(until).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue overload failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
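+
+// For illustration, SetAntigravityQuotaScopeLimit with a scope value such as
+// "daily" (a hypothetical scope name) on account 7 executes the equivalent of:
+//
+//	UPDATE accounts
+//	SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb),
+//	        '{antigravity_quota_scopes,daily}',
+//	        '{"rate_limited_at": "<RFC3339>", "rate_limit_reset_at": "<RFC3339>"}'::jsonb,
+//	        true)
+//	WHERE id = 7 AND deleted_at IS NULL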
+
+func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+	_, err := r.sql.ExecContext(ctx, `
+		UPDATE accounts
+		SET temp_unschedulable_until = $1,
+			temp_unschedulable_reason = $2,
+			updated_at = NOW()
+		WHERE id = $3
+			AND deleted_at IS NULL
+			AND (temp_unschedulable_until IS NULL OR temp_unschedulable_until < $1)
+	`, until, reason, id)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue temp unschedulable failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64) error {
+	_, err := r.sql.ExecContext(ctx, `
+		UPDATE accounts
+		SET temp_unschedulable_until = NULL,
+			temp_unschedulable_reason = NULL,
+			updated_at = NOW()
+		WHERE id = $1
+			AND deleted_at IS NULL
+	`, id)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue clear temp unschedulable failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error {
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		ClearRateLimitedAt().
+		ClearRateLimitResetAt().
+		ClearOverloadUntil().
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue clear rate limit failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error {
+	client := clientFromContext(ctx, r.client)
+	result, err := client.ExecContext(
+		ctx,
+		"UPDATE accounts SET extra = COALESCE(extra, '{}'::jsonb) - 'antigravity_quota_scopes', updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL",
+		id,
+	)
+	if err != nil {
+		return err
+	}
+
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if affected == 0 {
+		return service.ErrAccountNotFound
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue clear quota scopes failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error {
+	builder := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetSessionWindowStatus(status)
+	if start != nil {
+		builder.SetSessionWindowStart(*start)
+	}
+	if end != nil {
+		builder.SetSessionWindowEnd(*end)
+	}
+	_, err := builder.Save(ctx)
+	return err
+}
+
+func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedulable bool) error {
+	_, err := r.client.Account.Update().
+		Where(dbaccount.IDEQ(id)).
+		SetSchedulable(schedulable).
+		Save(ctx)
+	if err != nil {
+		return err
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue schedulable change failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) {
+	result, err := r.sql.ExecContext(ctx, `
+		UPDATE accounts
+		SET schedulable = FALSE,
+			updated_at = NOW()
+		WHERE deleted_at IS NULL
+			AND schedulable = TRUE
+			AND auto_pause_on_expired = TRUE
+			AND expires_at IS NOT NULL
+			AND expires_at <= $1
+	`, now)
+	if err != nil {
+		return 0, err
+	}
+	rows, err := result.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	if rows > 0 {
+		if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventFullRebuild, nil, nil, nil); err != nil {
+			log.Printf("[SchedulerOutbox] enqueue auto pause rebuild failed: err=%v", err)
+		}
+	}
+	return rows, nil
+}
+
+func (r *accountRepository) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error {
+	if len(updates) == 0 {
+		return nil
+	}
+
+	// Use a JSONB merge for an atomic update, avoiding lost updates from concurrent read-modify-write cycles
+	payload, err := json.Marshal(updates)
+	if err != nil {
+		return err
+	}
+
+	client := clientFromContext(ctx, r.client)
+	result, err := client.ExecContext(
+		ctx,
+		"UPDATE accounts SET extra = COALESCE(extra, '{}'::jsonb) || $1::jsonb, updated_at = NOW() WHERE id = $2 AND deleted_at IS NULL",
+		payload, id,
+	)
+	if err != nil {
+		return err
+	}
+
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if affected == 0 {
+		return service.ErrAccountNotFound
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue extra update failed: account=%d err=%v", id, err)
+	}
+	return nil
+}
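+
+// UpdateExtra relies on Postgres JSONB concatenation, which merges top-level keys
+// with the right-hand side winning on conflicts; for example:
+//
+//	'{"a": 1, "b": 1}'::jsonb || '{"b": 2}'::jsonb  =>  {"a": 1, "b": 2}
+//
+// Note the merge is shallow: nested objects are replaced wholesale, not deep-merged.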
+
+func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates service.AccountBulkUpdate) (int64, error) {
+	if len(ids) == 0 {
+		return 0, nil
+	}
+
+	setClauses := make([]string, 0, 8)
+	args := make([]any, 0, 8)
+
+	idx := 1
+	if updates.Name != nil {
+		setClauses = append(setClauses, "name = $"+itoa(idx))
+		args = append(args, *updates.Name)
+		idx++
+	}
+	if updates.ProxyID != nil {
+		// 0 means "clear the proxy" (the frontend sends 0 rather than null to express the clearing intent)
+		if *updates.ProxyID == 0 {
+			setClauses = append(setClauses, "proxy_id = NULL")
+		} else {
+			setClauses = append(setClauses, "proxy_id = $"+itoa(idx))
+			args = append(args, *updates.ProxyID)
+			idx++
+		}
+	}
+	if updates.Concurrency != nil {
+		setClauses = append(setClauses, "concurrency = $"+itoa(idx))
+		args = append(args, *updates.Concurrency)
+		idx++
+	}
+	if updates.Priority != nil {
+		setClauses = append(setClauses, "priority = $"+itoa(idx))
+		args = append(args, *updates.Priority)
+		idx++
+	}
+	if updates.RateMultiplier != nil {
+		setClauses = append(setClauses, "rate_multiplier = $"+itoa(idx))
+		args = append(args, *updates.RateMultiplier)
+		idx++
+	}
+	if updates.Status != nil {
+		setClauses = append(setClauses, "status = $"+itoa(idx))
+		args = append(args, *updates.Status)
+		idx++
+	}
+	if updates.Schedulable != nil {
+		setClauses = append(setClauses, "schedulable = $"+itoa(idx))
+		args = append(args, *updates.Schedulable)
+		idx++
+	}
+	// JSONB must be merged rather than overwritten; raw SQL preserves the old behavior.
+	if len(updates.Credentials) > 0 {
+		payload, err := json.Marshal(updates.Credentials)
+		if err != nil {
+			return 0, err
+		}
+		setClauses = append(setClauses, "credentials = COALESCE(credentials, '{}'::jsonb) || $"+itoa(idx)+"::jsonb")
+		args = append(args, payload)
+		idx++
+	}
+	if len(updates.Extra) > 0 {
+		payload, err := json.Marshal(updates.Extra)
+		if err != nil {
+			return 0, err
+		}
+		setClauses = append(setClauses, "extra = COALESCE(extra, '{}'::jsonb) || $"+itoa(idx)+"::jsonb")
+		args = append(args, payload)
+		idx++
+	}
+
+	if len(setClauses) == 0 {
+		return 0, nil
+	}
+
+	setClauses = append(setClauses, "updated_at = NOW()")
+
+	query := "UPDATE accounts SET " + joinClauses(setClauses, ", ") + " WHERE id = ANY($" + itoa(idx) + ") AND deleted_at IS NULL"
+	args = append(args, pq.Array(ids))
+
+	result, err := r.sql.ExecContext(ctx, query, args...)
+	if err != nil {
+		return 0, err
+	}
+	rows, err := result.RowsAffected()
+	if err != nil {
+		return 0, err
+	}
+	if rows > 0 {
+		payload := map[string]any{"account_ids": ids}
+		if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountBulkChanged, nil, nil, payload); err != nil {
+			log.Printf("[SchedulerOutbox] enqueue bulk update failed: err=%v", err)
+		}
+	}
+	return rows, nil
+}
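+
+// Illustrative usage (a sketch): clearing the proxy on several accounts at once,
+// using the documented zero-value sentinel:
+//
+//	zero := int64(0)
+//	n, _ := repo.BulkUpdate(ctx, ids, service.AccountBulkUpdate{ProxyID: &zero})
+//	// n rows updated; each affected account now has proxy_id = NULL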
+
+type accountGroupQueryOptions struct {
+	status string
+	schedulable bool
+	platforms []string // allowed platforms; an empty slice disables platform filtering
+}
+
+func (r *accountRepository) queryAccountsByGroup(ctx context.Context, groupID int64, opts accountGroupQueryOptions) ([]service.Account, error) {
+	q := r.client.AccountGroup.Query().
+		Where(dbaccountgroup.GroupIDEQ(groupID))
+
+	// Query accounts through the account_groups join table, layering status/platform/schedulability filters as needed.
+	preds := make([]dbpredicate.Account, 0, 6)
+	preds = append(preds, dbaccount.DeletedAtIsNil())
+	if opts.status != "" {
+		preds = append(preds, dbaccount.StatusEQ(opts.status))
+	}
+	if len(opts.platforms) > 0 {
+		preds = append(preds, dbaccount.PlatformIn(opts.platforms...))
+	}
+	if opts.schedulable {
+		now := time.Now()
+		preds = append(preds,
+			dbaccount.SchedulableEQ(true),
+			tempUnschedulablePredicate(),
+			notExpiredPredicate(now),
+			dbaccount.Or(dbaccount.OverloadUntilIsNil(), dbaccount.OverloadUntilLTE(now)),
+			dbaccount.Or(dbaccount.RateLimitResetAtIsNil(), dbaccount.RateLimitResetAtLTE(now)),
+		)
+	}
+
+	if len(preds) > 0 {
+		q = q.Where(dbaccountgroup.HasAccountWith(preds...))
+	}
+
+	groups, err := q.
+		Order(
+			dbaccountgroup.ByPriority(),
+			dbaccountgroup.ByAccountField(dbaccount.FieldPriority),
+		).
+		WithAccount().
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	orderedIDs := make([]int64, 0, len(groups))
+	accountMap := make(map[int64]*dbent.Account, len(groups))
+	for _, ag := range groups {
+		if ag.Edges.Account == nil {
+			continue
+		}
+		if _, exists := accountMap[ag.AccountID]; exists {
+			continue
+		}
+		accountMap[ag.AccountID] = ag.Edges.Account
+		orderedIDs = append(orderedIDs, ag.AccountID)
+	}
+
+	accounts := make([]*dbent.Account, 0, len(orderedIDs))
+	for _, id := range orderedIDs {
+		if acc, ok := accountMap[id]; ok {
+			accounts = append(accounts, acc)
+		}
+	}
+
+	return r.accountsToService(ctx, accounts)
+}
+
+func (r *accountRepository) accountsToService(ctx context.Context, accounts []*dbent.Account) ([]service.Account, error) {
+	if len(accounts) == 0 {
+		return []service.Account{}, nil
+	}
+
+	accountIDs := make([]int64, 0, len(accounts))
+	proxyIDs := make([]int64, 0, len(accounts))
+	for _, acc := range accounts {
+		accountIDs = append(accountIDs, acc.ID)
+		if acc.ProxyID != nil {
+			proxyIDs = append(proxyIDs, *acc.ProxyID)
+		}
+	}
+
+	proxyMap, err := r.loadProxies(ctx, proxyIDs)
+	if err != nil {
+		return nil, err
+	}
+	tempUnschedMap, err := r.loadTempUnschedStates(ctx, accountIDs)
+	if err != nil {
+		return nil, err
+	}
+	groupsByAccount, groupIDsByAccount, accountGroupsByAccount, err := r.loadAccountGroups(ctx, accountIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	outAccounts := make([]service.Account, 0, len(accounts))
+	for _, acc := range accounts {
+		out := accountEntityToService(acc)
+		if out == nil {
+			continue
+		}
+		if acc.ProxyID != nil {
+			if proxy, ok := proxyMap[*acc.ProxyID]; ok {
+				out.Proxy = proxy
+			}
+		}
+		if groups, ok := groupsByAccount[acc.ID]; ok {
+			out.Groups = groups
+		}
+		if groupIDs, ok := groupIDsByAccount[acc.ID]; ok {
+			out.GroupIDs = groupIDs
+		}
+		if ags, ok := accountGroupsByAccount[acc.ID]; ok {
+			out.AccountGroups = ags
+		}
+		if snap, ok := tempUnschedMap[acc.ID]; ok {
+			out.TempUnschedulableUntil = snap.until
+			out.TempUnschedulableReason = snap.reason
+		}
+		outAccounts = append(outAccounts, *out)
+	}
+
+	return outAccounts, nil
+}
+
+func tempUnschedulablePredicate() dbpredicate.Account {
+	return dbpredicate.Account(func(s *entsql.Selector) {
+		col := s.C("temp_unschedulable_until")
+		s.Where(entsql.Or(
+			entsql.IsNull(col),
+			entsql.LTE(col, entsql.Expr("NOW()")),
+		))
+	})
+}
+
+func notExpiredPredicate(now time.Time) dbpredicate.Account {
+	return dbaccount.Or(
+		dbaccount.ExpiresAtIsNil(),
+		dbaccount.ExpiresAtGT(now),
+		dbaccount.AutoPauseOnExpiredEQ(false),
+	)
+}
+
+func (r *accountRepository) loadTempUnschedStates(ctx context.Context, accountIDs []int64) (map[int64]tempUnschedSnapshot, error) {
+	out := make(map[int64]tempUnschedSnapshot)
+	if len(accountIDs) == 0 {
+		return out, nil
+	}
+
+	rows, err := r.sql.QueryContext(ctx, `
+		SELECT id, temp_unschedulable_until, temp_unschedulable_reason
+		FROM accounts
+		WHERE id = ANY($1)
+	`, pq.Array(accountIDs))
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rows.Close() }()
+
+	for rows.Next() {
+		var id int64
+		var until sql.NullTime
+		var reason sql.NullString
+		if err := rows.Scan(&id, &until, &reason); err != nil {
+			return nil, err
+		}
+		var untilPtr *time.Time
+		if until.Valid {
+			tmp := until.Time
+			untilPtr = &tmp
+		}
+		if reason.Valid {
+			out[id] = tempUnschedSnapshot{until: untilPtr, reason: reason.String}
+		} else {
+			out[id] = tempUnschedSnapshot{until: untilPtr, reason: ""}
+		}
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+func (r *accountRepository) loadProxies(ctx context.Context, proxyIDs []int64) (map[int64]*service.Proxy, error) {
+	proxyMap := make(map[int64]*service.Proxy)
+	if len(proxyIDs) == 0 {
+		return proxyMap, nil
+	}
+
+	proxies, err := r.client.Proxy.Query().Where(dbproxy.IDIn(proxyIDs...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, p := range proxies {
+		proxyMap[p.ID] = proxyEntityToService(p)
+	}
+	return proxyMap, nil
+}
+
+func (r *accountRepository) loadAccountGroups(ctx context.Context, accountIDs []int64) (map[int64][]*service.Group, map[int64][]int64, map[int64][]service.AccountGroup, error) {
+	groupsByAccount := make(map[int64][]*service.Group)
+	groupIDsByAccount := make(map[int64][]int64)
+	accountGroupsByAccount := make(map[int64][]service.AccountGroup)
+
+	if len(accountIDs) == 0 {
+		return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil
+	}
+
+	entries, err := r.client.AccountGroup.Query().
+		Where(dbaccountgroup.AccountIDIn(accountIDs...)).
+		WithGroup().
+		Order(dbaccountgroup.ByAccountID(), dbaccountgroup.ByPriority()).
+		All(ctx)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	for _, ag := range entries {
+		groupSvc := groupEntityToService(ag.Edges.Group)
+		agSvc := service.AccountGroup{
+			AccountID: ag.AccountID,
+			GroupID: ag.GroupID,
+			Priority: ag.Priority,
+			CreatedAt: ag.CreatedAt,
+			Group: groupSvc,
+		}
+		accountGroupsByAccount[ag.AccountID] = append(accountGroupsByAccount[ag.AccountID], agSvc)
+		groupIDsByAccount[ag.AccountID] = append(groupIDsByAccount[ag.AccountID], ag.GroupID)
+		if groupSvc != nil {
+			groupsByAccount[ag.AccountID] = append(groupsByAccount[ag.AccountID], groupSvc)
+		}
+	}
+
+	return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil
+}
+
+func (r *accountRepository) loadAccountGroupIDs(ctx context.Context, accountID int64) ([]int64, error) {
+	entries, err := r.client.AccountGroup.
+		Query().
+		Where(dbaccountgroup.AccountIDEQ(accountID)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	ids := make([]int64, 0, len(entries))
+	for _, entry := range entries {
+		ids = append(ids, entry.GroupID)
+	}
+	return ids, nil
+}
+
+func mergeGroupIDs(a []int64, b []int64) []int64 {
+	seen := make(map[int64]struct{}, len(a)+len(b))
+	out := make([]int64, 0, len(a)+len(b))
+	for _, id := range a {
+		if id <= 0 {
+			continue
+		}
+		if _, ok := seen[id]; ok {
+			continue
+		}
+		seen[id] = struct{}{}
+		out = append(out, id)
+	}
+	for _, id := range b {
+		if id <= 0 {
+			continue
+		}
+		if _, ok := seen[id]; ok {
+			continue
+		}
+		seen[id] = struct{}{}
+		out = append(out, id)
+	}
+	return out
+}
+
+func buildSchedulerGroupPayload(groupIDs []int64) map[string]any {
+	if len(groupIDs) == 0 {
+		return nil
+	}
+	return map[string]any{"group_ids": groupIDs}
+}
+
+func accountEntityToService(m *dbent.Account) *service.Account {
+	if m == nil {
+		return nil
+	}
+
+	rateMultiplier := m.RateMultiplier
+
+	return &service.Account{
+		ID: m.ID,
+		Name: m.Name,
+		Notes: m.Notes,
+		Platform: m.Platform,
+		Type: m.Type,
+		Credentials: copyJSONMap(m.Credentials),
+		Extra: copyJSONMap(m.Extra),
+		ProxyID: m.ProxyID,
+		Concurrency: m.Concurrency,
+		Priority: m.Priority,
+		RateMultiplier: &rateMultiplier,
+		Status: m.Status,
+		ErrorMessage: derefString(m.ErrorMessage),
+		LastUsedAt: m.LastUsedAt,
+		ExpiresAt: m.ExpiresAt,
+		AutoPauseOnExpired: m.AutoPauseOnExpired,
+		CreatedAt: m.CreatedAt,
+		UpdatedAt: m.UpdatedAt,
+		Schedulable: m.Schedulable,
+		RateLimitedAt: m.RateLimitedAt,
+		RateLimitResetAt: m.RateLimitResetAt,
+		OverloadUntil: m.OverloadUntil,
+		SessionWindowStart: m.SessionWindowStart,
+		SessionWindowEnd: m.SessionWindowEnd,
+		SessionWindowStatus: derefString(m.SessionWindowStatus),
+	}
+}
+
+func normalizeJSONMap(in map[string]any) map[string]any {
+	if in == nil {
+		return map[string]any{}
+	}
+	return in
+}
+
+func copyJSONMap(in map[string]any) map[string]any {
+	if in == nil {
+		return nil
+	}
+	out := make(map[string]any, len(in))
+	for k, v := range in {
+		out[k] = v
+	}
+	return out
+}
+
+func joinClauses(clauses []string, sep string) string {
+	if len(clauses) == 0 {
+		return ""
+	}
+	out := clauses[0]
+	for i := 1; i < len(clauses); i++ {
+		out += sep + clauses[i]
+	}
+	return out
+}
+
+func itoa(v int) string {
+	return strconv.Itoa(v)
+}
diff --git a/backend/internal/repository/account_repo_integration_test.go b/backend/internal/repository/account_repo_integration_test.go
new file mode 100644
index 00000000..250b141d
--- /dev/null
+++ b/backend/internal/repository/account_repo_integration_test.go
@@ -0,0 +1,587 @@
+//go:build integration
+
+package repository
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/ent/accountgroup"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/stretchr/testify/suite"
+)
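+
+// These tests are gated behind the "integration" build tag above; they are
+// typically run with something like (exact flags depend on the project's tooling):
+//
+//	go test -tags integration ./internal/repository/...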
+
+type AccountRepoSuite struct {
+	suite.Suite
+	ctx context.Context
+	client *dbent.Client
+	repo *accountRepository
+}
+
+func (s *AccountRepoSuite) SetupTest() {
+	s.ctx = context.Background()
+	tx := testEntTx(s.T())
+	s.client = tx.Client()
+	s.repo = newAccountRepositoryWithSQL(s.client, tx)
+}
+
+func TestAccountRepoSuite(t *testing.T) {
+	suite.Run(t, new(AccountRepoSuite))
+}
+
+// --- Create / GetByID / Update / Delete ---
+
+func (s *AccountRepoSuite) TestCreate() {
+	account := &service.Account{
+		Name: "test-create",
+		Platform: service.PlatformAnthropic,
+		Type: service.AccountTypeOAuth,
+		Status: service.StatusActive,
+		Credentials: map[string]any{},
+		Extra: map[string]any{},
+		Concurrency: 3,
+		Priority: 50,
+		Schedulable: true,
+	}
+
+	err := s.repo.Create(s.ctx, account)
+	s.Require().NoError(err, "Create")
+	s.Require().NotZero(account.ID, "expected ID to be set")
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err, "GetByID")
+	s.Require().Equal("test-create", got.Name)
+}
+
+func (s *AccountRepoSuite) TestGetByID_NotFound() {
+	_, err := s.repo.GetByID(s.ctx, 999999)
+	s.Require().Error(err, "expected error for non-existent ID")
+}
+
+func (s *AccountRepoSuite) TestUpdate() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "original"})
+
+	account.Name = "updated"
+	err := s.repo.Update(s.ctx, account)
+	s.Require().NoError(err, "Update")
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err, "GetByID after update")
+	s.Require().Equal("updated", got.Name)
+}
+
+func (s *AccountRepoSuite) TestDelete() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "to-delete"})
+
+	err := s.repo.Delete(s.ctx, account.ID)
+	s.Require().NoError(err, "Delete")
+
+	_, err = s.repo.GetByID(s.ctx, account.ID)
+	s.Require().Error(err, "expected error after delete")
+}
+
+func (s *AccountRepoSuite) TestDelete_WithGroupBindings() {
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-del"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-del"})
+	mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1)
+
+	err := s.repo.Delete(s.ctx, account.ID)
+	s.Require().NoError(err, "Delete should cascade remove bindings")
+
+	count, err := s.client.AccountGroup.Query().Where(accountgroup.AccountIDEQ(account.ID)).Count(s.ctx)
+	s.Require().NoError(err)
+	s.Require().Zero(count, "expected bindings to be removed")
+}
+
+// --- List / ListWithFilters ---
+
+func (s *AccountRepoSuite) TestList() {
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc1"})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc2"})
+
+	accounts, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10})
+	s.Require().NoError(err, "List")
+	s.Require().Len(accounts, 2)
+	s.Require().Equal(int64(2), page.Total)
+}
+
+func (s *AccountRepoSuite) TestListWithFilters() {
+	tests := []struct {
+		name string
+		setup func(client *dbent.Client)
+		platform string
+		accType string
+		status string
+		search string
+		wantCount int
+		validate func(accounts []service.Account)
+	}{
+		{
+			name: "filter_by_platform",
+			setup: func(client *dbent.Client) {
+				mustCreateAccount(s.T(), client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic})
+				mustCreateAccount(s.T(), client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI})
+			},
+			platform: service.PlatformOpenAI,
+			wantCount: 1,
+			validate: func(accounts []service.Account) {
+				s.Require().Equal(service.PlatformOpenAI, accounts[0].Platform)
+			},
+		},
+		{
+			name: "filter_by_type",
+			setup: func(client *dbent.Client) {
+				mustCreateAccount(s.T(), client, &service.Account{Name: "t1", Type: service.AccountTypeOAuth})
+				mustCreateAccount(s.T(), client, &service.Account{Name: "t2", Type: service.AccountTypeAPIKey})
+			},
+			accType: service.AccountTypeAPIKey,
+			wantCount: 1,
+			validate: func(accounts []service.Account) {
+				s.Require().Equal(service.AccountTypeAPIKey, accounts[0].Type)
+			},
+		},
+		{
+			name: "filter_by_status",
+			setup: func(client *dbent.Client) {
+				mustCreateAccount(s.T(), client, &service.Account{Name: "s1", Status: service.StatusActive})
+				mustCreateAccount(s.T(), client, &service.Account{Name: "s2", Status: service.StatusDisabled})
+			},
+			status: service.StatusDisabled,
+			wantCount: 1,
+			validate: func(accounts []service.Account) {
+				s.Require().Equal(service.StatusDisabled, accounts[0].Status)
+			},
+		},
+		{
+			name: "filter_by_search",
+			setup: func(client *dbent.Client) {
+				mustCreateAccount(s.T(), client, &service.Account{Name: "alpha-account"})
+				mustCreateAccount(s.T(), client, &service.Account{Name: "beta-account"})
+			},
+			search: "alpha",
+			wantCount: 1,
+			validate: func(accounts []service.Account) {
+				s.Require().Contains(accounts[0].Name, "alpha")
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		s.Run(tt.name, func() {
+			// Each case acquires fresh, isolated resources
+			tx := testEntTx(s.T())
+			client := tx.Client()
+			repo := newAccountRepositoryWithSQL(client, tx)
+			ctx := context.Background()
+
+			tt.setup(client)
+
+			accounts, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, tt.platform, tt.accType, tt.status, tt.search)
+			s.Require().NoError(err)
+			s.Require().Len(accounts, tt.wantCount)
+			if tt.validate != nil {
+				tt.validate(accounts)
+			}
+		})
+	}
+}
+
+// --- ListByGroup / ListActive / ListByPlatform ---
+
+func (s *AccountRepoSuite) TestListByGroup() {
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-list"})
+	acc1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Status: service.StatusActive})
+	acc2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Status: service.StatusActive})
+	mustBindAccountToGroup(s.T(), s.client, acc1.ID, group.ID, 2)
+	mustBindAccountToGroup(s.T(), s.client, acc2.ID, group.ID, 1)
+
+	accounts, err := s.repo.ListByGroup(s.ctx, group.ID)
+	s.Require().NoError(err, "ListByGroup")
+	s.Require().Len(accounts, 2)
+	// Should be ordered by priority
+	s.Require().Equal(acc2.ID, accounts[0].ID, "expected acc2 first (priority=1)")
+}
+
+func (s *AccountRepoSuite) TestListActive() {
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "active1", Status: service.StatusActive})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "inactive1", Status: service.StatusDisabled})
+
+	accounts, err := s.repo.ListActive(s.ctx)
+	s.Require().NoError(err, "ListActive")
+	s.Require().Len(accounts, 1)
+	s.Require().Equal("active1", accounts[0].Name)
+}
+
+func (s *AccountRepoSuite) TestListByPlatform() {
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "p1", Platform: service.PlatformAnthropic, Status: service.StatusActive})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "p2", Platform: service.PlatformOpenAI, Status: service.StatusActive})
+
+	accounts, err := s.repo.ListByPlatform(s.ctx, service.PlatformAnthropic)
+	s.Require().NoError(err, "ListByPlatform")
+	s.Require().Len(accounts, 1)
+	s.Require().Equal(service.PlatformAnthropic, accounts[0].Platform)
+}
+
+// --- Preload and VirtualFields ---
+
+func (s *AccountRepoSuite) TestPreload_And_VirtualFields() {
+	proxy := mustCreateProxy(s.T(), s.client, &service.Proxy{Name: "p1"})
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g1"})
+
+	account := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name: "acc1",
+		ProxyID: &proxy.ID,
+	})
+	mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1)
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err, "GetByID")
+	s.Require().NotNil(got.Proxy, "expected Proxy preload")
+	s.Require().Equal(proxy.ID, got.Proxy.ID)
+	s.Require().Len(got.GroupIDs, 1, "expected GroupIDs to be populated")
+	s.Require().Equal(group.ID, got.GroupIDs[0])
+	s.Require().Len(got.Groups, 1, "expected Groups to be populated")
+	s.Require().Equal(group.ID, got.Groups[0].ID)
+
+	accounts, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "", "acc")
+	s.Require().NoError(err, "ListWithFilters")
+	s.Require().Equal(int64(1), page.Total)
+	s.Require().Len(accounts, 1)
+	s.Require().NotNil(accounts[0].Proxy, "expected Proxy preload in list")
+	s.Require().Equal(proxy.ID, accounts[0].Proxy.ID)
+	s.Require().Len(accounts[0].GroupIDs, 1, "expected GroupIDs in list")
+	s.Require().Equal(group.ID, accounts[0].GroupIDs[0])
+}
+
+// --- GroupBinding / AddToGroup / RemoveFromGroup / BindGroups / GetGroups ---
+
+func (s *AccountRepoSuite) TestGroupBinding_And_BindGroups() {
+	g1 := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g1"})
+	g2 := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g2"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc"})
+
+	s.Require().NoError(s.repo.AddToGroup(s.ctx, account.ID, g1.ID, 10), "AddToGroup")
+	groups, err := s.repo.GetGroups(s.ctx, account.ID)
+	s.Require().NoError(err, "GetGroups")
+	s.Require().Len(groups, 1, "expected 1 group")
+	s.Require().Equal(g1.ID, groups[0].ID)
+
+	s.Require().NoError(s.repo.RemoveFromGroup(s.ctx, account.ID, g1.ID), "RemoveFromGroup")
+	groups, err = s.repo.GetGroups(s.ctx, account.ID)
+	s.Require().NoError(err, "GetGroups after remove")
+	s.Require().Empty(groups, "expected 0 groups after remove")
+
+	s.Require().NoError(s.repo.BindGroups(s.ctx, account.ID, []int64{g1.ID, g2.ID}), "BindGroups")
+	groups, err = s.repo.GetGroups(s.ctx, account.ID)
+	s.Require().NoError(err, "GetGroups after bind")
+	s.Require().Len(groups, 2, "expected 2 groups after bind")
+}
+
+func (s *AccountRepoSuite) TestBindGroups_EmptyList() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-empty"})
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-empty"})
+	mustBindAccountToGroup(s.T(), s.client, account.ID, group.ID, 1)
+
+	s.Require().NoError(s.repo.BindGroups(s.ctx, account.ID, []int64{}), "BindGroups empty")
+
+	groups, err := s.repo.GetGroups(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().Empty(groups, "expected 0 groups after binding empty list")
+}
+
+// --- Schedulable ---
+
+func (s *AccountRepoSuite) TestListSchedulable() {
+	now := time.Now()
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sched"})
+
+	okAcc := mustCreateAccount(s.T(), s.client, &service.Account{Name: "ok", Schedulable: true})
+	mustBindAccountToGroup(s.T(), s.client, okAcc.ID, group.ID, 1)
+
+	future := now.Add(10 * time.Minute)
+	overloaded := mustCreateAccount(s.T(), s.client, &service.Account{Name: "over", Schedulable: true, OverloadUntil: &future})
+	mustBindAccountToGroup(s.T(), s.client, overloaded.ID, group.ID, 1)
+
+	sched, err := s.repo.ListSchedulable(s.ctx)
+	s.Require().NoError(err, "ListSchedulable")
+	ids := idsOfAccounts(sched)
+	s.Require().Contains(ids, okAcc.ID)
+	s.Require().NotContains(ids, overloaded.ID)
+}
+
+func (s *AccountRepoSuite) TestListSchedulableByGroupID_TimeBoundaries_And_StatusUpdates() {
+	now := time.Now()
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sched"})
+
+	okAcc := mustCreateAccount(s.T(), s.client, &service.Account{Name: "ok", Schedulable: true})
+	mustBindAccountToGroup(s.T(), s.client, okAcc.ID, group.ID, 1)
+
+	future := now.Add(10 * time.Minute)
+	overloaded := mustCreateAccount(s.T(), s.client, &service.Account{Name: "over", Schedulable: true, OverloadUntil: &future})
+	mustBindAccountToGroup(s.T(), s.client, overloaded.ID, group.ID, 1)
+
+	rateLimited := mustCreateAccount(s.T(), s.client, &service.Account{Name: "rl", Schedulable: true})
+	mustBindAccountToGroup(s.T(), s.client, rateLimited.ID, group.ID, 1)
+	s.Require().NoError(s.repo.SetRateLimited(s.ctx, rateLimited.ID, now.Add(10*time.Minute)), "SetRateLimited")
+
+	s.Require().NoError(s.repo.SetError(s.ctx, overloaded.ID, "boom"), "SetError")
+
+	sched, err := s.repo.ListSchedulableByGroupID(s.ctx, group.ID)
+	s.Require().NoError(err, "ListSchedulableByGroupID")
+	s.Require().Len(sched, 1, "expected only ok account schedulable")
+	s.Require().Equal(okAcc.ID, sched[0].ID)
+
+	s.Require().NoError(s.repo.ClearRateLimit(s.ctx, rateLimited.ID), "ClearRateLimit")
+	sched2, err := s.repo.ListSchedulableByGroupID(s.ctx, group.ID)
+	s.Require().NoError(err, "ListSchedulableByGroupID after ClearRateLimit")
+	s.Require().Len(sched2, 2, "expected 2 schedulable accounts after ClearRateLimit")
+}
+
+func (s *AccountRepoSuite) TestListSchedulableByPlatform() {
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true})
+
+	accounts, err := s.repo.ListSchedulableByPlatform(s.ctx, service.PlatformAnthropic)
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 1)
+	s.Require().Equal(service.PlatformAnthropic, accounts[0].Platform)
+}
+
+func (s *AccountRepoSuite) TestListSchedulableByGroupIDAndPlatform() {
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-sp"})
+	a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a1", Platform: service.PlatformAnthropic, Schedulable: true})
+	a2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a2", Platform: service.PlatformOpenAI, Schedulable: true})
+	mustBindAccountToGroup(s.T(), s.client, a1.ID, group.ID, 1)
+	mustBindAccountToGroup(s.T(), s.client, a2.ID, group.ID, 2)
+
+	accounts, err := s.repo.ListSchedulableByGroupIDAndPlatform(s.ctx, group.ID, service.PlatformAnthropic)
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 1)
+	s.Require().Equal(a1.ID, accounts[0].ID)
+}
+
+func (s *AccountRepoSuite) TestSetSchedulable() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-sched", Schedulable: true})
+
+	s.Require().NoError(s.repo.SetSchedulable(s.ctx, account.ID, false))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().False(got.Schedulable)
+}
+
+// --- SetOverloaded / SetRateLimited / ClearRateLimit ---
+
+func (s *AccountRepoSuite) TestSetOverloaded() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-over"})
+	until := time.Date(2025, 6, 15, 12, 0, 0, 0, time.UTC)
+
+	s.Require().NoError(s.repo.SetOverloaded(s.ctx, account.ID, until))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got.OverloadUntil)
+	s.Require().WithinDuration(until, *got.OverloadUntil, time.Second)
+}
+
+func (s *AccountRepoSuite) TestSetRateLimited() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-rl"})
+	resetAt := time.Date(2025, 6, 15, 14, 0, 0, 0, time.UTC)
+
+	s.Require().NoError(s.repo.SetRateLimited(s.ctx, account.ID, resetAt))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got.RateLimitedAt)
+	s.Require().NotNil(got.RateLimitResetAt)
+	s.Require().WithinDuration(resetAt, *got.RateLimitResetAt, time.Second)
+}
+
+func (s *AccountRepoSuite) TestClearRateLimit() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-clear"})
+	until := time.Now().Add(1 * time.Hour)
+	s.Require().NoError(s.repo.SetOverloaded(s.ctx, account.ID, until))
+	s.Require().NoError(s.repo.SetRateLimited(s.ctx, account.ID, until))
+
+	s.Require().NoError(s.repo.ClearRateLimit(s.ctx, account.ID))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().Nil(got.RateLimitedAt)
+	s.Require().Nil(got.RateLimitResetAt)
+	s.Require().Nil(got.OverloadUntil)
+}
+
+// --- UpdateLastUsed ---
+
+func (s *AccountRepoSuite) TestUpdateLastUsed() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-used"})
+	s.Require().Nil(account.LastUsedAt)
+
+	s.Require().NoError(s.repo.UpdateLastUsed(s.ctx, account.ID))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got.LastUsedAt)
+}
+
+// --- SetError ---
+
+func (s *AccountRepoSuite) TestSetError() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-err", Status: service.StatusActive})
+
+	s.Require().NoError(s.repo.SetError(s.ctx, account.ID, "something went wrong"))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().Equal(service.StatusError, got.Status)
+	s.Require().Equal("something went wrong", got.ErrorMessage)
+}
+
+// --- UpdateSessionWindow ---
+
+func (s *AccountRepoSuite) TestUpdateSessionWindow() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-win"})
+	start := time.Date(2025, 6, 15, 10, 0, 0, 0, time.UTC)
+	end := time.Date(2025, 6, 15, 15, 0, 0, 0, time.UTC)
+
+	s.Require().NoError(s.repo.UpdateSessionWindow(s.ctx, account.ID, &start, &end, "active"))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got.SessionWindowStart)
+	s.Require().NotNil(got.SessionWindowEnd)
+	s.Require().Equal("active", got.SessionWindowStatus)
+}
+
+// --- UpdateExtra ---
+
+func (s *AccountRepoSuite) TestUpdateExtra_MergesFields() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name: "acc-extra",
+		Extra: map[string]any{"a": "1"},
+	})
+	s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{"b": "2"}), "UpdateExtra")
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err, "GetByID")
+	s.Require().Equal("1", got.Extra["a"])
+	s.Require().Equal("2", got.Extra["b"])
+}
+
+func (s *AccountRepoSuite) TestUpdateExtra_EmptyUpdates() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-extra-empty"})
+	s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{}))
+}
+
+func (s *AccountRepoSuite) TestUpdateExtra_NilExtra() {
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-nil-extra", Extra: nil})
+	s.Require().NoError(s.repo.UpdateExtra(s.ctx, account.ID, map[string]any{"key": "val"}))
+
+	got, err := s.repo.GetByID(s.ctx, account.ID)
+	s.Require().NoError(err)
+	s.Require().Equal("val", got.Extra["key"])
+}
+
+// --- GetByCRSAccountID ---
+
+func (s *AccountRepoSuite) TestGetByCRSAccountID() {
+	crsID := "crs-12345"
+	mustCreateAccount(s.T(), s.client, &service.Account{
+		Name: "acc-crs",
+		Extra: map[string]any{"crs_account_id": crsID},
+	})
+
+	got, err := s.repo.GetByCRSAccountID(s.ctx, crsID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got)
+	s.Require().Equal("acc-crs", got.Name)
+}
"crs-12345" + mustCreateAccount(s.T(), s.client, &service.Account{ + Name: "acc-crs", + Extra: map[string]any{"crs_account_id": crsID}, + }) + + got, err := s.repo.GetByCRSAccountID(s.ctx, crsID) + s.Require().NoError(err) + s.Require().NotNil(got) + s.Require().Equal("acc-crs", got.Name) +} + +func (s *AccountRepoSuite) TestGetByCRSAccountID_NotFound() { + got, err := s.repo.GetByCRSAccountID(s.ctx, "non-existent") + s.Require().NoError(err) + s.Require().Nil(got) +} + +func (s *AccountRepoSuite) TestGetByCRSAccountID_EmptyString() { + got, err := s.repo.GetByCRSAccountID(s.ctx, "") + s.Require().NoError(err) + s.Require().Nil(got) +} + +// --- BulkUpdate --- + +func (s *AccountRepoSuite) TestBulkUpdate() { + a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk1", Priority: 1}) + a2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk2", Priority: 1}) + + newPriority := 99 + affected, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID, a2.ID}, service.AccountBulkUpdate{ + Priority: &newPriority, + }) + s.Require().NoError(err) + s.Require().GreaterOrEqual(affected, int64(1), "expected at least one affected row") + + got1, _ := s.repo.GetByID(s.ctx, a1.ID) + got2, _ := s.repo.GetByID(s.ctx, a2.ID) + s.Require().Equal(99, got1.Priority) + s.Require().Equal(99, got2.Priority) +} + +func (s *AccountRepoSuite) TestBulkUpdate_MergeCredentials() { + a1 := mustCreateAccount(s.T(), s.client, &service.Account{ + Name: "bulk-cred", + Credentials: map[string]any{"existing": "value"}, + }) + + _, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{ + Credentials: map[string]any{"new_key": "new_value"}, + }) + s.Require().NoError(err) + + got, _ := s.repo.GetByID(s.ctx, a1.ID) + s.Require().Equal("value", got.Credentials["existing"]) + s.Require().Equal("new_value", got.Credentials["new_key"]) +} + +func (s *AccountRepoSuite) TestBulkUpdate_MergeExtra() { + a1 := mustCreateAccount(s.T(), s.client, &service.Account{ + Name: "bulk-extra", + Extra: map[string]any{"existing": "val"}, + }) + + _, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{ + Extra: map[string]any{"new_key": "new_val"}, + }) + s.Require().NoError(err) + + got, _ := s.repo.GetByID(s.ctx, a1.ID) + s.Require().Equal("val", got.Extra["existing"]) + s.Require().Equal("new_val", got.Extra["new_key"]) +} + +func (s *AccountRepoSuite) TestBulkUpdate_EmptyIDs() { + affected, err := s.repo.BulkUpdate(s.ctx, []int64{}, service.AccountBulkUpdate{}) + s.Require().NoError(err) + s.Require().Zero(affected) +} + +func (s *AccountRepoSuite) TestBulkUpdate_EmptyUpdates() { + a1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "bulk-empty"}) + + affected, err := s.repo.BulkUpdate(s.ctx, []int64{a1.ID}, service.AccountBulkUpdate{}) + s.Require().NoError(err) + s.Require().Zero(affected) +} + +func idsOfAccounts(accounts []service.Account) []int64 { + out := make([]int64, 0, len(accounts)) + for i := range accounts { + out = append(out, accounts[i].ID) + } + return out +} diff --git a/backend/internal/repository/allowed_groups_contract_integration_test.go b/backend/internal/repository/allowed_groups_contract_integration_test.go new file mode 100644 index 00000000..0d0f11e5 --- /dev/null +++ b/backend/internal/repository/allowed_groups_contract_integration_test.go @@ -0,0 +1,145 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) 
+ +func uniqueTestValue(t *testing.T, prefix string) string { + t.Helper() + safeName := strings.NewReplacer("/", "_", " ", "_").Replace(t.Name()) + return fmt.Sprintf("%s-%s", prefix, safeName) +} + +func TestUserRepository_RemoveGroupFromAllowedGroups_RemovesAllOccurrences(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + entClient := tx.Client() + + targetGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "target-group")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + otherGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "other-group")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + + repo := newUserRepositoryWithSQL(entClient, tx) + + u1 := &service.User{ + Email: uniqueTestValue(t, "u1") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID, otherGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u1)) + + u2 := &service.User{ + Email: uniqueTestValue(t, "u2") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u2)) + + u3 := &service.User{ + Email: uniqueTestValue(t, "u3") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{otherGroup.ID}, + } + require.NoError(t, repo.Create(ctx, u3)) + + affected, err := repo.RemoveGroupFromAllowedGroups(ctx, targetGroup.ID) + require.NoError(t, err) + require.Equal(t, int64(2), affected) + + u1After, err := repo.GetByID(ctx, u1.ID) + require.NoError(t, err) + require.NotContains(t, u1After.AllowedGroups, targetGroup.ID) + require.Contains(t, u1After.AllowedGroups, otherGroup.ID) + + u2After, err := repo.GetByID(ctx, u2.ID) + require.NoError(t, err) + require.NotContains(t, u2After.AllowedGroups, targetGroup.ID) +} + +func TestGroupRepository_DeleteCascade_RemovesAllowedGroupsAndClearsApiKeys(t *testing.T) { + ctx := context.Background() + tx := testEntTx(t) + entClient := tx.Client() + + targetGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "delete-cascade-target")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + otherGroup, err := entClient.Group.Create(). + SetName(uniqueTestValue(t, "delete-cascade-other")). + SetStatus(service.StatusActive). + Save(ctx) + require.NoError(t, err) + + userRepo := newUserRepositoryWithSQL(entClient, tx) + groupRepo := newGroupRepositoryWithSQL(entClient, tx) + apiKeyRepo := NewAPIKeyRepository(entClient) + + u := &service.User{ + Email: uniqueTestValue(t, "cascade-user") + "@example.com", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + Concurrency: 5, + AllowedGroups: []int64{targetGroup.ID, otherGroup.ID}, + } + require.NoError(t, userRepo.Create(ctx, u)) + + key := &service.APIKey{ + UserID: u.ID, + Key: uniqueTestValue(t, "sk-test-delete-cascade"), + Name: "test key", + GroupID: &targetGroup.ID, + Status: service.StatusActive, + } + require.NoError(t, apiKeyRepo.Create(ctx, key)) + + _, err = groupRepo.DeleteCascade(ctx, targetGroup.ID) + require.NoError(t, err) + + // Deleted group should be hidden by default queries (soft-delete semantics). 
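+ // Both GetByID and ListActive use the default query path (no SkipSoftDelete), so neither may return the cascade-deleted group.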
+ _, err = groupRepo.GetByID(ctx, targetGroup.ID) + require.ErrorIs(t, err, service.ErrGroupNotFound) + + activeGroups, err := groupRepo.ListActive(ctx) + require.NoError(t, err) + for _, g := range activeGroups { + require.NotEqual(t, targetGroup.ID, g.ID) + } + + // User.allowed_groups should no longer include the deleted group. + uAfter, err := userRepo.GetByID(ctx, u.ID) + require.NoError(t, err) + require.NotContains(t, uAfter.AllowedGroups, targetGroup.ID) + require.Contains(t, uAfter.AllowedGroups, otherGroup.ID) + + // API keys bound to the deleted group should have group_id cleared. + keyAfter, err := apiKeyRepo.GetByID(ctx, key.ID) + require.NoError(t, err) + require.Nil(t, keyAfter.GroupID) +} diff --git a/backend/internal/repository/api_key_cache.go b/backend/internal/repository/api_key_cache.go new file mode 100644 index 00000000..6d834b40 --- /dev/null +++ b/backend/internal/repository/api_key_cache.go @@ -0,0 +1,93 @@ +package repository + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const ( + apiKeyRateLimitKeyPrefix = "apikey:ratelimit:" + apiKeyRateLimitDuration = 24 * time.Hour + apiKeyAuthCachePrefix = "apikey:auth:" +) + +// apiKeyRateLimitKey generates the Redis key for API key creation rate limiting. +func apiKeyRateLimitKey(userID int64) string { + return fmt.Sprintf("%s%d", apiKeyRateLimitKeyPrefix, userID) +} + +func apiKeyAuthCacheKey(key string) string { + return fmt.Sprintf("%s%s", apiKeyAuthCachePrefix, key) +} + +type apiKeyCache struct { + rdb *redis.Client +} + +func NewAPIKeyCache(rdb *redis.Client) service.APIKeyCache { + return &apiKeyCache{rdb: rdb} +} + +func (c *apiKeyCache) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) { + key := apiKeyRateLimitKey(userID) + count, err := c.rdb.Get(ctx, key).Int() + if errors.Is(err, redis.Nil) { + return 0, nil + } + return count, err +} + +func (c *apiKeyCache) IncrementCreateAttemptCount(ctx context.Context, userID int64) error { + key := apiKeyRateLimitKey(userID) + pipe := c.rdb.Pipeline() + pipe.Incr(ctx, key) + pipe.Expire(ctx, key, apiKeyRateLimitDuration) + _, err := pipe.Exec(ctx) + return err +} + +func (c *apiKeyCache) DeleteCreateAttemptCount(ctx context.Context, userID int64) error { + key := apiKeyRateLimitKey(userID) + return c.rdb.Del(ctx, key).Err() +} + +func (c *apiKeyCache) IncrementDailyUsage(ctx context.Context, apiKey string) error { + return c.rdb.Incr(ctx, apiKey).Err() +} + +func (c *apiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error { + return c.rdb.Expire(ctx, apiKey, ttl).Err() +} + +func (c *apiKeyCache) GetAuthCache(ctx context.Context, key string) (*service.APIKeyAuthCacheEntry, error) { + val, err := c.rdb.Get(ctx, apiKeyAuthCacheKey(key)).Bytes() + if err != nil { + return nil, err + } + var entry service.APIKeyAuthCacheEntry + if err := json.Unmarshal(val, &entry); err != nil { + return nil, err + } + return &entry, nil +} + +func (c *apiKeyCache) SetAuthCache(ctx context.Context, key string, entry *service.APIKeyAuthCacheEntry, ttl time.Duration) error { + if entry == nil { + return nil + } + payload, err := json.Marshal(entry) + if err != nil { + return err + } + return c.rdb.Set(ctx, apiKeyAuthCacheKey(key), payload, ttl).Err() +} + +func (c *apiKeyCache) DeleteAuthCache(ctx context.Context, key string) error { + return c.rdb.Del(ctx, apiKeyAuthCacheKey(key)).Err() +} diff --git 
a/backend/internal/repository/api_key_cache_integration_test.go b/backend/internal/repository/api_key_cache_integration_test.go new file mode 100644 index 00000000..e9394917 --- /dev/null +++ b/backend/internal/repository/api_key_cache_integration_test.go @@ -0,0 +1,127 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type ApiKeyCacheSuite struct { + IntegrationRedisSuite +} + +func (s *ApiKeyCacheSuite) TestCreateAttemptCount() { + tests := []struct { + name string + fn func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) + }{ + { + name: "missing_key_returns_zero_nil", + fn: func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) { + userID := int64(1) + + count, err := cache.GetCreateAttemptCount(ctx, userID) + + require.NoError(s.T(), err, "expected nil error for missing key") + require.Equal(s.T(), 0, count, "expected zero count for missing key") + }, + }, + { + name: "increment_increases_count_and_sets_ttl", + fn: func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) { + userID := int64(1) + key := fmt.Sprintf("%s%d", apiKeyRateLimitKeyPrefix, userID) + + require.NoError(s.T(), cache.IncrementCreateAttemptCount(ctx, userID), "IncrementCreateAttemptCount") + require.NoError(s.T(), cache.IncrementCreateAttemptCount(ctx, userID), "IncrementCreateAttemptCount 2") + + count, err := cache.GetCreateAttemptCount(ctx, userID) + require.NoError(s.T(), err, "GetCreateAttemptCount") + require.Equal(s.T(), 2, count, "count mismatch") + + ttl, err := rdb.TTL(ctx, key).Result() + require.NoError(s.T(), err, "TTL") + s.AssertTTLWithin(ttl, 1*time.Second, apiKeyRateLimitDuration) + }, + }, + { + name: "delete_removes_key", + fn: func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) { + userID := int64(1) + + require.NoError(s.T(), cache.IncrementCreateAttemptCount(ctx, userID)) + require.NoError(s.T(), cache.DeleteCreateAttemptCount(ctx, userID), "DeleteCreateAttemptCount") + + count, err := cache.GetCreateAttemptCount(ctx, userID) + require.NoError(s.T(), err, "expected nil error after delete") + require.Equal(s.T(), 0, count, "expected zero count after delete") + }, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + // Re-acquire isolated resources for each case + rdb := testRedis(s.T()) + cache := &apiKeyCache{rdb: rdb} + ctx := context.Background() + + tt.fn(ctx, rdb, cache) + }) + } +} + +func (s *ApiKeyCacheSuite) TestDailyUsage() { + tests := []struct { + name string + fn func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) + }{ + { + name: "increment_increases_count", + fn: func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) { + dailyKey := "daily:sk-test" + + require.NoError(s.T(), cache.IncrementDailyUsage(ctx, dailyKey), "IncrementDailyUsage") + require.NoError(s.T(), cache.IncrementDailyUsage(ctx, dailyKey), "IncrementDailyUsage 2") + + n, err := rdb.Get(ctx, dailyKey).Int() + require.NoError(s.T(), err, "Get dailyKey") + require.Equal(s.T(), 2, n, "expected daily usage=2") + }, + }, + { + name: "set_expiry_sets_ttl", + fn: func(ctx context.Context, rdb *redis.Client, cache *apiKeyCache) { + dailyKey := "daily:sk-test-expiry" + + require.NoError(s.T(), cache.IncrementDailyUsage(ctx, dailyKey)) + require.NoError(s.T(), cache.SetDailyUsageExpiry(ctx, dailyKey, 1*time.Hour), "SetDailyUsageExpiry") + + ttl, err := rdb.TTL(ctx, dailyKey).Result() + 
require.NoError(s.T(), err, "TTL dailyKey") + require.Greater(s.T(), ttl, time.Duration(0), "expected ttl > 0") + }, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + rdb := testRedis(s.T()) + cache := &apiKeyCache{rdb: rdb} + ctx := context.Background() + + tt.fn(ctx, rdb, cache) + }) + } +} + +func TestApiKeyCacheSuite(t *testing.T) { + suite.Run(t, new(ApiKeyCacheSuite)) +} diff --git a/backend/internal/repository/api_key_cache_test.go b/backend/internal/repository/api_key_cache_test.go new file mode 100644 index 00000000..7ad84ba2 --- /dev/null +++ b/backend/internal/repository/api_key_cache_test.go @@ -0,0 +1,46 @@ +//go:build unit + +package repository + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestApiKeyRateLimitKey(t *testing.T) { + tests := []struct { + name string + userID int64 + expected string + }{ + { + name: "normal_user_id", + userID: 123, + expected: "apikey:ratelimit:123", + }, + { + name: "zero_user_id", + userID: 0, + expected: "apikey:ratelimit:0", + }, + { + name: "negative_user_id", + userID: -1, + expected: "apikey:ratelimit:-1", + }, + { + name: "max_int64", + userID: math.MaxInt64, + expected: "apikey:ratelimit:9223372036854775807", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := apiKeyRateLimitKey(tc.userID) + require.Equal(t, tc.expected, got) + }) + } +} diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go new file mode 100644 index 00000000..77a3f233 --- /dev/null +++ b/backend/internal/repository/api_key_repo.go @@ -0,0 +1,435 @@ +package repository + +import ( + "context" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +type apiKeyRepository struct { + client *dbent.Client +} + +func NewAPIKeyRepository(client *dbent.Client) service.APIKeyRepository { + return &apiKeyRepository{client: client} +} + +func (r *apiKeyRepository) activeQuery() *dbent.APIKeyQuery { + // Filter out soft-deleted records by default so deleted keys can no longer be queried. + return r.client.APIKey.Query().Where(apikey.DeletedAtIsNil()) +} + +func (r *apiKeyRepository) Create(ctx context.Context, key *service.APIKey) error { + builder := r.client.APIKey.Create(). + SetUserID(key.UserID). + SetKey(key.Key). + SetName(key.Name). + SetStatus(key.Status). + SetNillableGroupID(key.GroupID) + + if len(key.IPWhitelist) > 0 { + builder.SetIPWhitelist(key.IPWhitelist) + } + if len(key.IPBlacklist) > 0 { + builder.SetIPBlacklist(key.IPBlacklist) + } + + created, err := builder.Save(ctx) + if err == nil { + key.ID = created.ID + key.CreatedAt = created.CreatedAt + key.UpdatedAt = created.UpdatedAt + } + return translatePersistenceError(err, nil, service.ErrAPIKeyExists) +} + +func (r *apiKeyRepository) GetByID(ctx context.Context, id int64) (*service.APIKey, error) { + m, err := r.activeQuery(). + Where(apikey.IDEQ(id)). + WithUser(). + WithGroup(). 
+ Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrAPIKeyNotFound + } + return nil, err + } + return apiKeyEntityToService(m), nil +} + +// GetKeyAndOwnerID returns the key and owner (user) ID for the given API Key ID. +// It performs better than GetByID because it: +// - uses Select() to fetch only the required fields, reducing data transfer +// - skips loading the full API Key entity and its related data (User, Group, etc.) +// - fits scenarios such as deletion that only need the key and the user ID +func (r *apiKeyRepository) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + m, err := r.activeQuery(). + Where(apikey.IDEQ(id)). + Select(apikey.FieldKey, apikey.FieldUserID). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return "", 0, service.ErrAPIKeyNotFound + } + return "", 0, err + } + return m.Key, m.UserID, nil +} + +func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { + m, err := r.activeQuery(). + Where(apikey.KeyEQ(key)). + WithUser(). + WithGroup(). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrAPIKeyNotFound + } + return nil, err + } + return apiKeyEntityToService(m), nil +} + +func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + m, err := r.activeQuery(). + Where(apikey.KeyEQ(key)). + Select( + apikey.FieldID, + apikey.FieldUserID, + apikey.FieldGroupID, + apikey.FieldStatus, + apikey.FieldIPWhitelist, + apikey.FieldIPBlacklist, + ). + WithUser(func(q *dbent.UserQuery) { + q.Select( + user.FieldID, + user.FieldStatus, + user.FieldRole, + user.FieldBalance, + user.FieldConcurrency, + ) + }). + WithGroup(func(q *dbent.GroupQuery) { + q.Select( + group.FieldID, + group.FieldName, + group.FieldPlatform, + group.FieldStatus, + group.FieldSubscriptionType, + group.FieldRateMultiplier, + group.FieldDailyLimitUsd, + group.FieldWeeklyLimitUsd, + group.FieldMonthlyLimitUsd, + group.FieldImagePrice1k, + group.FieldImagePrice2k, + group.FieldImagePrice4k, + group.FieldClaudeCodeOnly, + group.FieldFallbackGroupID, + ) + }). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrAPIKeyNotFound + } + return nil, err + } + return apiKeyEntityToService(m), nil +} + +func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) error { + // Atomic operation: fold the soft-delete check into the update statement itself to avoid a race. + // The previous implementation checked Exist first and then called UpdateOneID; if a soft delete + // landed between the two steps, it would update an already-deleted record. + // Update().Where() is used here so that only records that are not soft-deleted can be updated. + // updated_at is also set explicitly to avoid the concurrent-visibility issues of a second query. + now := time.Now() + builder := r.client.APIKey.Update(). + Where(apikey.IDEQ(key.ID), apikey.DeletedAtIsNil()). + SetName(key.Name). + SetStatus(key.Status). + SetUpdatedAt(now) + if key.GroupID != nil { + builder.SetGroupID(*key.GroupID) + } else { + builder.ClearGroupID() + } + + // IP restriction fields + if len(key.IPWhitelist) > 0 { + builder.SetIPWhitelist(key.IPWhitelist) + } else { + builder.ClearIPWhitelist() + } + if len(key.IPBlacklist) > 0 { + builder.SetIPBlacklist(key.IPBlacklist) + } else { + builder.ClearIPBlacklist() + } + + affected, err := builder.Save(ctx) + if err != nil { + return err + } + if affected == 0 { + // Zero affected rows means the record does not exist or has already been soft-deleted. + return service.ErrAPIKeyNotFound + } + + // Backfill with the same timestamp so a concurrent delete cannot fail a follow-up query. + key.UpdatedAt = now + return nil +} + +func (r *apiKeyRepository) Delete(ctx context.Context, id int64) error { + // Explicit soft delete: avoid relying on hook behavior and guarantee deleted_at is set. + affected, err := r.client.APIKey.Update(). + Where(apikey.IDEQ(id), apikey.DeletedAtIsNil()). + SetDeletedAt(time.Now()). 
+ Save(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return service.ErrAPIKeyNotFound + } + return err + } + if affected == 0 { + exists, err := r.client.APIKey.Query(). + Where(apikey.IDEQ(id)). + Exist(mixins.SkipSoftDelete(ctx)) + if err != nil { + return err + } + if exists { + return nil + } + return service.ErrAPIKeyNotFound + } + return nil +} + +func (r *apiKeyRepository) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + q := r.activeQuery().Where(apikey.UserIDEQ(userID)) + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + keys, err := q. + WithGroup(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(apikey.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outKeys := make([]service.APIKey, 0, len(keys)) + for i := range keys { + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) + } + + return outKeys, paginationResultFromTotal(int64(total), params), nil +} + +func (r *apiKeyRepository) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + if len(apiKeyIDs) == 0 { + return []int64{}, nil + } + + ids, err := r.client.APIKey.Query(). + Where(apikey.UserIDEQ(userID), apikey.IDIn(apiKeyIDs...), apikey.DeletedAtIsNil()). + IDs(ctx) + if err != nil { + return nil, err + } + return ids, nil +} + +func (r *apiKeyRepository) CountByUserID(ctx context.Context, userID int64) (int64, error) { + count, err := r.activeQuery().Where(apikey.UserIDEQ(userID)).Count(ctx) + return int64(count), err +} + +func (r *apiKeyRepository) ExistsByKey(ctx context.Context, key string) (bool, error) { + count, err := r.activeQuery().Where(apikey.KeyEQ(key)).Count(ctx) + return count > 0, err +} + +func (r *apiKeyRepository) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + q := r.activeQuery().Where(apikey.GroupIDEQ(groupID)) + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + keys, err := q. + WithUser(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(apikey.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outKeys := make([]service.APIKey, 0, len(keys)) + for i := range keys { + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) + } + + return outKeys, paginationResultFromTotal(int64(total), params), nil +} + +// SearchAPIKeys searches API keys by user ID and/or keyword (name) +func (r *apiKeyRepository) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.APIKey, error) { + q := r.activeQuery() + if userID > 0 { + q = q.Where(apikey.UserIDEQ(userID)) + } + + if keyword != "" { + q = q.Where(apikey.NameContainsFold(keyword)) + } + + keys, err := q.Limit(limit).Order(dbent.Desc(apikey.FieldID)).All(ctx) + if err != nil { + return nil, err + } + + outKeys := make([]service.APIKey, 0, len(keys)) + for i := range keys { + outKeys = append(outKeys, *apiKeyEntityToService(keys[i])) + } + return outKeys, nil +} + +// ClearGroupIDByGroupID sets group_id to nil for every API Key in the given group +func (r *apiKeyRepository) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + n, err := r.client.APIKey.Update(). + Where(apikey.GroupIDEQ(groupID), apikey.DeletedAtIsNil()). + ClearGroupID(). 
+ Save(ctx) + return int64(n), err +} + +// CountByGroupID returns the number of API Keys in a group +func (r *apiKeyRepository) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + count, err := r.activeQuery().Where(apikey.GroupIDEQ(groupID)).Count(ctx) + return int64(count), err +} + +func (r *apiKeyRepository) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + keys, err := r.activeQuery(). + Where(apikey.UserIDEQ(userID)). + Select(apikey.FieldKey). + Strings(ctx) + if err != nil { + return nil, err + } + return keys, nil +} + +func (r *apiKeyRepository) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + keys, err := r.activeQuery(). + Where(apikey.GroupIDEQ(groupID)). + Select(apikey.FieldKey). + Strings(ctx) + if err != nil { + return nil, err + } + return keys, nil +} + +func apiKeyEntityToService(m *dbent.APIKey) *service.APIKey { + if m == nil { + return nil + } + out := &service.APIKey{ + ID: m.ID, + UserID: m.UserID, + Key: m.Key, + Name: m.Name, + Status: m.Status, + IPWhitelist: m.IPWhitelist, + IPBlacklist: m.IPBlacklist, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + GroupID: m.GroupID, + } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + return out +} + +func userEntityToService(u *dbent.User) *service.User { + if u == nil { + return nil + } + return &service.User{ + ID: u.ID, + Email: u.Email, + Username: u.Username, + Notes: u.Notes, + PasswordHash: u.PasswordHash, + Role: u.Role, + Balance: u.Balance, + Concurrency: u.Concurrency, + Status: u.Status, + CreatedAt: u.CreatedAt, + UpdatedAt: u.UpdatedAt, + } +} + +func groupEntityToService(g *dbent.Group) *service.Group { + if g == nil { + return nil + } + return &service.Group{ + ID: g.ID, + Name: g.Name, + Description: derefString(g.Description), + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + Hydrated: true, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUsd, + WeeklyLimitUSD: g.WeeklyLimitUsd, + MonthlyLimitUSD: g.MonthlyLimitUsd, + ImagePrice1K: g.ImagePrice1k, + ImagePrice2K: g.ImagePrice2k, + ImagePrice4K: g.ImagePrice4k, + DefaultValidityDays: g.DefaultValidityDays, + ClaudeCodeOnly: g.ClaudeCodeOnly, + FallbackGroupID: g.FallbackGroupID, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + } +} + +func derefString(s *string) string { + if s == nil { + return "" + } + return *s +} diff --git a/backend/internal/repository/api_key_repo_integration_test.go b/backend/internal/repository/api_key_repo_integration_test.go new file mode 100644 index 00000000..879a0576 --- /dev/null +++ b/backend/internal/repository/api_key_repo_integration_test.go @@ -0,0 +1,385 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type APIKeyRepoSuite struct { + suite.Suite + ctx context.Context + client *dbent.Client + repo *apiKeyRepository +} + +func (s *APIKeyRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.client = tx.Client() + s.repo = NewAPIKeyRepository(s.client).(*apiKeyRepository) +} + +func TestAPIKeyRepoSuite(t *testing.T) { + suite.Run(t, new(APIKeyRepoSuite)) +} + +// --- Create / GetByID / GetByKey --- + +func (s 
*APIKeyRepoSuite) TestCreate() { + user := s.mustCreateUser("create@test.com") + + key := &service.APIKey{ + UserID: user.ID, + Key: "sk-create-test", + Name: "Test Key", + Status: service.StatusActive, + } + + err := s.repo.Create(s.ctx, key) + s.Require().NoError(err, "Create") + s.Require().NotZero(key.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, key.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("sk-create-test", got.Key) +} + +func (s *APIKeyRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") +} + +func (s *APIKeyRepoSuite) TestGetByKey() { + user := s.mustCreateUser("getbykey@test.com") + group := s.mustCreateGroup("g-key") + + key := &service.APIKey{ + UserID: user.ID, + Key: "sk-getbykey", + Name: "My Key", + GroupID: &group.ID, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) + + got, err := s.repo.GetByKey(s.ctx, key.Key) + s.Require().NoError(err, "GetByKey") + s.Require().Equal(key.ID, got.ID) + s.Require().NotNil(got.User, "expected User preload") + s.Require().Equal(user.ID, got.User.ID) + s.Require().NotNil(got.Group, "expected Group preload") + s.Require().Equal(group.ID, got.Group.ID) +} + +func (s *APIKeyRepoSuite) TestGetByKey_NotFound() { + _, err := s.repo.GetByKey(s.ctx, "non-existent-key") + s.Require().Error(err, "expected error for non-existent key") +} + +// --- Update --- + +func (s *APIKeyRepoSuite) TestUpdate() { + user := s.mustCreateUser("update@test.com") + key := &service.APIKey{ + UserID: user.ID, + Key: "sk-update", + Name: "Original", + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) + + key.Name = "Renamed" + key.Status = service.StatusDisabled + err := s.repo.Update(s.ctx, key) + s.Require().NoError(err, "Update") + + got, err := s.repo.GetByID(s.ctx, key.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("sk-update", got.Key, "Update should not change key") + s.Require().Equal(user.ID, got.UserID, "Update should not change user_id") + s.Require().Equal("Renamed", got.Name) + s.Require().Equal(service.StatusDisabled, got.Status) +} + +func (s *APIKeyRepoSuite) TestUpdate_ClearGroupID() { + user := s.mustCreateUser("cleargroup@test.com") + group := s.mustCreateGroup("g-clear") + key := &service.APIKey{ + UserID: user.ID, + Key: "sk-clear-group", + Name: "Group Key", + GroupID: &group.ID, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) + + key.GroupID = nil + err := s.repo.Update(s.ctx, key) + s.Require().NoError(err, "Update") + + got, err := s.repo.GetByID(s.ctx, key.ID) + s.Require().NoError(err) + s.Require().Nil(got.GroupID, "expected GroupID to be cleared") +} + +// --- Delete --- + +func (s *APIKeyRepoSuite) TestDelete() { + user := s.mustCreateUser("delete@test.com") + key := &service.APIKey{ + UserID: user.ID, + Key: "sk-delete", + Name: "Delete Me", + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, key)) + + err := s.repo.Delete(s.ctx, key.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, key.ID) + s.Require().Error(err, "expected error after delete") +} + +// --- ListByUserID / CountByUserID --- + +func (s *APIKeyRepoSuite) TestListByUserID() { + user := s.mustCreateUser("listbyuser@test.com") + s.mustCreateApiKey(user.ID, "sk-list-1", "Key 1", nil) + s.mustCreateApiKey(user.ID, "sk-list-2", "Key 2", nil) + + keys, page, err := 
s.repo.ListByUserID(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "ListByUserID") + s.Require().Len(keys, 2) + s.Require().Equal(int64(2), page.Total) +} + +func (s *APIKeyRepoSuite) TestListByUserID_Pagination() { + user := s.mustCreateUser("paging@test.com") + for i := 0; i < 5; i++ { + s.mustCreateApiKey(user.ID, "sk-page-"+string(rune('a'+i)), "Key", nil) + } + + keys, page, err := s.repo.ListByUserID(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 2}) + s.Require().NoError(err) + s.Require().Len(keys, 2) + s.Require().Equal(int64(5), page.Total) + s.Require().Equal(3, page.Pages) +} + +func (s *APIKeyRepoSuite) TestCountByUserID() { + user := s.mustCreateUser("count@test.com") + s.mustCreateApiKey(user.ID, "sk-count-1", "K1", nil) + s.mustCreateApiKey(user.ID, "sk-count-2", "K2", nil) + + count, err := s.repo.CountByUserID(s.ctx, user.ID) + s.Require().NoError(err, "CountByUserID") + s.Require().Equal(int64(2), count) +} + +// --- ListByGroupID / CountByGroupID --- + +func (s *APIKeyRepoSuite) TestListByGroupID() { + user := s.mustCreateUser("listbygroup@test.com") + group := s.mustCreateGroup("g-list") + + s.mustCreateApiKey(user.ID, "sk-grp-1", "K1", &group.ID) + s.mustCreateApiKey(user.ID, "sk-grp-2", "K2", &group.ID) + s.mustCreateApiKey(user.ID, "sk-grp-3", "K3", nil) // no group + + keys, page, err := s.repo.ListByGroupID(s.ctx, group.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "ListByGroupID") + s.Require().Len(keys, 2) + s.Require().Equal(int64(2), page.Total) + // User preloaded + s.Require().NotNil(keys[0].User) +} + +func (s *APIKeyRepoSuite) TestCountByGroupID() { + user := s.mustCreateUser("countgroup@test.com") + group := s.mustCreateGroup("g-count") + s.mustCreateApiKey(user.ID, "sk-gc-1", "K1", &group.ID) + + count, err := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "CountByGroupID") + s.Require().Equal(int64(1), count) +} + +// --- ExistsByKey --- + +func (s *APIKeyRepoSuite) TestExistsByKey() { + user := s.mustCreateUser("exists@test.com") + s.mustCreateApiKey(user.ID, "sk-exists", "K", nil) + + exists, err := s.repo.ExistsByKey(s.ctx, "sk-exists") + s.Require().NoError(err, "ExistsByKey") + s.Require().True(exists) + + notExists, err := s.repo.ExistsByKey(s.ctx, "sk-not-exists") + s.Require().NoError(err) + s.Require().False(notExists) +} + +// --- SearchAPIKeys --- + +func (s *APIKeyRepoSuite) TestSearchAPIKeys() { + user := s.mustCreateUser("search@test.com") + s.mustCreateApiKey(user.ID, "sk-search-1", "Production Key", nil) + s.mustCreateApiKey(user.ID, "sk-search-2", "Development Key", nil) + + found, err := s.repo.SearchAPIKeys(s.ctx, user.ID, "prod", 10) + s.Require().NoError(err, "SearchAPIKeys") + s.Require().Len(found, 1) + s.Require().Contains(found[0].Name, "Production") +} + +func (s *APIKeyRepoSuite) TestSearchAPIKeys_NoKeyword() { + user := s.mustCreateUser("searchnokw@test.com") + s.mustCreateApiKey(user.ID, "sk-nk-1", "K1", nil) + s.mustCreateApiKey(user.ID, "sk-nk-2", "K2", nil) + + found, err := s.repo.SearchAPIKeys(s.ctx, user.ID, "", 10) + s.Require().NoError(err) + s.Require().Len(found, 2) +} + +func (s *APIKeyRepoSuite) TestSearchAPIKeys_NoUserID() { + user := s.mustCreateUser("searchnouid@test.com") + s.mustCreateApiKey(user.ID, "sk-nu-1", "TestKey", nil) + + found, err := s.repo.SearchAPIKeys(s.ctx, 0, "testkey", 10) + s.Require().NoError(err) + s.Require().Len(found, 1) +} + +// --- ClearGroupIDByGroupID --- + +func 
(s *APIKeyRepoSuite) TestClearGroupIDByGroupID() { + user := s.mustCreateUser("cleargrp@test.com") + group := s.mustCreateGroup("g-clear-bulk") + + k1 := s.mustCreateApiKey(user.ID, "sk-clr-1", "K1", &group.ID) + k2 := s.mustCreateApiKey(user.ID, "sk-clr-2", "K2", &group.ID) + s.mustCreateApiKey(user.ID, "sk-clr-3", "K3", nil) // no group + + affected, err := s.repo.ClearGroupIDByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "ClearGroupIDByGroupID") + s.Require().Equal(int64(2), affected) + + got1, _ := s.repo.GetByID(s.ctx, k1.ID) + got2, _ := s.repo.GetByID(s.ctx, k2.ID) + s.Require().Nil(got1.GroupID) + s.Require().Nil(got2.GroupID) + + count, _ := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().Zero(count) +} + +// --- Combined CRUD/Search/ClearGroupID (original test preserved as integration) --- + +func (s *APIKeyRepoSuite) TestCRUD_Search_ClearGroupID() { + user := s.mustCreateUser("k@example.com") + group := s.mustCreateGroup("g-k") + key := s.mustCreateApiKey(user.ID, "sk-test-1", "My Key", &group.ID) + key.GroupID = &group.ID + + got, err := s.repo.GetByKey(s.ctx, key.Key) + s.Require().NoError(err, "GetByKey") + s.Require().Equal(key.ID, got.ID) + s.Require().NotNil(got.User) + s.Require().Equal(user.ID, got.User.ID) + s.Require().NotNil(got.Group) + s.Require().Equal(group.ID, got.Group.ID) + + key.Name = "Renamed" + key.Status = service.StatusDisabled + key.GroupID = nil + s.Require().NoError(s.repo.Update(s.ctx, key), "Update") + + got2, err := s.repo.GetByID(s.ctx, key.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("sk-test-1", got2.Key, "Update should not change key") + s.Require().Equal(user.ID, got2.UserID, "Update should not change user_id") + s.Require().Equal("Renamed", got2.Name) + s.Require().Equal(service.StatusDisabled, got2.Status) + s.Require().Nil(got2.GroupID) + + keys, page, err := s.repo.ListByUserID(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "ListByUserID") + s.Require().Equal(int64(1), page.Total) + s.Require().Len(keys, 1) + + exists, err := s.repo.ExistsByKey(s.ctx, "sk-test-1") + s.Require().NoError(err, "ExistsByKey") + s.Require().True(exists, "expected key to exist") + + found, err := s.repo.SearchAPIKeys(s.ctx, user.ID, "renam", 10) + s.Require().NoError(err, "SearchAPIKeys") + s.Require().Len(found, 1) + s.Require().Equal(key.ID, found[0].ID) + + // ClearGroupIDByGroupID + k2 := s.mustCreateApiKey(user.ID, "sk-test-2", "Group Key", &group.ID) + k2.GroupID = &group.ID + + countBefore, err := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "CountByGroupID") + s.Require().Equal(int64(1), countBefore, "expected 1 key in group before clear") + + affected, err := s.repo.ClearGroupIDByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "ClearGroupIDByGroupID") + s.Require().Equal(int64(1), affected, "expected 1 affected row") + + got3, err := s.repo.GetByID(s.ctx, k2.ID) + s.Require().NoError(err, "GetByID") + s.Require().Nil(got3.GroupID, "expected GroupID cleared") + + countAfter, err := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "CountByGroupID after clear") + s.Require().Equal(int64(0), countAfter, "expected 0 keys in group after clear") +} + +func (s *APIKeyRepoSuite) mustCreateUser(email string) *service.User { + s.T().Helper() + + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + SetStatus(service.StatusActive). + SetRole(service.RoleUser). 
+ Save(s.ctx) + s.Require().NoError(err, "create user") + return userEntityToService(u) +} + +func (s *APIKeyRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). + SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *APIKeyRepoSuite) mustCreateApiKey(userID int64, key, name string, groupID *int64) *service.APIKey { + s.T().Helper() + + k := &service.APIKey{ + UserID: userID, + Key: key, + Name: name, + GroupID: groupID, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, k), "create api key") + return k +} diff --git a/backend/internal/repository/billing_cache.go b/backend/internal/repository/billing_cache.go new file mode 100644 index 00000000..ac5803a1 --- /dev/null +++ b/backend/internal/repository/billing_cache.go @@ -0,0 +1,183 @@ +package repository + +import ( + "context" + "errors" + "fmt" + "log" + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const ( + billingBalanceKeyPrefix = "billing:balance:" + billingSubKeyPrefix = "billing:sub:" + billingCacheTTL = 5 * time.Minute +) + +// billingBalanceKey generates the Redis key for user balance cache. +func billingBalanceKey(userID int64) string { + return fmt.Sprintf("%s%d", billingBalanceKeyPrefix, userID) +} + +// billingSubKey generates the Redis key for subscription cache. +func billingSubKey(userID, groupID int64) string { + return fmt.Sprintf("%s%d:%d", billingSubKeyPrefix, userID, groupID) +} + +const ( + subFieldStatus = "status" + subFieldExpiresAt = "expires_at" + subFieldDailyUsage = "daily_usage" + subFieldWeeklyUsage = "weekly_usage" + subFieldMonthlyUsage = "monthly_usage" + subFieldVersion = "version" +) + +var ( + deductBalanceScript = redis.NewScript(` + local current = redis.call('GET', KEYS[1]) + if current == false then + return 0 + end + local newVal = tonumber(current) - tonumber(ARGV[1]) + redis.call('SET', KEYS[1], newVal) + redis.call('EXPIRE', KEYS[1], ARGV[2]) + return 1 + `) + + updateSubUsageScript = redis.NewScript(` + local exists = redis.call('EXISTS', KEYS[1]) + if exists == 0 then + return 0 + end + local cost = tonumber(ARGV[1]) + redis.call('HINCRBYFLOAT', KEYS[1], 'daily_usage', cost) + redis.call('HINCRBYFLOAT', KEYS[1], 'weekly_usage', cost) + redis.call('HINCRBYFLOAT', KEYS[1], 'monthly_usage', cost) + redis.call('EXPIRE', KEYS[1], ARGV[2]) + return 1 + `) +) + +type billingCache struct { + rdb *redis.Client +} + +func NewBillingCache(rdb *redis.Client) service.BillingCache { + return &billingCache{rdb: rdb} +} + +func (c *billingCache) GetUserBalance(ctx context.Context, userID int64) (float64, error) { + key := billingBalanceKey(userID) + val, err := c.rdb.Get(ctx, key).Result() + if err != nil { + return 0, err + } + return strconv.ParseFloat(val, 64) +} + +func (c *billingCache) SetUserBalance(ctx context.Context, userID int64, balance float64) error { + key := billingBalanceKey(userID) + return c.rdb.Set(ctx, key, balance, billingCacheTTL).Err() +} + +func (c *billingCache) DeductUserBalance(ctx context.Context, userID int64, amount float64) error { + key := billingBalanceKey(userID) + _, err := deductBalanceScript.Run(ctx, c.rdb, []string{key}, amount, int(billingCacheTTL.Seconds())).Result() + if err != nil && !errors.Is(err, redis.Nil) { + log.Printf("Warning: deduct balance cache failed for user %d: %v", userID, err) + } + return nil +} + 
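+// NOTE: DeductUserBalance is deliberately best-effort: the Lua script only decrements a key that +// already exists (a missing key is a no-op) and Redis errors are logged rather than returned, so +// the database stays the source of truth. A caller sketch, assuming a hypothetical +// repo.DeductBalance that persists the charge first: +// +// if err := repo.DeductBalance(ctx, userID, amount); err != nil { +// return err // the durable write must succeed +// } +// _ = cache.DeductUserBalance(ctx, userID, amount) // best-effort cache maintenance +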
+func (c *billingCache) InvalidateUserBalance(ctx context.Context, userID int64) error { + key := billingBalanceKey(userID) + return c.rdb.Del(ctx, key).Err() +} + +func (c *billingCache) GetSubscriptionCache(ctx context.Context, userID, groupID int64) (*service.SubscriptionCacheData, error) { + key := billingSubKey(userID, groupID) + result, err := c.rdb.HGetAll(ctx, key).Result() + if err != nil { + return nil, err + } + if len(result) == 0 { + return nil, redis.Nil + } + return c.parseSubscriptionCache(result) +} + +func (c *billingCache) parseSubscriptionCache(data map[string]string) (*service.SubscriptionCacheData, error) { + result := &service.SubscriptionCacheData{} + + result.Status = data[subFieldStatus] + if result.Status == "" { + return nil, errors.New("invalid cache: missing status") + } + + if expiresStr, ok := data[subFieldExpiresAt]; ok { + expiresAt, err := strconv.ParseInt(expiresStr, 10, 64) + if err == nil { + result.ExpiresAt = time.Unix(expiresAt, 0) + } + } + + if dailyStr, ok := data[subFieldDailyUsage]; ok { + result.DailyUsage, _ = strconv.ParseFloat(dailyStr, 64) + } + + if weeklyStr, ok := data[subFieldWeeklyUsage]; ok { + result.WeeklyUsage, _ = strconv.ParseFloat(weeklyStr, 64) + } + + if monthlyStr, ok := data[subFieldMonthlyUsage]; ok { + result.MonthlyUsage, _ = strconv.ParseFloat(monthlyStr, 64) + } + + if versionStr, ok := data[subFieldVersion]; ok { + result.Version, _ = strconv.ParseInt(versionStr, 10, 64) + } + + return result, nil +} + +func (c *billingCache) SetSubscriptionCache(ctx context.Context, userID, groupID int64, data *service.SubscriptionCacheData) error { + if data == nil { + return nil + } + + key := billingSubKey(userID, groupID) + + fields := map[string]any{ + subFieldStatus: data.Status, + subFieldExpiresAt: data.ExpiresAt.Unix(), + subFieldDailyUsage: data.DailyUsage, + subFieldWeeklyUsage: data.WeeklyUsage, + subFieldMonthlyUsage: data.MonthlyUsage, + subFieldVersion: data.Version, + } + + pipe := c.rdb.Pipeline() + pipe.HSet(ctx, key, fields) + pipe.Expire(ctx, key, billingCacheTTL) + _, err := pipe.Exec(ctx) + return err +} + +func (c *billingCache) UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, cost float64) error { + key := billingSubKey(userID, groupID) + _, err := updateSubUsageScript.Run(ctx, c.rdb, []string{key}, cost, int(billingCacheTTL.Seconds())).Result() + if err != nil && !errors.Is(err, redis.Nil) { + log.Printf("Warning: update subscription usage cache failed for user %d group %d: %v", userID, groupID, err) + } + return nil +} + +func (c *billingCache) InvalidateSubscriptionCache(ctx context.Context, userID, groupID int64) error { + key := billingSubKey(userID, groupID) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/billing_cache_integration_test.go b/backend/internal/repository/billing_cache_integration_test.go new file mode 100644 index 00000000..2f7c69a7 --- /dev/null +++ b/backend/internal/repository/billing_cache_integration_test.go @@ -0,0 +1,283 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type BillingCacheSuite struct { + IntegrationRedisSuite +} + +func (s *BillingCacheSuite) TestUserBalance() { + tests := []struct { + name string + fn func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) + }{ + { + name: 
"missing_key_returns_redis_nil", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + _, err := cache.GetUserBalance(ctx, 1) + require.ErrorIs(s.T(), err, redis.Nil, "expected redis.Nil for missing balance key") + }, + }, + { + name: "deduct_on_nonexistent_is_noop", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(1) + balanceKey := fmt.Sprintf("%s%d", billingBalanceKeyPrefix, userID) + + require.NoError(s.T(), cache.DeductUserBalance(ctx, userID, 1), "DeductUserBalance should not error") + + _, err := rdb.Get(ctx, balanceKey).Result() + require.ErrorIs(s.T(), err, redis.Nil, "expected missing key after deduct on non-existent") + }, + }, + { + name: "set_and_get_with_ttl", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(2) + balanceKey := fmt.Sprintf("%s%d", billingBalanceKeyPrefix, userID) + + require.NoError(s.T(), cache.SetUserBalance(ctx, userID, 10.5), "SetUserBalance") + + got, err := cache.GetUserBalance(ctx, userID) + require.NoError(s.T(), err, "GetUserBalance") + require.Equal(s.T(), 10.5, got, "balance mismatch") + + ttl, err := rdb.TTL(ctx, balanceKey).Result() + require.NoError(s.T(), err, "TTL") + s.AssertTTLWithin(ttl, 1*time.Second, billingCacheTTL) + }, + }, + { + name: "deduct_reduces_balance", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(3) + + require.NoError(s.T(), cache.SetUserBalance(ctx, userID, 10.5), "SetUserBalance") + require.NoError(s.T(), cache.DeductUserBalance(ctx, userID, 2.25), "DeductUserBalance") + + got, err := cache.GetUserBalance(ctx, userID) + require.NoError(s.T(), err, "GetUserBalance after deduct") + require.Equal(s.T(), 8.25, got, "deduct mismatch") + }, + }, + { + name: "invalidate_removes_key", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(100) + balanceKey := fmt.Sprintf("%s%d", billingBalanceKeyPrefix, userID) + + require.NoError(s.T(), cache.SetUserBalance(ctx, userID, 50.0), "SetUserBalance") + + exists, err := rdb.Exists(ctx, balanceKey).Result() + require.NoError(s.T(), err, "Exists") + require.Equal(s.T(), int64(1), exists, "expected balance key to exist") + + require.NoError(s.T(), cache.InvalidateUserBalance(ctx, userID), "InvalidateUserBalance") + + exists, err = rdb.Exists(ctx, balanceKey).Result() + require.NoError(s.T(), err, "Exists after invalidate") + require.Equal(s.T(), int64(0), exists, "expected balance key to be removed after invalidate") + + _, err = cache.GetUserBalance(ctx, userID) + require.ErrorIs(s.T(), err, redis.Nil, "expected redis.Nil after invalidate") + }, + }, + { + name: "deduct_refreshes_ttl", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(103) + balanceKey := fmt.Sprintf("%s%d", billingBalanceKeyPrefix, userID) + + require.NoError(s.T(), cache.SetUserBalance(ctx, userID, 100.0), "SetUserBalance") + + ttl1, err := rdb.TTL(ctx, balanceKey).Result() + require.NoError(s.T(), err, "TTL before deduct") + s.AssertTTLWithin(ttl1, 1*time.Second, billingCacheTTL) + + require.NoError(s.T(), cache.DeductUserBalance(ctx, userID, 25.0), "DeductUserBalance") + + balance, err := cache.GetUserBalance(ctx, userID) + require.NoError(s.T(), err, "GetUserBalance") + require.Equal(s.T(), 75.0, balance, "expected balance 75.0") + + ttl2, err := rdb.TTL(ctx, balanceKey).Result() + require.NoError(s.T(), err, "TTL after deduct") + 
s.AssertTTLWithin(ttl2, 1*time.Second, billingCacheTTL) + }, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + rdb := testRedis(s.T()) + cache := NewBillingCache(rdb) + ctx := context.Background() + + tt.fn(ctx, rdb, cache) + }) + } +} + +func (s *BillingCacheSuite) TestSubscriptionCache() { + tests := []struct { + name string + fn func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) + }{ + { + name: "missing_key_returns_redis_nil", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(10) + groupID := int64(20) + + _, err := cache.GetSubscriptionCache(ctx, userID, groupID) + require.ErrorIs(s.T(), err, redis.Nil, "expected redis.Nil for missing subscription key") + }, + }, + { + name: "update_usage_on_nonexistent_is_noop", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(11) + groupID := int64(21) + subKey := fmt.Sprintf("%s%d:%d", billingSubKeyPrefix, userID, groupID) + + require.NoError(s.T(), cache.UpdateSubscriptionUsage(ctx, userID, groupID, 1.0), "UpdateSubscriptionUsage should not error") + + exists, err := rdb.Exists(ctx, subKey).Result() + require.NoError(s.T(), err, "Exists") + require.Equal(s.T(), int64(0), exists, "expected missing subscription key after UpdateSubscriptionUsage on non-existent") + }, + }, + { + name: "set_and_get_with_ttl", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(12) + groupID := int64(22) + subKey := fmt.Sprintf("%s%d:%d", billingSubKeyPrefix, userID, groupID) + + data := &service.SubscriptionCacheData{ + Status: "active", + ExpiresAt: time.Now().Add(1 * time.Hour), + DailyUsage: 1.0, + WeeklyUsage: 2.0, + MonthlyUsage: 3.0, + Version: 7, + } + require.NoError(s.T(), cache.SetSubscriptionCache(ctx, userID, groupID, data), "SetSubscriptionCache") + + gotSub, err := cache.GetSubscriptionCache(ctx, userID, groupID) + require.NoError(s.T(), err, "GetSubscriptionCache") + require.Equal(s.T(), "active", gotSub.Status) + require.Equal(s.T(), int64(7), gotSub.Version) + require.Equal(s.T(), 1.0, gotSub.DailyUsage) + + ttl, err := rdb.TTL(ctx, subKey).Result() + require.NoError(s.T(), err, "TTL subKey") + s.AssertTTLWithin(ttl, 1*time.Second, billingCacheTTL) + }, + }, + { + name: "update_usage_increments_all_fields", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(13) + groupID := int64(23) + + data := &service.SubscriptionCacheData{ + Status: "active", + ExpiresAt: time.Now().Add(1 * time.Hour), + DailyUsage: 1.0, + WeeklyUsage: 2.0, + MonthlyUsage: 3.0, + Version: 1, + } + require.NoError(s.T(), cache.SetSubscriptionCache(ctx, userID, groupID, data), "SetSubscriptionCache") + + require.NoError(s.T(), cache.UpdateSubscriptionUsage(ctx, userID, groupID, 0.5), "UpdateSubscriptionUsage") + + gotSub, err := cache.GetSubscriptionCache(ctx, userID, groupID) + require.NoError(s.T(), err, "GetSubscriptionCache after update") + require.Equal(s.T(), 1.5, gotSub.DailyUsage) + require.Equal(s.T(), 2.5, gotSub.WeeklyUsage) + require.Equal(s.T(), 3.5, gotSub.MonthlyUsage) + }, + }, + { + name: "invalidate_removes_key", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(101) + groupID := int64(10) + subKey := fmt.Sprintf("%s%d:%d", billingSubKeyPrefix, userID, groupID) + + data := &service.SubscriptionCacheData{ + Status: "active", + ExpiresAt: time.Now().Add(1 * time.Hour), + DailyUsage: 1.0, + 
WeeklyUsage: 2.0, + MonthlyUsage: 3.0, + Version: 1, + } + require.NoError(s.T(), cache.SetSubscriptionCache(ctx, userID, groupID, data), "SetSubscriptionCache") + + exists, err := rdb.Exists(ctx, subKey).Result() + require.NoError(s.T(), err, "Exists") + require.Equal(s.T(), int64(1), exists, "expected subscription key to exist") + + require.NoError(s.T(), cache.InvalidateSubscriptionCache(ctx, userID, groupID), "InvalidateSubscriptionCache") + + exists, err = rdb.Exists(ctx, subKey).Result() + require.NoError(s.T(), err, "Exists after invalidate") + require.Equal(s.T(), int64(0), exists, "expected subscription key to be removed after invalidate") + + _, err = cache.GetSubscriptionCache(ctx, userID, groupID) + require.ErrorIs(s.T(), err, redis.Nil, "expected redis.Nil after invalidate") + }, + }, + { + name: "missing_status_returns_parsing_error", + fn: func(ctx context.Context, rdb *redis.Client, cache service.BillingCache) { + userID := int64(102) + groupID := int64(11) + subKey := fmt.Sprintf("%s%d:%d", billingSubKeyPrefix, userID, groupID) + + fields := map[string]any{ + "expires_at": time.Now().Add(1 * time.Hour).Unix(), + "daily_usage": 1.0, + "weekly_usage": 2.0, + "monthly_usage": 3.0, + "version": 1, + } + require.NoError(s.T(), rdb.HSet(ctx, subKey, fields).Err(), "HSet") + + _, err := cache.GetSubscriptionCache(ctx, userID, groupID) + require.Error(s.T(), err, "expected error for missing status field") + require.NotErrorIs(s.T(), err, redis.Nil, "expected parsing error, not redis.Nil") + require.Equal(s.T(), "invalid cache: missing status", err.Error()) + }, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + rdb := testRedis(s.T()) + cache := NewBillingCache(rdb) + ctx := context.Background() + + tt.fn(ctx, rdb, cache) + }) + } +} + +func TestBillingCacheSuite(t *testing.T) { + suite.Run(t, new(BillingCacheSuite)) +} diff --git a/backend/internal/repository/billing_cache_test.go b/backend/internal/repository/billing_cache_test.go new file mode 100644 index 00000000..7d3fd19d --- /dev/null +++ b/backend/internal/repository/billing_cache_test.go @@ -0,0 +1,87 @@ +//go:build unit + +package repository + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBillingBalanceKey(t *testing.T) { + tests := []struct { + name string + userID int64 + expected string + }{ + { + name: "normal_user_id", + userID: 123, + expected: "billing:balance:123", + }, + { + name: "zero_user_id", + userID: 0, + expected: "billing:balance:0", + }, + { + name: "negative_user_id", + userID: -1, + expected: "billing:balance:-1", + }, + { + name: "max_int64", + userID: math.MaxInt64, + expected: "billing:balance:9223372036854775807", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := billingBalanceKey(tc.userID) + require.Equal(t, tc.expected, got) + }) + } +} + +func TestBillingSubKey(t *testing.T) { + tests := []struct { + name string + userID int64 + groupID int64 + expected string + }{ + { + name: "normal_ids", + userID: 123, + groupID: 456, + expected: "billing:sub:123:456", + }, + { + name: "zero_ids", + userID: 0, + groupID: 0, + expected: "billing:sub:0:0", + }, + { + name: "negative_ids", + userID: -1, + groupID: -2, + expected: "billing:sub:-1:-2", + }, + { + name: "max_int64_ids", + userID: math.MaxInt64, + groupID: math.MaxInt64, + expected: "billing:sub:9223372036854775807:9223372036854775807", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := billingSubKey(tc.userID, 
tc.groupID) + require.Equal(t, tc.expected, got) + }) + } +} diff --git a/backend/internal/repository/claude_oauth_service.go b/backend/internal/repository/claude_oauth_service.go new file mode 100644 index 00000000..677fce52 --- /dev/null +++ b/backend/internal/repository/claude_oauth_service.go @@ -0,0 +1,248 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/oauth" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/Wei-Shaw/sub2api/internal/util/logredact" + + "github.com/imroc/req/v3" +) + +func NewClaudeOAuthClient() service.ClaudeOAuthClient { + return &claudeOAuthService{ + baseURL: "https://claude.ai", + tokenURL: oauth.TokenURL, + clientFactory: createReqClient, + } +} + +type claudeOAuthService struct { + baseURL string + tokenURL string + clientFactory func(proxyURL string) *req.Client +} + +func (s *claudeOAuthService) GetOrganizationUUID(ctx context.Context, sessionKey, proxyURL string) (string, error) { + client := s.clientFactory(proxyURL) + + var orgs []struct { + UUID string `json:"uuid"` + } + + targetURL := s.baseURL + "/api/organizations" + log.Printf("[OAuth] Step 1: Getting organization UUID from %s", targetURL) + + resp, err := client.R(). + SetContext(ctx). + SetCookies(&http.Cookie{ + Name: "sessionKey", + Value: sessionKey, + }). + SetSuccessResult(&orgs). + Get(targetURL) + + if err != nil { + log.Printf("[OAuth] Step 1 FAILED - Request error: %v", err) + return "", fmt.Errorf("request failed: %w", err) + } + + log.Printf("[OAuth] Step 1 Response - Status: %d", resp.StatusCode) + + if !resp.IsSuccessState() { + return "", fmt.Errorf("failed to get organizations: status %d, body: %s", resp.StatusCode, resp.String()) + } + + if len(orgs) == 0 { + return "", fmt.Errorf("no organizations found") + } + + log.Printf("[OAuth] Step 1 SUCCESS - Got org UUID: %s", orgs[0].UUID) + return orgs[0].UUID, nil +} + +func (s *claudeOAuthService) GetAuthorizationCode(ctx context.Context, sessionKey, orgUUID, scope, codeChallenge, state, proxyURL string) (string, error) { + client := s.clientFactory(proxyURL) + + authURL := fmt.Sprintf("%s/v1/oauth/%s/authorize", s.baseURL, orgUUID) + + reqBody := map[string]any{ + "response_type": "code", + "client_id": oauth.ClientID, + "organization_uuid": orgUUID, + "redirect_uri": oauth.RedirectURI, + "scope": scope, + "state": state, + "code_challenge": codeChallenge, + "code_challenge_method": "S256", + } + + log.Printf("[OAuth] Step 2: Getting authorization code from %s", authURL) + reqBodyJSON, _ := json.Marshal(logredact.RedactMap(reqBody)) + log.Printf("[OAuth] Step 2 Request Body: %s", string(reqBodyJSON)) + + var result struct { + RedirectURI string `json:"redirect_uri"` + } + + resp, err := client.R(). + SetContext(ctx). + SetCookies(&http.Cookie{ + Name: "sessionKey", + Value: sessionKey, + }). + SetHeader("Accept", "application/json"). + SetHeader("Accept-Language", "en-US,en;q=0.9"). + SetHeader("Cache-Control", "no-cache"). + SetHeader("Origin", "https://claude.ai"). + SetHeader("Referer", "https://claude.ai/new"). + SetHeader("Content-Type", "application/json"). + SetBody(reqBody). + SetSuccessResult(&result). 
+		Post(authURL)
+
+	if err != nil {
+		log.Printf("[OAuth] Step 2 FAILED - Request error: %v", err)
+		return "", fmt.Errorf("request failed: %w", err)
+	}
+
+	log.Printf("[OAuth] Step 2 Response - Status: %d, Body: %s", resp.StatusCode, logredact.RedactJSON(resp.Bytes()))
+
+	if !resp.IsSuccessState() {
+		return "", fmt.Errorf("failed to get authorization code: status %d, body: %s", resp.StatusCode, resp.String())
+	}
+
+	if result.RedirectURI == "" {
+		return "", fmt.Errorf("no redirect_uri in response")
+	}
+
+	parsedURL, err := url.Parse(result.RedirectURI)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse redirect_uri: %w", err)
+	}
+
+	queryParams := parsedURL.Query()
+	authCode := queryParams.Get("code")
+	responseState := queryParams.Get("state")
+
+	if authCode == "" {
+		return "", fmt.Errorf("no authorization code in redirect_uri")
+	}
+
+	fullCode := authCode
+	if responseState != "" {
+		fullCode = authCode + "#" + responseState
+	}
+
+	log.Printf("[OAuth] Step 2 SUCCESS - Got authorization code")
+	return fullCode, nil
+}
+
+func (s *claudeOAuthService) ExchangeCodeForToken(ctx context.Context, code, codeVerifier, state, proxyURL string, isSetupToken bool) (*oauth.TokenResponse, error) {
+	client := s.clientFactory(proxyURL)
+
+	// Parse code which may contain state in format "authCode#state"
+	authCode := code
+	codeState := ""
+	if idx := strings.Index(code, "#"); idx != -1 {
+		authCode = code[:idx]
+		codeState = code[idx+1:]
+	}
+
+	reqBody := map[string]any{
+		"code":          authCode,
+		"grant_type":    "authorization_code",
+		"client_id":     oauth.ClientID,
+		"redirect_uri":  oauth.RedirectURI,
+		"code_verifier": codeVerifier,
+	}
+
+	if codeState != "" {
+		reqBody["state"] = codeState
+	}
+
+	// Setup token requires longer expiration (1 year)
+	if isSetupToken {
+		reqBody["expires_in"] = 31536000 // 365 * 24 * 60 * 60 seconds
+	}
+
+	log.Printf("[OAuth] Step 3: Exchanging code for token at %s", s.tokenURL)
+	reqBodyJSON, _ := json.Marshal(logredact.RedactMap(reqBody))
+	log.Printf("[OAuth] Step 3 Request Body: %s", string(reqBodyJSON))
+
+	var tokenResp oauth.TokenResponse
+
+	resp, err := client.R().
+		SetContext(ctx).
+		SetHeader("Content-Type", "application/json").
+		SetBody(reqBody).
+		SetSuccessResult(&tokenResp).
+		Post(s.tokenURL)
+
+	if err != nil {
+		log.Printf("[OAuth] Step 3 FAILED - Request error: %v", err)
+		return nil, fmt.Errorf("request failed: %w", err)
+	}
+
+	log.Printf("[OAuth] Step 3 Response - Status: %d, Body: %s", resp.StatusCode, logredact.RedactJSON(resp.Bytes()))
+
+	if !resp.IsSuccessState() {
+		return nil, fmt.Errorf("token exchange failed: status %d, body: %s", resp.StatusCode, resp.String())
+	}
+
+	log.Printf("[OAuth] Step 3 SUCCESS - Got access token")
+	return &tokenResp, nil
+}
+
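+// RefreshToken redeems a refresh token for a fresh access token. Anthropic
+// rotates refresh tokens (see the rotation test below), so callers should
+// persist the refresh_token returned here rather than reusing the old one.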
+func (s *claudeOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*oauth.TokenResponse, error) {
+	client := s.clientFactory(proxyURL)
+
+	// Use a JSON body, consistent with ExchangeCodeForToken;
+	// the Anthropic OAuth API expects a JSON-formatted request body.
+	reqBody := map[string]any{
+		"grant_type":    "refresh_token",
+		"refresh_token": refreshToken,
+		"client_id":     oauth.ClientID,
+	}
+
+	var tokenResp oauth.TokenResponse
+
+	resp, err := client.R().
+		SetContext(ctx).
+		SetHeader("Content-Type", "application/json").
+		SetBody(reqBody).
+		SetSuccessResult(&tokenResp).
+		Post(s.tokenURL)
+
+	if err != nil {
+		return nil, fmt.Errorf("request failed: %w", err)
+	}
+
+	if !resp.IsSuccessState() {
+		return nil, fmt.Errorf("token refresh failed: status %d, body: %s", resp.StatusCode, resp.String())
+	}
+
+	return &tokenResp, nil
+}
+
+func createReqClient(proxyURL string) *req.Client {
+	// Disable the cookie jar so every authorization runs in a clean session.
+	client := req.C().
+		SetTimeout(60 * time.Second).
+		ImpersonateChrome().
+		SetCookieJar(nil) // disable the cookie jar
+
+	if strings.TrimSpace(proxyURL) != "" {
+		client.SetProxyURL(strings.TrimSpace(proxyURL))
+	}
+
+	return client
+}
diff --git a/backend/internal/repository/claude_oauth_service_test.go b/backend/internal/repository/claude_oauth_service_test.go
new file mode 100644
index 00000000..a7f76056
--- /dev/null
+++ b/backend/internal/repository/claude_oauth_service_test.go
@@ -0,0 +1,396 @@
+package repository
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/oauth"
+	"github.com/imroc/req/v3"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+type ClaudeOAuthServiceSuite struct {
+	suite.Suite
+	client *claudeOAuthService
+}
+
+// requestCapture holds captured request data for assertions in the main goroutine.
+type requestCapture struct {
+	path        string
+	method      string
+	cookies     []*http.Cookie
+	body        []byte
+	bodyJSON    map[string]any
+	contentType string
+}
+
+func newTestReqClient(rt http.RoundTripper) *req.Client {
+	c := req.C()
+	c.GetClient().Transport = rt
+	return c
+}
+
+func (s *ClaudeOAuthServiceSuite) TestGetOrganizationUUID() {
+	tests := []struct {
+		name       string
+		handler    http.HandlerFunc
+		wantErr    bool
+		errContain string
+		wantUUID   string
+		validate   func(captured requestCapture)
+	}{
+		{
+			name: "success",
+			handler: func(w http.ResponseWriter, r *http.Request) {
+				w.Header().Set("Content-Type", "application/json")
+				_, _ = w.Write([]byte(`[{"uuid":"org-1"}]`))
+			},
+			wantUUID: "org-1",
+			validate: func(captured requestCapture) {
+				require.Equal(s.T(), "/api/organizations", captured.path, "unexpected path")
+				require.Len(s.T(), captured.cookies, 1, "expected 1 cookie")
+				require.Equal(s.T(), "sessionKey", captured.cookies[0].Name)
+				require.Equal(s.T(), "sess", captured.cookies[0].Value)
+			},
+		},
+		{
+			name: "non_200_returns_error",
+			handler: func(w http.ResponseWriter, r *http.Request) {
+				w.WriteHeader(http.StatusUnauthorized)
+				_, _ = w.Write([]byte("unauthorized"))
+			},
+			wantErr:    true,
+			errContain: "401",
+		},
+		{
+			name: "invalid_json_returns_error",
+			handler: func(w http.ResponseWriter, r *http.Request) {
+				w.Header().Set("Content-Type", "application/json")
+				_, _ = w.Write([]byte("not-json"))
+			},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		s.Run(tt.name, func() {
+			var captured requestCapture
+
+			rt := newInProcessTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				captured.path = r.URL.Path
+				captured.cookies = r.Cookies()
+				tt.handler(w, r)
+			}), nil)
+
+			client, ok := NewClaudeOAuthClient().(*claudeOAuthService)
+			require.True(s.T(), ok, "type assertion failed")
+			s.client = client
+			s.client.baseURL = "http://in-process"
+			s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) }
+
+			got, err := s.client.GetOrganizationUUID(context.Background(), "sess", "")
+
+			if tt.wantErr {
+				require.Error(s.T(), err)
+				if tt.errContain != "" {
+					require.ErrorContains(s.T(), err, tt.errContain)
+				}
+				return
+			}
+
+			require.NoError(s.T(), err)
+
require.Equal(s.T(), tt.wantUUID, got) + if tt.validate != nil { + tt.validate(captured) + } + }) + } +} + +func (s *ClaudeOAuthServiceSuite) TestGetAuthorizationCode() { + tests := []struct { + name string + handler http.HandlerFunc + wantErr bool + wantCode string + validate func(captured requestCapture) + }{ + { + name: "parses_redirect_uri", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]string{ + "redirect_uri": oauth.RedirectURI + "?code=AUTH&state=STATE", + }) + }, + wantCode: "AUTH#STATE", + validate: func(captured requestCapture) { + require.True(s.T(), strings.HasPrefix(captured.path, "/v1/oauth/") && strings.HasSuffix(captured.path, "/authorize"), "unexpected path: %s", captured.path) + require.Equal(s.T(), http.MethodPost, captured.method, "expected POST") + require.Len(s.T(), captured.cookies, 1, "expected 1 cookie") + require.Equal(s.T(), "sess", captured.cookies[0].Value) + require.Equal(s.T(), "org-1", captured.bodyJSON["organization_uuid"]) + require.Equal(s.T(), oauth.ClientID, captured.bodyJSON["client_id"]) + require.Equal(s.T(), oauth.RedirectURI, captured.bodyJSON["redirect_uri"]) + require.Equal(s.T(), "st", captured.bodyJSON["state"]) + }, + }, + { + name: "missing_code_returns_error", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(map[string]string{ + "redirect_uri": oauth.RedirectURI + "?state=STATE", // no code + }) + }, + wantErr: true, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + var captured requestCapture + + rt := newInProcessTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + captured.path = r.URL.Path + captured.method = r.Method + captured.cookies = r.Cookies() + captured.body, _ = io.ReadAll(r.Body) + _ = json.Unmarshal(captured.body, &captured.bodyJSON) + tt.handler(w, r) + }), nil) + + client, ok := NewClaudeOAuthClient().(*claudeOAuthService) + require.True(s.T(), ok, "type assertion failed") + s.client = client + s.client.baseURL = "http://in-process" + s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) } + + code, err := s.client.GetAuthorizationCode(context.Background(), "sess", "org-1", oauth.ScopeProfile, "cc", "st", "") + + if tt.wantErr { + require.Error(s.T(), err) + return + } + + require.NoError(s.T(), err) + require.Equal(s.T(), tt.wantCode, code) + if tt.validate != nil { + tt.validate(captured) + } + }) + } +} + +func (s *ClaudeOAuthServiceSuite) TestExchangeCodeForToken() { + tests := []struct { + name string + handler http.HandlerFunc + code string + isSetupToken bool + wantErr bool + wantResp *oauth.TokenResponse + validate func(captured requestCapture) + }{ + { + name: "sends_state_when_embedded", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(oauth.TokenResponse{ + AccessToken: "at", + TokenType: "bearer", + ExpiresIn: 3600, + RefreshToken: "rt", + Scope: "s", + }) + }, + code: "AUTH#STATE2", + isSetupToken: false, + wantResp: &oauth.TokenResponse{ + AccessToken: "at", + RefreshToken: "rt", + }, + validate: func(captured requestCapture) { + require.Equal(s.T(), http.MethodPost, captured.method, "expected POST") + require.True(s.T(), strings.HasPrefix(captured.contentType, "application/json"), "unexpected content-type") + require.Equal(s.T(), "AUTH", 
captured.bodyJSON["code"]) + require.Equal(s.T(), "STATE2", captured.bodyJSON["state"]) + require.Equal(s.T(), oauth.ClientID, captured.bodyJSON["client_id"]) + require.Equal(s.T(), oauth.RedirectURI, captured.bodyJSON["redirect_uri"]) + require.Equal(s.T(), "ver", captured.bodyJSON["code_verifier"]) + // Regular OAuth should not include expires_in + require.Nil(s.T(), captured.bodyJSON["expires_in"], "regular OAuth should not include expires_in") + }, + }, + { + name: "setup_token_includes_expires_in", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(oauth.TokenResponse{ + AccessToken: "at", + TokenType: "bearer", + ExpiresIn: 31536000, + }) + }, + code: "AUTH", + isSetupToken: true, + wantResp: &oauth.TokenResponse{ + AccessToken: "at", + }, + validate: func(captured requestCapture) { + // Setup token should include expires_in with 1 year value + require.Equal(s.T(), float64(31536000), captured.bodyJSON["expires_in"], + "setup token should include expires_in: 31536000") + }, + }, + { + name: "non_200_returns_error", + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte("bad request")) + }, + code: "AUTH", + isSetupToken: false, + wantErr: true, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + var captured requestCapture + + rt := newInProcessTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + captured.method = r.Method + captured.contentType = r.Header.Get("Content-Type") + captured.body, _ = io.ReadAll(r.Body) + _ = json.Unmarshal(captured.body, &captured.bodyJSON) + tt.handler(w, r) + }), nil) + + client, ok := NewClaudeOAuthClient().(*claudeOAuthService) + require.True(s.T(), ok, "type assertion failed") + s.client = client + s.client.tokenURL = "http://in-process/token" + s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) } + + resp, err := s.client.ExchangeCodeForToken(context.Background(), tt.code, "ver", "", "", tt.isSetupToken) + + if tt.wantErr { + require.Error(s.T(), err) + return + } + + require.NoError(s.T(), err) + require.Equal(s.T(), tt.wantResp.AccessToken, resp.AccessToken) + require.Equal(s.T(), tt.wantResp.RefreshToken, resp.RefreshToken) + if tt.validate != nil { + tt.validate(captured) + } + }) + } +} + +func (s *ClaudeOAuthServiceSuite) TestRefreshToken() { + tests := []struct { + name string + handler http.HandlerFunc + wantErr bool + wantResp *oauth.TokenResponse + validate func(captured requestCapture) + }{ + { + name: "sends_json_format", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(oauth.TokenResponse{ + AccessToken: "new_access_token", + TokenType: "bearer", + ExpiresIn: 28800, + RefreshToken: "new_refresh_token", + Scope: "user:profile user:inference", + }) + }, + wantResp: &oauth.TokenResponse{ + AccessToken: "new_access_token", + RefreshToken: "new_refresh_token", + }, + validate: func(captured requestCapture) { + require.Equal(s.T(), http.MethodPost, captured.method, "expected POST") + // 验证使用 JSON 格式(不是 form 格式) + require.True(s.T(), strings.HasPrefix(captured.contentType, "application/json"), + "expected JSON content-type, got: %s", captured.contentType) + // 验证 JSON body 内容 + require.Equal(s.T(), "refresh_token", captured.bodyJSON["grant_type"]) + require.Equal(s.T(), "rt", captured.bodyJSON["refresh_token"]) + 
require.Equal(s.T(), oauth.ClientID, captured.bodyJSON["client_id"]) + }, + }, + { + name: "returns_new_refresh_token", + handler: func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(oauth.TokenResponse{ + AccessToken: "at", + TokenType: "bearer", + ExpiresIn: 28800, + RefreshToken: "rotated_rt", // Anthropic rotates refresh tokens + }) + }, + wantResp: &oauth.TokenResponse{ + AccessToken: "at", + RefreshToken: "rotated_rt", + }, + }, + { + name: "non_200_returns_error", + handler: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = w.Write([]byte(`{"error":"invalid_grant"}`)) + }, + wantErr: true, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + var captured requestCapture + + rt := newInProcessTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + captured.method = r.Method + captured.contentType = r.Header.Get("Content-Type") + captured.body, _ = io.ReadAll(r.Body) + _ = json.Unmarshal(captured.body, &captured.bodyJSON) + tt.handler(w, r) + }), nil) + + client, ok := NewClaudeOAuthClient().(*claudeOAuthService) + require.True(s.T(), ok, "type assertion failed") + s.client = client + s.client.tokenURL = "http://in-process/token" + s.client.clientFactory = func(string) *req.Client { return newTestReqClient(rt) } + + resp, err := s.client.RefreshToken(context.Background(), "rt", "") + + if tt.wantErr { + require.Error(s.T(), err) + return + } + + require.NoError(s.T(), err) + require.Equal(s.T(), tt.wantResp.AccessToken, resp.AccessToken) + require.Equal(s.T(), tt.wantResp.RefreshToken, resp.RefreshToken) + if tt.validate != nil { + tt.validate(captured) + } + }) + } +} + +func TestClaudeOAuthServiceSuite(t *testing.T) { + suite.Run(t, new(ClaudeOAuthServiceSuite)) +} diff --git a/backend/internal/repository/claude_usage_service.go b/backend/internal/repository/claude_usage_service.go new file mode 100644 index 00000000..4c87b2de --- /dev/null +++ b/backend/internal/repository/claude_usage_service.go @@ -0,0 +1,62 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +const defaultClaudeUsageURL = "https://api.anthropic.com/api/oauth/usage" + +type claudeUsageService struct { + usageURL string + allowPrivateHosts bool +} + +func NewClaudeUsageFetcher() service.ClaudeUsageFetcher { + return &claudeUsageService{usageURL: defaultClaudeUsageURL} +} + +func (s *claudeUsageService) FetchUsage(ctx context.Context, accessToken, proxyURL string) (*service.ClaudeUsageResponse, error) { + client, err := httpclient.GetClient(httpclient.Options{ + ProxyURL: proxyURL, + Timeout: 30 * time.Second, + ValidateResolvedIP: true, + AllowPrivateHosts: s.allowPrivateHosts, + }) + if err != nil { + client = &http.Client{Timeout: 30 * time.Second} + } + + req, err := http.NewRequestWithContext(ctx, "GET", s.usageURL, nil) + if err != nil { + return nil, fmt.Errorf("create request failed: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("anthropic-beta", "oauth-2025-04-20") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("API returned status %d: %s", 
resp.StatusCode, string(body)) + } + + var usageResp service.ClaudeUsageResponse + if err := json.NewDecoder(resp.Body).Decode(&usageResp); err != nil { + return nil, fmt.Errorf("decode response failed: %w", err) + } + + return &usageResp, nil +} diff --git a/backend/internal/repository/claude_usage_service_test.go b/backend/internal/repository/claude_usage_service_test.go new file mode 100644 index 00000000..2e10f3e5 --- /dev/null +++ b/backend/internal/repository/claude_usage_service_test.go @@ -0,0 +1,117 @@ +package repository + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type ClaudeUsageServiceSuite struct { + suite.Suite + srv *httptest.Server + fetcher *claudeUsageService +} + +func (s *ClaudeUsageServiceSuite) TearDownTest() { + if s.srv != nil { + s.srv.Close() + s.srv = nil + } +} + +// usageRequestCapture holds captured request data for assertions in the main goroutine. +type usageRequestCapture struct { + authorization string + anthropicBeta string +} + +func (s *ClaudeUsageServiceSuite) TestFetchUsage_Success() { + var captured usageRequestCapture + + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + captured.authorization = r.Header.Get("Authorization") + captured.anthropicBeta = r.Header.Get("anthropic-beta") + + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, `{ + "five_hour": {"utilization": 12.5, "resets_at": "2025-01-01T00:00:00Z"}, + "seven_day": {"utilization": 34.0, "resets_at": "2025-01-08T00:00:00Z"}, + "seven_day_sonnet": {"utilization": 56.0, "resets_at": "2025-01-08T00:00:00Z"} +}`) + })) + + s.fetcher = &claudeUsageService{ + usageURL: s.srv.URL, + allowPrivateHosts: true, + } + + resp, err := s.fetcher.FetchUsage(context.Background(), "at", "://bad-proxy-url") + require.NoError(s.T(), err, "FetchUsage") + require.Equal(s.T(), 12.5, resp.FiveHour.Utilization, "FiveHour utilization mismatch") + require.Equal(s.T(), 34.0, resp.SevenDay.Utilization, "SevenDay utilization mismatch") + require.Equal(s.T(), 56.0, resp.SevenDaySonnet.Utilization, "SevenDaySonnet utilization mismatch") + + // Assertions on captured request data + require.Equal(s.T(), "Bearer at", captured.authorization, "Authorization header mismatch") + require.Equal(s.T(), "oauth-2025-04-20", captured.anthropicBeta, "anthropic-beta header mismatch") +} + +func (s *ClaudeUsageServiceSuite) TestFetchUsage_NonOK() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = io.WriteString(w, "nope") + })) + + s.fetcher = &claudeUsageService{ + usageURL: s.srv.URL, + allowPrivateHosts: true, + } + + _, err := s.fetcher.FetchUsage(context.Background(), "at", "") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "status 401") + require.ErrorContains(s.T(), err, "nope") +} + +func (s *ClaudeUsageServiceSuite) TestFetchUsage_BadJSON() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, "not-json") + })) + + s.fetcher = &claudeUsageService{ + usageURL: s.srv.URL, + allowPrivateHosts: true, + } + + _, err := s.fetcher.FetchUsage(context.Background(), "at", "") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "decode response failed") +} + +func (s 
*ClaudeUsageServiceSuite) TestFetchUsage_ContextCancel() {
+	s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Never respond - simulate slow server
+		<-r.Context().Done()
+	}))
+
+	s.fetcher = &claudeUsageService{
+		usageURL:          s.srv.URL,
+		allowPrivateHosts: true,
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel() // Cancel immediately
+
+	_, err := s.fetcher.FetchUsage(ctx, "at", "")
+	require.Error(s.T(), err, "expected error for cancelled context")
+}
+
+func TestClaudeUsageServiceSuite(t *testing.T) {
+	suite.Run(t, new(ClaudeUsageServiceSuite))
+}
diff --git a/backend/internal/repository/concurrency_cache.go b/backend/internal/repository/concurrency_cache.go
new file mode 100644
index 00000000..b34961e1
--- /dev/null
+++ b/backend/internal/repository/concurrency_cache.go
@@ -0,0 +1,391 @@
+package repository
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+)
+
+// Constants for the concurrency-control cache.
+//
+// Performance note:
+// The original implementation used SCAN to walk per-slot keys
+// (concurrency:account:{id}:{requestID}); under high concurrency SCAN needs
+// multiple round trips, and performance degrades noticeably as the number of
+// keys grows.
+//
+// The new implementation uses a Redis sorted set instead:
+// 1. One key per account/user; members are requestIDs, scores are timestamps.
+// 2. ZCARD returns the concurrency count atomically in O(1).
+// 3. ZREMRANGEBYSCORE prunes expired slots, avoiding manual per-slot TTLs.
+// 4. A single Redis call completes the count, cutting network round trips.
+const (
+	// Concurrency slot key prefix (sorted set).
+	// Format: concurrency:account:{accountID}
+	accountSlotKeyPrefix = "concurrency:account:"
+	// Format: concurrency:user:{userID}
+	userSlotKeyPrefix = "concurrency:user:"
+	// Wait-queue counter format: concurrency:wait:{userID}
+	waitQueueKeyPrefix = "concurrency:wait:"
+	// Account-level wait-queue counter format: wait:account:{accountID}
+	accountWaitKeyPrefix = "wait:account:"
+
+	// Default slot TTL in minutes; can be overridden via configuration.
+	defaultSlotTTLMinutes = 15
+)
+
+var (
+	// acquireScript counts via the sorted set and adds a slot when under the limit.
+	// It reads the server clock with the Redis TIME command to avoid clock
+	// skew across multiple instances.
+	// KEYS[1] = sorted-set key (concurrency:account:{id} / concurrency:user:{id})
+	// ARGV[1] = maxConcurrency
+	// ARGV[2] = TTL (seconds)
+	// ARGV[3] = requestID
+	acquireScript = redis.NewScript(`
+		local key = KEYS[1]
+		local maxConcurrency = tonumber(ARGV[1])
+		local ttl = tonumber(ARGV[2])
+		local requestID = ARGV[3]
+
+		-- Use the Redis server clock so all instances agree on time
+		local timeResult = redis.call('TIME')
+		local now = tonumber(timeResult[1])
+		local expireBefore = now - ttl
+
+		-- Evict expired slots
+		redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore)
+
+		-- If the member already exists (retry scenario), refresh its timestamp
+		local exists = redis.call('ZSCORE', key, requestID)
+		if exists ~= false then
+			redis.call('ZADD', key, now, requestID)
+			redis.call('EXPIRE', key, ttl)
+			return 1
+		end
+
+		-- Check whether the concurrency limit has been reached
+		local count = redis.call('ZCARD', key)
+		if count < maxConcurrency then
+			redis.call('ZADD', key, now, requestID)
+			redis.call('EXPIRE', key, ttl)
+			return 1
+		end
+
+		return 0
+	`)
+
+	// getCountScript counts the slots in the sorted set after pruning expired entries.
+	// Uses the Redis TIME command for server time.
+	// KEYS[1] = sorted-set key
+	// ARGV[1] = TTL (seconds)
+	getCountScript = redis.NewScript(`
+		local key = KEYS[1]
+		local ttl = tonumber(ARGV[1])
+
+		-- Use the Redis server clock
+		local timeResult = redis.call('TIME')
+		local now = tonumber(timeResult[1])
+		local expireBefore = now - ttl
+
+		redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore)
+		return redis.call('ZCARD', key)
+	`)
+
+	// incrementWaitScript - refreshes TTL on each increment to keep queue depth accurate
+	// KEYS[1] = wait queue key
+	// ARGV[1] = maxWait
+	// ARGV[2] = TTL in seconds
+	incrementWaitScript = redis.NewScript(`
+		local current = redis.call('GET', KEYS[1])
+		if current == false then
+			current = 0
+
else + current = tonumber(current) + end + + if current >= tonumber(ARGV[1]) then + return 0 + end + + local newVal = redis.call('INCR', KEYS[1]) + + -- Refresh TTL so long-running traffic doesn't expire active queue counters. + redis.call('EXPIRE', KEYS[1], ARGV[2]) + + return 1 + `) + + // incrementAccountWaitScript - account-level wait queue count (refresh TTL on each increment) + incrementAccountWaitScript = redis.NewScript(` + local current = redis.call('GET', KEYS[1]) + if current == false then + current = 0 + else + current = tonumber(current) + end + + if current >= tonumber(ARGV[1]) then + return 0 + end + + local newVal = redis.call('INCR', KEYS[1]) + + -- Refresh TTL so long-running traffic doesn't expire active queue counters. + redis.call('EXPIRE', KEYS[1], ARGV[2]) + + return 1 + `) + + // decrementWaitScript - same as before + decrementWaitScript = redis.NewScript(` + local current = redis.call('GET', KEYS[1]) + if current ~= false and tonumber(current) > 0 then + redis.call('DECR', KEYS[1]) + end + return 1 + `) + + // getAccountsLoadBatchScript - batch load query with expired slot cleanup + // ARGV[1] = slot TTL (seconds) + // ARGV[2..n] = accountID1, maxConcurrency1, accountID2, maxConcurrency2, ... + getAccountsLoadBatchScript = redis.NewScript(` + local result = {} + local slotTTL = tonumber(ARGV[1]) + + -- Get current server time + local timeResult = redis.call('TIME') + local nowSeconds = tonumber(timeResult[1]) + local cutoffTime = nowSeconds - slotTTL + + local i = 2 + while i <= #ARGV do + local accountID = ARGV[i] + local maxConcurrency = tonumber(ARGV[i + 1]) + + local slotKey = 'concurrency:account:' .. accountID + + -- Clean up expired slots before counting + redis.call('ZREMRANGEBYSCORE', slotKey, '-inf', cutoffTime) + local currentConcurrency = redis.call('ZCARD', slotKey) + + local waitKey = 'wait:account:' .. 
accountID
+		local waitingCount = redis.call('GET', waitKey)
+		if waitingCount == false then
+			waitingCount = 0
+		else
+			waitingCount = tonumber(waitingCount)
+		end
+
+		local loadRate = 0
+		if maxConcurrency > 0 then
+			loadRate = math.floor((currentConcurrency + waitingCount) * 100 / maxConcurrency)
+		end
+
+		table.insert(result, accountID)
+		table.insert(result, currentConcurrency)
+		table.insert(result, waitingCount)
+		table.insert(result, loadRate)
+
+		i = i + 2
+	end
+
+	return result
+	`)
+
+	// cleanupExpiredSlotsScript - remove expired slots
+	// KEYS[1] = concurrency:account:{accountID}
+	// ARGV[1] = TTL (seconds)
+	cleanupExpiredSlotsScript = redis.NewScript(`
+		local key = KEYS[1]
+		local ttl = tonumber(ARGV[1])
+		local timeResult = redis.call('TIME')
+		local now = tonumber(timeResult[1])
+		local expireBefore = now - ttl
+		return redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore)
+	`)
+)
+
+type concurrencyCache struct {
+	rdb                 *redis.Client
+	slotTTLSeconds      int // slot TTL in seconds
+	waitQueueTTLSeconds int // wait-queue TTL in seconds
+}
+
+// NewConcurrencyCache creates the concurrency-control cache.
+// slotTTLMinutes: slot TTL in minutes; zero or negative falls back to the 15-minute default.
+// waitQueueTTLSeconds: wait-queue TTL in seconds; zero or negative falls back to the slot TTL.
+func NewConcurrencyCache(rdb *redis.Client, slotTTLMinutes int, waitQueueTTLSeconds int) service.ConcurrencyCache {
+	if slotTTLMinutes <= 0 {
+		slotTTLMinutes = defaultSlotTTLMinutes
+	}
+	if waitQueueTTLSeconds <= 0 {
+		waitQueueTTLSeconds = slotTTLMinutes * 60
+	}
+	return &concurrencyCache{
+		rdb:                 rdb,
+		slotTTLSeconds:      slotTTLMinutes * 60,
+		waitQueueTTLSeconds: waitQueueTTLSeconds,
+	}
+}
+
+// Helper functions for key generation
+func accountSlotKey(accountID int64) string {
+	return fmt.Sprintf("%s%d", accountSlotKeyPrefix, accountID)
+}
+
+func userSlotKey(userID int64) string {
+	return fmt.Sprintf("%s%d", userSlotKeyPrefix, userID)
+}
+
+func waitQueueKey(userID int64) string {
+	return fmt.Sprintf("%s%d", waitQueueKeyPrefix, userID)
+}
+
+func accountWaitKey(accountID int64) string {
+	return fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID)
+}
+
+// Account slot operations
+
+func (c *concurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) {
+	key := accountSlotKey(accountID)
+	// Timestamps come from the Redis TIME command inside the Lua script,
+	// keeping clocks consistent across instances.
+	result, err := acquireScript.Run(ctx, c.rdb, []string{key}, maxConcurrency, c.slotTTLSeconds, requestID).Int()
+	if err != nil {
+		return false, err
+	}
+	return result == 1, nil
+}
+
+func (c *concurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error {
+	key := accountSlotKey(accountID)
+	return c.rdb.ZRem(ctx, key, requestID).Err()
+}
+
+func (c *concurrencyCache) GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) {
+	key := accountSlotKey(accountID)
+	// Timestamps come from the Redis TIME command inside the Lua script.
+	result, err := getCountScript.Run(ctx, c.rdb, []string{key}, c.slotTTLSeconds).Int()
+	if err != nil {
+		return 0, err
+	}
+	return result, nil
+}
+
+// User slot operations
+
+func (c *concurrencyCache) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) {
+	key := userSlotKey(userID)
+	// Timestamps come from the Redis TIME command inside the Lua script,
+	// keeping clocks consistent across instances.
+	result, err := acquireScript.Run(ctx, c.rdb, []string{key}, maxConcurrency, c.slotTTLSeconds, requestID).Int()
+	if err != nil {
+		return false, err
+	}
+	return result == 1, nil
+}
+
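+// Note: acquire calls are idempotent per requestID. Retrying with the same ID
+// only refreshes that slot's timestamp (the ZSCORE branch in acquireScript),
+// so a retry never consumes a second slot.
+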
+func (c *concurrencyCache) ReleaseUserSlot(ctx context.Context, userID int64, requestID string) error {
+	key := userSlotKey(userID)
+	return c.rdb.ZRem(ctx, key, requestID).Err()
+}
+
+func (c *concurrencyCache) GetUserConcurrency(ctx context.Context, userID int64) (int, error) {
+	key := userSlotKey(userID)
+	// Timestamps come from the Redis TIME command inside the Lua script.
+	result, err := getCountScript.Run(ctx, c.rdb, []string{key}, c.slotTTLSeconds).Int()
+	if err != nil {
+		return 0, err
+	}
+	return result, nil
+}
+
+// Wait queue operations
+
+func (c *concurrencyCache) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) {
+	key := waitQueueKey(userID)
+	result, err := incrementWaitScript.Run(ctx, c.rdb, []string{key}, maxWait, c.waitQueueTTLSeconds).Int()
+	if err != nil {
+		return false, err
+	}
+	return result == 1, nil
+}
+
+func (c *concurrencyCache) DecrementWaitCount(ctx context.Context, userID int64) error {
+	key := waitQueueKey(userID)
+	_, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result()
+	return err
+}
+
+// Account wait queue operations
+
+func (c *concurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) {
+	key := accountWaitKey(accountID)
+	result, err := incrementAccountWaitScript.Run(ctx, c.rdb, []string{key}, maxWait, c.waitQueueTTLSeconds).Int()
+	if err != nil {
+		return false, err
+	}
+	return result == 1, nil
+}
+
+func (c *concurrencyCache) DecrementAccountWaitCount(ctx context.Context, accountID int64) error {
+	key := accountWaitKey(accountID)
+	_, err := decrementWaitScript.Run(ctx, c.rdb, []string{key}).Result()
+	return err
+}
+
+func (c *concurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) {
+	key := accountWaitKey(accountID)
+	val, err := c.rdb.Get(ctx, key).Int()
+	if err != nil && !errors.Is(err, redis.Nil) {
+		return 0, err
+	}
+	if errors.Is(err, redis.Nil) {
+		return 0, nil
+	}
+	return val, nil
+}
+
+func (c *concurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []service.AccountWithConcurrency) (map[int64]*service.AccountLoadInfo, error) {
+	if len(accounts) == 0 {
+		return map[int64]*service.AccountLoadInfo{}, nil
+	}
+
+	args := []any{c.slotTTLSeconds}
+	for _, acc := range accounts {
+		args = append(args, acc.ID, acc.MaxConcurrency)
+	}
+
+	result, err := getAccountsLoadBatchScript.Run(ctx, c.rdb, []string{}, args...).Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	loadMap := make(map[int64]*service.AccountLoadInfo)
+	for i := 0; i < len(result); i += 4 {
+		if i+3 >= len(result) {
+			break
+		}
+
+		accountID, _ := strconv.ParseInt(fmt.Sprintf("%v", result[i]), 10, 64)
+		currentConcurrency, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+1]))
+		waitingCount, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+2]))
+		loadRate, _ := strconv.Atoi(fmt.Sprintf("%v", result[i+3]))
+
+		loadMap[accountID] = &service.AccountLoadInfo{
+			AccountID:          accountID,
+			CurrentConcurrency: currentConcurrency,
+			WaitingCount:       waitingCount,
+			LoadRate:           loadRate,
+		}
+	}
+
+	return loadMap, nil
+}
+
+func (c *concurrencyCache) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error {
+	key := accountSlotKey(accountID)
+	_, err := cleanupExpiredSlotsScript.Run(ctx, c.rdb, []string{key}, c.slotTTLSeconds).Result()
+	return err
+}
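+
+// Example wiring (sketch; rdb is an assumed, already-initialized *redis.Client):
+//
+//	// 15-minute slot TTL and a 900-second wait-queue TTL; zero or negative
+//	// values fall back to the defaults documented on NewConcurrencyCache.
+//	cache := NewConcurrencyCache(rdb, 15, 900)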
"context" + "fmt" + "os" + "testing" + "time" + + "github.com/redis/go-redis/v9" +) + +// 基准测试用 TTL 配置 +const benchSlotTTLMinutes = 15 + +var benchSlotTTL = time.Duration(benchSlotTTLMinutes) * time.Minute + +// BenchmarkAccountConcurrency 用于对比 SCAN 与有序集合的计数性能。 +func BenchmarkAccountConcurrency(b *testing.B) { + rdb := newBenchmarkRedisClient(b) + defer func() { + _ = rdb.Close() + }() + + cache, _ := NewConcurrencyCache(rdb, benchSlotTTLMinutes, int(benchSlotTTL.Seconds())).(*concurrencyCache) + ctx := context.Background() + + for _, size := range []int{10, 100, 1000} { + size := size + b.Run(fmt.Sprintf("zset/slots=%d", size), func(b *testing.B) { + accountID := time.Now().UnixNano() + key := accountSlotKey(accountID) + + b.StopTimer() + members := make([]redis.Z, 0, size) + now := float64(time.Now().Unix()) + for i := 0; i < size; i++ { + members = append(members, redis.Z{ + Score: now, + Member: fmt.Sprintf("req_%d", i), + }) + } + if err := rdb.ZAdd(ctx, key, members...).Err(); err != nil { + b.Fatalf("初始化有序集合失败: %v", err) + } + if err := rdb.Expire(ctx, key, benchSlotTTL).Err(); err != nil { + b.Fatalf("设置有序集合 TTL 失败: %v", err) + } + b.StartTimer() + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if _, err := cache.GetAccountConcurrency(ctx, accountID); err != nil { + b.Fatalf("获取并发数量失败: %v", err) + } + } + + b.StopTimer() + if err := rdb.Del(ctx, key).Err(); err != nil { + b.Fatalf("清理有序集合失败: %v", err) + } + }) + + b.Run(fmt.Sprintf("scan/slots=%d", size), func(b *testing.B) { + accountID := time.Now().UnixNano() + pattern := fmt.Sprintf("%s%d:*", accountSlotKeyPrefix, accountID) + keys := make([]string, 0, size) + + b.StopTimer() + pipe := rdb.Pipeline() + for i := 0; i < size; i++ { + key := fmt.Sprintf("%s%d:req_%d", accountSlotKeyPrefix, accountID, i) + keys = append(keys, key) + pipe.Set(ctx, key, "1", benchSlotTTL) + } + if _, err := pipe.Exec(ctx); err != nil { + b.Fatalf("初始化扫描键失败: %v", err) + } + b.StartTimer() + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if _, err := scanSlotCount(ctx, rdb, pattern); err != nil { + b.Fatalf("SCAN 计数失败: %v", err) + } + } + + b.StopTimer() + if err := rdb.Del(ctx, keys...).Err(); err != nil { + b.Fatalf("清理扫描键失败: %v", err) + } + }) + } +} + +func scanSlotCount(ctx context.Context, rdb *redis.Client, pattern string) (int, error) { + var cursor uint64 + count := 0 + for { + keys, nextCursor, err := rdb.Scan(ctx, cursor, pattern, 100).Result() + if err != nil { + return 0, err + } + count += len(keys) + if nextCursor == 0 { + break + } + cursor = nextCursor + } + return count, nil +} + +func newBenchmarkRedisClient(b *testing.B) *redis.Client { + b.Helper() + + redisURL := os.Getenv("TEST_REDIS_URL") + if redisURL == "" { + b.Skip("未设置 TEST_REDIS_URL,跳过 Redis 基准测试") + } + + opt, err := redis.ParseURL(redisURL) + if err != nil { + b.Fatalf("解析 TEST_REDIS_URL 失败: %v", err) + } + + client := redis.NewClient(opt) + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + if err := client.Ping(ctx).Err(); err != nil { + b.Fatalf("Redis 连接失败: %v", err) + } + + return client +} diff --git a/backend/internal/repository/concurrency_cache_integration_test.go b/backend/internal/repository/concurrency_cache_integration_test.go new file mode 100644 index 00000000..5983c832 --- /dev/null +++ b/backend/internal/repository/concurrency_cache_integration_test.go @@ -0,0 +1,412 @@ +//go:build integration + +package repository + +import ( + "errors" + "fmt" + "testing" + "time" + + 
"github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// 测试用 TTL 配置(15 分钟,与默认值一致) +const testSlotTTLMinutes = 15 + +// 测试用 TTL Duration,用于 TTL 断言 +var testSlotTTL = time.Duration(testSlotTTLMinutes) * time.Minute + +type ConcurrencyCacheSuite struct { + IntegrationRedisSuite + cache service.ConcurrencyCache +} + +func (s *ConcurrencyCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewConcurrencyCache(s.rdb, testSlotTTLMinutes, int(testSlotTTL.Seconds())) +} + +func (s *ConcurrencyCacheSuite) TestAccountSlot_AcquireAndRelease() { + accountID := int64(10) + reqID1, reqID2, reqID3 := "req1", "req2", "req3" + + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 2, reqID1) + require.NoError(s.T(), err, "AcquireAccountSlot 1") + require.True(s.T(), ok) + + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 2, reqID2) + require.NoError(s.T(), err, "AcquireAccountSlot 2") + require.True(s.T(), ok) + + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 2, reqID3) + require.NoError(s.T(), err, "AcquireAccountSlot 3") + require.False(s.T(), ok, "expected third acquire to fail") + + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err, "GetAccountConcurrency") + require.Equal(s.T(), 2, cur, "concurrency mismatch") + + require.NoError(s.T(), s.cache.ReleaseAccountSlot(s.ctx, accountID, reqID1), "ReleaseAccountSlot") + + cur, err = s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err, "GetAccountConcurrency after release") + require.Equal(s.T(), 1, cur, "expected 1 after release") +} + +func (s *ConcurrencyCacheSuite) TestAccountSlot_TTL() { + accountID := int64(11) + reqID := "req_ttl_test" + slotKey := fmt.Sprintf("%s%d", accountSlotKeyPrefix, accountID) + + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, reqID) + require.NoError(s.T(), err, "AcquireAccountSlot") + require.True(s.T(), ok) + + ttl, err := s.rdb.TTL(s.ctx, slotKey).Result() + require.NoError(s.T(), err, "TTL") + s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) +} + +func (s *ConcurrencyCacheSuite) TestAccountSlot_DuplicateReqID() { + accountID := int64(12) + reqID := "dup-req" + + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 2, reqID) + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Acquiring with same reqID should be idempotent + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 2, reqID) + require.NoError(s.T(), err) + require.True(s.T(), ok) + + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 1, cur, "expected concurrency=1 (idempotent)") +} + +func (s *ConcurrencyCacheSuite) TestAccountSlot_ReleaseIdempotent() { + accountID := int64(13) + reqID := "release-test" + + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 1, reqID) + require.NoError(s.T(), err) + require.True(s.T(), ok) + + require.NoError(s.T(), s.cache.ReleaseAccountSlot(s.ctx, accountID, reqID), "ReleaseAccountSlot") + // Releasing again should not error + require.NoError(s.T(), s.cache.ReleaseAccountSlot(s.ctx, accountID, reqID), "ReleaseAccountSlot again") + // Releasing non-existent should not error + require.NoError(s.T(), s.cache.ReleaseAccountSlot(s.ctx, accountID, "non-existent"), "ReleaseAccountSlot non-existent") + + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 0, cur) +} + 
+func (s *ConcurrencyCacheSuite) TestAccountSlot_MaxZero() { + accountID := int64(14) + reqID := "max-zero-test" + + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 0, reqID) + require.NoError(s.T(), err) + require.False(s.T(), ok, "expected acquire to fail with max=0") +} + +func (s *ConcurrencyCacheSuite) TestUserSlot_AcquireAndRelease() { + userID := int64(42) + reqID1, reqID2 := "req1", "req2" + + ok, err := s.cache.AcquireUserSlot(s.ctx, userID, 1, reqID1) + require.NoError(s.T(), err, "AcquireUserSlot") + require.True(s.T(), ok) + + ok, err = s.cache.AcquireUserSlot(s.ctx, userID, 1, reqID2) + require.NoError(s.T(), err, "AcquireUserSlot 2") + require.False(s.T(), ok, "expected second acquire to fail at max=1") + + cur, err := s.cache.GetUserConcurrency(s.ctx, userID) + require.NoError(s.T(), err, "GetUserConcurrency") + require.Equal(s.T(), 1, cur, "expected concurrency=1") + + require.NoError(s.T(), s.cache.ReleaseUserSlot(s.ctx, userID, reqID1), "ReleaseUserSlot") + // Releasing a non-existent slot should not error + require.NoError(s.T(), s.cache.ReleaseUserSlot(s.ctx, userID, "non-existent"), "ReleaseUserSlot non-existent") + + cur, err = s.cache.GetUserConcurrency(s.ctx, userID) + require.NoError(s.T(), err, "GetUserConcurrency after release") + require.Equal(s.T(), 0, cur, "expected concurrency=0 after release") +} + +func (s *ConcurrencyCacheSuite) TestUserSlot_TTL() { + userID := int64(200) + reqID := "req_ttl_test" + slotKey := fmt.Sprintf("%s%d", userSlotKeyPrefix, userID) + + ok, err := s.cache.AcquireUserSlot(s.ctx, userID, 5, reqID) + require.NoError(s.T(), err, "AcquireUserSlot") + require.True(s.T(), ok) + + ttl, err := s.rdb.TTL(s.ctx, slotKey).Result() + require.NoError(s.T(), err, "TTL") + s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) +} + +func (s *ConcurrencyCacheSuite) TestWaitQueue_IncrementAndDecrement() { + userID := int64(20) + waitKey := fmt.Sprintf("%s%d", waitQueueKeyPrefix, userID) + + ok, err := s.cache.IncrementWaitCount(s.ctx, userID, 2) + require.NoError(s.T(), err, "IncrementWaitCount 1") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementWaitCount(s.ctx, userID, 2) + require.NoError(s.T(), err, "IncrementWaitCount 2") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementWaitCount(s.ctx, userID, 2) + require.NoError(s.T(), err, "IncrementWaitCount 3") + require.False(s.T(), ok, "expected wait increment over max to fail") + + ttl, err := s.rdb.TTL(s.ctx, waitKey).Result() + require.NoError(s.T(), err, "TTL waitKey") + s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) + + require.NoError(s.T(), s.cache.DecrementWaitCount(s.ctx, userID), "DecrementWaitCount") + + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.Equal(s.T(), 1, val, "expected wait count 1") +} + +func (s *ConcurrencyCacheSuite) TestWaitQueue_DecrementNoNegative() { + userID := int64(300) + waitKey := fmt.Sprintf("%s%d", waitQueueKeyPrefix, userID) + + // Test decrement on non-existent key - should not error and should not create negative value + require.NoError(s.T(), s.cache.DecrementWaitCount(s.ctx, userID), "DecrementWaitCount on non-existent key") + + // Verify no key was created or it's not negative + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.GreaterOrEqual(s.T(), val, 0, "expected non-negative wait count after decrement on empty") + + // Set count to 1, then decrement twice + 
ok, err := s.cache.IncrementWaitCount(s.ctx, userID, 5) + require.NoError(s.T(), err, "IncrementWaitCount") + require.True(s.T(), ok) + + // Decrement once (1 -> 0) + require.NoError(s.T(), s.cache.DecrementWaitCount(s.ctx, userID), "DecrementWaitCount") + + // Decrement again on 0 - should not go negative + require.NoError(s.T(), s.cache.DecrementWaitCount(s.ctx, userID), "DecrementWaitCount on zero") + + // Verify count is 0, not negative + val, err = s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey after double decrement") + } + require.GreaterOrEqual(s.T(), val, 0, "expected non-negative wait count") +} + +func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_IncrementAndDecrement() { + accountID := int64(30) + waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) + + ok, err := s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 1") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 2") + require.True(s.T(), ok) + + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, accountID, 2) + require.NoError(s.T(), err, "IncrementAccountWaitCount 3") + require.False(s.T(), ok, "expected account wait increment over max to fail") + + ttl, err := s.rdb.TTL(s.ctx, waitKey).Result() + require.NoError(s.T(), err, "TTL account waitKey") + s.AssertTTLWithin(ttl, 1*time.Second, testSlotTTL) + + require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount") + + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.Equal(s.T(), 1, val, "expected account wait count 1") +} + +func (s *ConcurrencyCacheSuite) TestAccountWaitQueue_DecrementNoNegative() { + accountID := int64(301) + waitKey := fmt.Sprintf("%s%d", accountWaitKeyPrefix, accountID) + + require.NoError(s.T(), s.cache.DecrementAccountWaitCount(s.ctx, accountID), "DecrementAccountWaitCount on non-existent key") + + val, err := s.rdb.Get(s.ctx, waitKey).Int() + if !errors.Is(err, redis.Nil) { + require.NoError(s.T(), err, "Get waitKey") + } + require.GreaterOrEqual(s.T(), val, 0, "expected non-negative account wait count after decrement on empty") +} + +func (s *ConcurrencyCacheSuite) TestGetAccountConcurrency_Missing() { + // When no slots exist, GetAccountConcurrency should return 0 + cur, err := s.cache.GetAccountConcurrency(s.ctx, 999) + require.NoError(s.T(), err) + require.Equal(s.T(), 0, cur) +} + +func (s *ConcurrencyCacheSuite) TestGetUserConcurrency_Missing() { + // When no slots exist, GetUserConcurrency should return 0 + cur, err := s.cache.GetUserConcurrency(s.ctx, 999) + require.NoError(s.T(), err) + require.Equal(s.T(), 0, cur) +} + +func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch() { + s.T().Skip("TODO: Fix this test - CurrentConcurrency returns 0 instead of expected value in CI") + // Setup: Create accounts with different load states + account1 := int64(100) + account2 := int64(101) + account3 := int64(102) + + // Account 1: 2/3 slots used, 1 waiting + ok, err := s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req1") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, account1, 3, "req2") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.IncrementAccountWaitCount(s.ctx, account1, 5) + 
require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Account 2: 1/2 slots used, 0 waiting + ok, err = s.cache.AcquireAccountSlot(s.ctx, account2, 2, "req3") + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Account 3: 0/1 slots used, 0 waiting (idle) + + // Query batch load + accounts := []service.AccountWithConcurrency{ + {ID: account1, MaxConcurrency: 3}, + {ID: account2, MaxConcurrency: 2}, + {ID: account3, MaxConcurrency: 1}, + } + + loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, accounts) + require.NoError(s.T(), err) + require.Len(s.T(), loadMap, 3) + + // Verify account1: (2 + 1) / 3 = 100% + load1 := loadMap[account1] + require.NotNil(s.T(), load1) + require.Equal(s.T(), account1, load1.AccountID) + require.Equal(s.T(), 2, load1.CurrentConcurrency) + require.Equal(s.T(), 1, load1.WaitingCount) + require.Equal(s.T(), 100, load1.LoadRate) + + // Verify account2: (1 + 0) / 2 = 50% + load2 := loadMap[account2] + require.NotNil(s.T(), load2) + require.Equal(s.T(), account2, load2.AccountID) + require.Equal(s.T(), 1, load2.CurrentConcurrency) + require.Equal(s.T(), 0, load2.WaitingCount) + require.Equal(s.T(), 50, load2.LoadRate) + + // Verify account3: (0 + 0) / 1 = 0% + load3 := loadMap[account3] + require.NotNil(s.T(), load3) + require.Equal(s.T(), account3, load3.AccountID) + require.Equal(s.T(), 0, load3.CurrentConcurrency) + require.Equal(s.T(), 0, load3.WaitingCount) + require.Equal(s.T(), 0, load3.LoadRate) +} + +func (s *ConcurrencyCacheSuite) TestGetAccountsLoadBatch_Empty() { + // Test with empty account list + loadMap, err := s.cache.GetAccountsLoadBatch(s.ctx, []service.AccountWithConcurrency{}) + require.NoError(s.T(), err) + require.Empty(s.T(), loadMap) +} + +func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots() { + accountID := int64(200) + slotKey := fmt.Sprintf("%s%d", accountSlotKeyPrefix, accountID) + + // Acquire 3 slots + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2") + require.NoError(s.T(), err) + require.True(s.T(), ok) + ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req3") + require.NoError(s.T(), err) + require.True(s.T(), ok) + + // Verify 3 slots exist + cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 3, cur) + + // Manually set old timestamps for req1 and req2 (simulate expired slots) + now := time.Now().Unix() + expiredTime := now - int64(testSlotTTL.Seconds()) - 10 // 10 seconds past TTL + err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req1"}).Err() + require.NoError(s.T(), err) + err = s.rdb.ZAdd(s.ctx, slotKey, redis.Z{Score: float64(expiredTime), Member: "req2"}).Err() + require.NoError(s.T(), err) + + // Run cleanup + err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID) + require.NoError(s.T(), err) + + // Verify only 1 slot remains (req3) + cur, err = s.cache.GetAccountConcurrency(s.ctx, accountID) + require.NoError(s.T(), err) + require.Equal(s.T(), 1, cur) + + // Verify req3 still exists + members, err := s.rdb.ZRange(s.ctx, slotKey, 0, -1).Result() + require.NoError(s.T(), err) + require.Len(s.T(), members, 1) + require.Equal(s.T(), "req3", members[0]) +} + +func (s *ConcurrencyCacheSuite) TestCleanupExpiredAccountSlots_NoExpired() { + accountID := int64(201) + + // Acquire 2 fresh slots + ok, err := s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req1") 
+	require.NoError(s.T(), err)
+	require.True(s.T(), ok)
+	ok, err = s.cache.AcquireAccountSlot(s.ctx, accountID, 5, "req2")
+	require.NoError(s.T(), err)
+	require.True(s.T(), ok)
+
+	// Run cleanup (should not remove anything)
+	err = s.cache.CleanupExpiredAccountSlots(s.ctx, accountID)
+	require.NoError(s.T(), err)
+
+	// Verify both slots still exist
+	cur, err := s.cache.GetAccountConcurrency(s.ctx, accountID)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), 2, cur)
+}
+
+func TestConcurrencyCacheSuite(t *testing.T) {
+	suite.Run(t, new(ConcurrencyCacheSuite))
+}
diff --git a/backend/internal/repository/dashboard_aggregation_repo.go b/backend/internal/repository/dashboard_aggregation_repo.go
new file mode 100644
index 00000000..3543e061
--- /dev/null
+++ b/backend/internal/repository/dashboard_aggregation_repo.go
@@ -0,0 +1,392 @@
+package repository
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/lib/pq"
+)
+
+type dashboardAggregationRepository struct {
+	sql sqlExecutor
+}
+
+// NewDashboardAggregationRepository creates the dashboard pre-aggregation repository.
+func NewDashboardAggregationRepository(sqlDB *sql.DB) service.DashboardAggregationRepository {
+	if sqlDB == nil {
+		return nil
+	}
+	if !isPostgresDriver(sqlDB) {
+		log.Printf("[DashboardAggregation] non-PostgreSQL driver detected; pre-aggregation disabled")
+		return nil
+	}
+	return newDashboardAggregationRepositoryWithSQL(sqlDB)
+}
+
+func newDashboardAggregationRepositoryWithSQL(sqlq sqlExecutor) *dashboardAggregationRepository {
+	return &dashboardAggregationRepository{sql: sqlq}
+}
+
+func isPostgresDriver(db *sql.DB) bool {
+	if db == nil {
+		return false
+	}
+	_, ok := db.Driver().(*pq.Driver)
+	return ok
+}
+
+func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, start, end time.Time) error {
+	loc := timezone.Location()
+	startLocal := start.In(loc)
+	endLocal := end.In(loc)
+	if !endLocal.After(startLocal) {
+		return nil
+	}
+
+	hourStart := startLocal.Truncate(time.Hour)
+	hourEnd := endLocal.Truncate(time.Hour)
+	if endLocal.After(hourEnd) {
+		hourEnd = hourEnd.Add(time.Hour)
+	}
+
+	dayStart := truncateToDay(startLocal)
+	dayEnd := truncateToDay(endLocal)
+	if endLocal.After(dayEnd) {
+		dayEnd = dayEnd.Add(24 * time.Hour)
+	}
+
+	// Aggregate on bucket boundaries so the bucket containing end is covered
+	// in full (e.g. a 10:20-12:05 range expands to the hour buckets
+	// [10:00, 13:00)).
+	if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil {
+		return err
+	}
+	if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) {
+	var ts time.Time
+	query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1"
+	if err := scanSingleRow(ctx, r.sql, query, nil, &ts); err != nil {
+		if err == sql.ErrNoRows {
+			return time.Unix(0, 0).UTC(), nil
+		}
+		return time.Time{}, err
+	}
+	return ts.UTC(), nil
+}
+
+func (r *dashboardAggregationRepository) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error {
+	query := `
+		INSERT INTO usage_dashboard_aggregation_watermark (id, last_aggregated_at, updated_at)
+		VALUES (1, $1, NOW())
+		ON CONFLICT (id)
+		DO UPDATE SET last_aggregated_at = EXCLUDED.last_aggregated_at, updated_at =
EXCLUDED.updated_at + ` + _, err := r.sql.ExecContext(ctx, query, aggregatedAt.UTC()) + return err +} + +func (r *dashboardAggregationRepository) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + hourlyCutoffUTC := hourlyCutoff.UTC() + dailyCutoffUTC := dailyCutoff.UTC() + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start < $1", hourlyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start < $1", hourlyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil { + return err + } + return nil +} + +func (r *dashboardAggregationRepository) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + isPartitioned, err := r.isUsageLogsPartitioned(ctx) + if err != nil { + return err + } + if isPartitioned { + return r.dropUsageLogsPartitions(ctx, cutoff) + } + _, err = r.sql.ExecContext(ctx, "DELETE FROM usage_logs WHERE created_at < $1", cutoff.UTC()) + return err +} + +func (r *dashboardAggregationRepository) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + isPartitioned, err := r.isUsageLogsPartitioned(ctx) + if err != nil || !isPartitioned { + return err + } + monthStart := truncateToMonthUTC(now) + prevMonth := monthStart.AddDate(0, -1, 0) + nextMonth := monthStart.AddDate(0, 1, 0) + + for _, m := range []time.Time{prevMonth, monthStart, nextMonth} { + if err := r.createUsageLogsPartition(ctx, m); err != nil { + return err + } + } + return nil +} + +func (r *dashboardAggregationRepository) insertHourlyActiveUsers(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + INSERT INTO usage_dashboard_hourly_users (bucket_start, user_id) + SELECT DISTINCT + date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start, + user_id + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + ON CONFLICT DO NOTHING + ` + _, err := r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) insertDailyActiveUsers(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + INSERT INTO usage_dashboard_daily_users (bucket_date, user_id) + SELECT DISTINCT + (bucket_start AT TIME ZONE $3)::date AS bucket_date, + user_id + FROM usage_dashboard_hourly_users + WHERE bucket_start >= $1 AND bucket_start < $2 + ON CONFLICT DO NOTHING + ` + _, err := r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + WITH hourly AS ( + SELECT + date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start, + COUNT(*) AS total_requests, + COALESCE(SUM(input_tokens), 0) AS input_tokens, + COALESCE(SUM(output_tokens), 0) AS output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens, + COALESCE(SUM(total_cost), 0) AS total_cost, + COALESCE(SUM(actual_cost), 0) AS actual_cost, + COALESCE(SUM(COALESCE(duration_ms, 0)), 0) AS total_duration_ms + FROM usage_logs + WHERE 
created_at >= $1 AND created_at < $2 + GROUP BY 1 + ), + user_counts AS ( + SELECT bucket_start, COUNT(*) AS active_users + FROM usage_dashboard_hourly_users + WHERE bucket_start >= $1 AND bucket_start < $2 + GROUP BY bucket_start + ) + INSERT INTO usage_dashboard_hourly ( + bucket_start, + total_requests, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + total_cost, + actual_cost, + total_duration_ms, + active_users, + computed_at + ) + SELECT + hourly.bucket_start, + hourly.total_requests, + hourly.input_tokens, + hourly.output_tokens, + hourly.cache_creation_tokens, + hourly.cache_read_tokens, + hourly.total_cost, + hourly.actual_cost, + hourly.total_duration_ms, + COALESCE(user_counts.active_users, 0) AS active_users, + NOW() + FROM hourly + LEFT JOIN user_counts ON user_counts.bucket_start = hourly.bucket_start + ON CONFLICT (bucket_start) + DO UPDATE SET + total_requests = EXCLUDED.total_requests, + input_tokens = EXCLUDED.input_tokens, + output_tokens = EXCLUDED.output_tokens, + cache_creation_tokens = EXCLUDED.cache_creation_tokens, + cache_read_tokens = EXCLUDED.cache_read_tokens, + total_cost = EXCLUDED.total_cost, + actual_cost = EXCLUDED.actual_cost, + total_duration_ms = EXCLUDED.total_duration_ms, + active_users = EXCLUDED.active_users, + computed_at = EXCLUDED.computed_at + ` + _, err := r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + WITH daily AS ( + SELECT + (bucket_start AT TIME ZONE $5)::date AS bucket_date, + COALESCE(SUM(total_requests), 0) AS total_requests, + COALESCE(SUM(input_tokens), 0) AS input_tokens, + COALESCE(SUM(output_tokens), 0) AS output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens, + COALESCE(SUM(total_cost), 0) AS total_cost, + COALESCE(SUM(actual_cost), 0) AS actual_cost, + COALESCE(SUM(total_duration_ms), 0) AS total_duration_ms + FROM usage_dashboard_hourly + WHERE bucket_start >= $1 AND bucket_start < $2 + GROUP BY (bucket_start AT TIME ZONE $5)::date + ), + user_counts AS ( + SELECT bucket_date, COUNT(*) AS active_users + FROM usage_dashboard_daily_users + WHERE bucket_date >= $3::date AND bucket_date < $4::date + GROUP BY bucket_date + ) + INSERT INTO usage_dashboard_daily ( + bucket_date, + total_requests, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + total_cost, + actual_cost, + total_duration_ms, + active_users, + computed_at + ) + SELECT + daily.bucket_date, + daily.total_requests, + daily.input_tokens, + daily.output_tokens, + daily.cache_creation_tokens, + daily.cache_read_tokens, + daily.total_cost, + daily.actual_cost, + daily.total_duration_ms, + COALESCE(user_counts.active_users, 0) AS active_users, + NOW() + FROM daily + LEFT JOIN user_counts ON user_counts.bucket_date = daily.bucket_date + ON CONFLICT (bucket_date) + DO UPDATE SET + total_requests = EXCLUDED.total_requests, + input_tokens = EXCLUDED.input_tokens, + output_tokens = EXCLUDED.output_tokens, + cache_creation_tokens = EXCLUDED.cache_creation_tokens, + cache_read_tokens = EXCLUDED.cache_read_tokens, + total_cost = EXCLUDED.total_cost, + actual_cost = EXCLUDED.actual_cost, + total_duration_ms = EXCLUDED.total_duration_ms, + active_users = EXCLUDED.active_users, + computed_at = EXCLUDED.computed_at + ` + _, err := r.sql.ExecContext(ctx, query, 
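+	// Five bind parameters: $1/$2 bound the hourly bucket_start range,
+	// $3/$4 bound the derived bucket_date range, and $5 is the IANA timezone
+	// name used for the local-date conversion.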
start, end, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) isUsageLogsPartitioned(ctx context.Context) (bool, error) { + query := ` + SELECT EXISTS( + SELECT 1 + FROM pg_partitioned_table pt + JOIN pg_class c ON c.oid = pt.partrelid + WHERE c.relname = 'usage_logs' + ) + ` + var partitioned bool + if err := scanSingleRow(ctx, r.sql, query, nil, &partitioned); err != nil { + return false, err + } + return partitioned, nil +} + +func (r *dashboardAggregationRepository) dropUsageLogsPartitions(ctx context.Context, cutoff time.Time) error { + rows, err := r.sql.QueryContext(ctx, ` + SELECT c.relname + FROM pg_inherits + JOIN pg_class c ON c.oid = pg_inherits.inhrelid + JOIN pg_class p ON p.oid = pg_inherits.inhparent + WHERE p.relname = 'usage_logs' + `) + if err != nil { + return err + } + defer func() { + _ = rows.Close() + }() + + cutoffMonth := truncateToMonthUTC(cutoff) + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return err + } + if !strings.HasPrefix(name, "usage_logs_") { + continue + } + suffix := strings.TrimPrefix(name, "usage_logs_") + month, err := time.Parse("200601", suffix) + if err != nil { + continue + } + month = month.UTC() + if month.Before(cutoffMonth) { + if _, err := r.sql.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", pq.QuoteIdentifier(name))); err != nil { + return err + } + } + } + return rows.Err() +} + +func (r *dashboardAggregationRepository) createUsageLogsPartition(ctx context.Context, month time.Time) error { + monthStart := truncateToMonthUTC(month) + nextMonth := monthStart.AddDate(0, 1, 0) + name := fmt.Sprintf("usage_logs_%s", monthStart.Format("200601")) + query := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s PARTITION OF usage_logs FOR VALUES FROM (%s) TO (%s)", + pq.QuoteIdentifier(name), + pq.QuoteLiteral(monthStart.Format("2006-01-02")), + pq.QuoteLiteral(nextMonth.Format("2006-01-02")), + ) + _, err := r.sql.ExecContext(ctx, query) + return err +} + +func truncateToDay(t time.Time) time.Time { + return timezone.StartOfDay(t) +} + +func truncateToMonthUTC(t time.Time) time.Time { + t = t.UTC() + return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC) +} diff --git a/backend/internal/repository/dashboard_cache.go b/backend/internal/repository/dashboard_cache.go new file mode 100644 index 00000000..f996cd68 --- /dev/null +++ b/backend/internal/repository/dashboard_cache.go @@ -0,0 +1,58 @@ +package repository + +import ( + "context" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const dashboardStatsCacheKey = "dashboard:stats:v1" + +type dashboardCache struct { + rdb *redis.Client + keyPrefix string +} + +func NewDashboardCache(rdb *redis.Client, cfg *config.Config) service.DashboardStatsCache { + prefix := "sub2api:" + if cfg != nil { + prefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix) + } + if prefix != "" && !strings.HasSuffix(prefix, ":") { + prefix += ":" + } + return &dashboardCache{ + rdb: rdb, + keyPrefix: prefix, + } +} + +func (c *dashboardCache) GetDashboardStats(ctx context.Context) (string, error) { + val, err := c.rdb.Get(ctx, c.buildKey()).Result() + if err != nil { + if err == redis.Nil { + return "", service.ErrDashboardStatsCacheMiss + } + return "", err + } + return val, nil +} + +func (c *dashboardCache) SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error { + return c.rdb.Set(ctx, c.buildKey(), data, ttl).Err() 
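+	// Callers wrap this Get/Set pair in a cache-aside pattern (a minimal
+	// sketch; the recompute step is illustrative):
+	//
+	//	payload, err := cache.GetDashboardStats(ctx)
+	//	if errors.Is(err, service.ErrDashboardStatsCacheMiss) {
+	//		payload = recomputeStatsJSON() // hypothetical recompute
+	//		_ = cache.SetDashboardStats(ctx, payload, 30*time.Second)
+	//	}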
+} + +func (c *dashboardCache) buildKey() string { + if c.keyPrefix == "" { + return dashboardStatsCacheKey + } + return c.keyPrefix + dashboardStatsCacheKey +} + +func (c *dashboardCache) DeleteDashboardStats(ctx context.Context) error { + return c.rdb.Del(ctx, c.buildKey()).Err() +} diff --git a/backend/internal/repository/dashboard_cache_test.go b/backend/internal/repository/dashboard_cache_test.go new file mode 100644 index 00000000..3bb0da4f --- /dev/null +++ b/backend/internal/repository/dashboard_cache_test.go @@ -0,0 +1,28 @@ +package repository + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestNewDashboardCacheKeyPrefix(t *testing.T) { + cache := NewDashboardCache(nil, &config.Config{ + Dashboard: config.DashboardCacheConfig{ + KeyPrefix: "prod", + }, + }) + impl, ok := cache.(*dashboardCache) + require.True(t, ok) + require.Equal(t, "prod:", impl.keyPrefix) + + cache = NewDashboardCache(nil, &config.Config{ + Dashboard: config.DashboardCacheConfig{ + KeyPrefix: "staging:", + }, + }) + impl, ok = cache.(*dashboardCache) + require.True(t, ok) + require.Equal(t, "staging:", impl.keyPrefix) +} diff --git a/backend/internal/repository/db_pool.go b/backend/internal/repository/db_pool.go new file mode 100644 index 00000000..d7116ab1 --- /dev/null +++ b/backend/internal/repository/db_pool.go @@ -0,0 +1,32 @@ +package repository + +import ( + "database/sql" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +type dbPoolSettings struct { + MaxOpenConns int + MaxIdleConns int + ConnMaxLifetime time.Duration + ConnMaxIdleTime time.Duration +} + +func buildDBPoolSettings(cfg *config.Config) dbPoolSettings { + return dbPoolSettings{ + MaxOpenConns: cfg.Database.MaxOpenConns, + MaxIdleConns: cfg.Database.MaxIdleConns, + ConnMaxLifetime: time.Duration(cfg.Database.ConnMaxLifetimeMinutes) * time.Minute, + ConnMaxIdleTime: time.Duration(cfg.Database.ConnMaxIdleTimeMinutes) * time.Minute, + } +} + +func applyDBPoolSettings(db *sql.DB, cfg *config.Config) { + settings := buildDBPoolSettings(cfg) + db.SetMaxOpenConns(settings.MaxOpenConns) + db.SetMaxIdleConns(settings.MaxIdleConns) + db.SetConnMaxLifetime(settings.ConnMaxLifetime) + db.SetConnMaxIdleTime(settings.ConnMaxIdleTime) +} diff --git a/backend/internal/repository/db_pool_test.go b/backend/internal/repository/db_pool_test.go new file mode 100644 index 00000000..3868106a --- /dev/null +++ b/backend/internal/repository/db_pool_test.go @@ -0,0 +1,50 @@ +package repository + +import ( + "database/sql" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" + + _ "github.com/lib/pq" +) + +func TestBuildDBPoolSettings(t *testing.T) { + cfg := &config.Config{ + Database: config.DatabaseConfig{ + MaxOpenConns: 50, + MaxIdleConns: 10, + ConnMaxLifetimeMinutes: 30, + ConnMaxIdleTimeMinutes: 5, + }, + } + + settings := buildDBPoolSettings(cfg) + require.Equal(t, 50, settings.MaxOpenConns) + require.Equal(t, 10, settings.MaxIdleConns) + require.Equal(t, 30*time.Minute, settings.ConnMaxLifetime) + require.Equal(t, 5*time.Minute, settings.ConnMaxIdleTime) +} + +func TestApplyDBPoolSettings(t *testing.T) { + cfg := &config.Config{ + Database: config.DatabaseConfig{ + MaxOpenConns: 40, + MaxIdleConns: 8, + ConnMaxLifetimeMinutes: 15, + ConnMaxIdleTimeMinutes: 3, + }, + } + + db, err := sql.Open("postgres", "host=127.0.0.1 port=5432 user=postgres sslmode=disable") + require.NoError(t, err) + t.Cleanup(func() { + 
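+		// sql.Open only validates the DSN and prepares the pool; it does not
+		// dial PostgreSQL, so this test runs without a live database. Closing
+		// the handle here releases the pool.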
_ = db.Close() + }) + + applyDBPoolSettings(db, cfg) + stats := db.Stats() + require.Equal(t, 40, stats.MaxOpenConnections) +} diff --git a/backend/internal/repository/email_cache.go b/backend/internal/repository/email_cache.go new file mode 100644 index 00000000..e00e35dd --- /dev/null +++ b/backend/internal/repository/email_cache.go @@ -0,0 +1,52 @@ +package repository + +import ( + "context" + "encoding/json" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const verifyCodeKeyPrefix = "verify_code:" + +// verifyCodeKey generates the Redis key for email verification code. +func verifyCodeKey(email string) string { + return verifyCodeKeyPrefix + email +} + +type emailCache struct { + rdb *redis.Client +} + +func NewEmailCache(rdb *redis.Client) service.EmailCache { + return &emailCache{rdb: rdb} +} + +func (c *emailCache) GetVerificationCode(ctx context.Context, email string) (*service.VerificationCodeData, error) { + key := verifyCodeKey(email) + val, err := c.rdb.Get(ctx, key).Result() + if err != nil { + return nil, err + } + var data service.VerificationCodeData + if err := json.Unmarshal([]byte(val), &data); err != nil { + return nil, err + } + return &data, nil +} + +func (c *emailCache) SetVerificationCode(ctx context.Context, email string, data *service.VerificationCodeData, ttl time.Duration) error { + key := verifyCodeKey(email) + val, err := json.Marshal(data) + if err != nil { + return err + } + return c.rdb.Set(ctx, key, val, ttl).Err() +} + +func (c *emailCache) DeleteVerificationCode(ctx context.Context, email string) error { + key := verifyCodeKey(email) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/email_cache_integration_test.go b/backend/internal/repository/email_cache_integration_test.go new file mode 100644 index 00000000..40ec677b --- /dev/null +++ b/backend/internal/repository/email_cache_integration_test.go @@ -0,0 +1,92 @@ +//go:build integration + +package repository + +import ( + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EmailCacheSuite struct { + IntegrationRedisSuite + cache service.EmailCache +} + +func (s *EmailCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewEmailCache(s.rdb) +} + +func (s *EmailCacheSuite) TestGetVerificationCode_Missing() { + _, err := s.cache.GetVerificationCode(s.ctx, "nonexistent@example.com") + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil for missing verification code") +} + +func (s *EmailCacheSuite) TestSetAndGetVerificationCode() { + email := "a@example.com" + emailTTL := 2 * time.Minute + data := &service.VerificationCodeData{Code: "123456", Attempts: 1, CreatedAt: time.Now()} + + require.NoError(s.T(), s.cache.SetVerificationCode(s.ctx, email, data, emailTTL), "SetVerificationCode") + + got, err := s.cache.GetVerificationCode(s.ctx, email) + require.NoError(s.T(), err, "GetVerificationCode") + require.Equal(s.T(), "123456", got.Code) + require.Equal(s.T(), 1, got.Attempts) +} + +func (s *EmailCacheSuite) TestVerificationCode_TTL() { + email := "ttl@example.com" + emailTTL := 2 * time.Minute + data := &service.VerificationCodeData{Code: "654321", Attempts: 0, CreatedAt: time.Now()} + + require.NoError(s.T(), s.cache.SetVerificationCode(s.ctx, email, data, emailTTL), "SetVerificationCode") + + emailKey := verifyCodeKeyPrefix + email + ttl, 
err := s.rdb.TTL(s.ctx, emailKey).Result()
+	require.NoError(s.T(), err, "TTL emailKey")
+	s.AssertTTLWithin(ttl, 1*time.Second, emailTTL)
+}
+
+func (s *EmailCacheSuite) TestDeleteVerificationCode() {
+	email := "delete@example.com"
+	data := &service.VerificationCodeData{Code: "999999", Attempts: 0, CreatedAt: time.Now()}
+
+	require.NoError(s.T(), s.cache.SetVerificationCode(s.ctx, email, data, 2*time.Minute), "SetVerificationCode")
+
+	// Verify it exists
+	_, err := s.cache.GetVerificationCode(s.ctx, email)
+	require.NoError(s.T(), err, "GetVerificationCode before delete")
+
+	// Delete
+	require.NoError(s.T(), s.cache.DeleteVerificationCode(s.ctx, email), "DeleteVerificationCode")
+
+	// Verify it's gone
+	_, err = s.cache.GetVerificationCode(s.ctx, email)
+	require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil after delete")
+}
+
+func (s *EmailCacheSuite) TestDeleteVerificationCode_NonExistent() {
+	// Deleting a non-existent key should not error
+	require.NoError(s.T(), s.cache.DeleteVerificationCode(s.ctx, "nonexistent@example.com"), "DeleteVerificationCode non-existent")
+}
+
+func (s *EmailCacheSuite) TestGetVerificationCode_JSONCorruption() {
+	emailKey := verifyCodeKeyPrefix + "corrupted@example.com"
+
+	require.NoError(s.T(), s.rdb.Set(s.ctx, emailKey, "not-json", 1*time.Minute).Err(), "Set invalid JSON")
+
+	_, err := s.cache.GetVerificationCode(s.ctx, "corrupted@example.com")
+	require.Error(s.T(), err, "expected error for corrupted JSON")
+	require.False(s.T(), errors.Is(err, redis.Nil), "expected decoding error, not redis.Nil")
+}
+
+func TestEmailCacheSuite(t *testing.T) {
+	suite.Run(t, new(EmailCacheSuite))
+}
diff --git a/backend/internal/repository/email_cache_test.go b/backend/internal/repository/email_cache_test.go
new file mode 100644
index 00000000..1c498938
--- /dev/null
+++ b/backend/internal/repository/email_cache_test.go
@@ -0,0 +1,45 @@
+//go:build unit
+
+package repository
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestVerifyCodeKey(t *testing.T) {
+	tests := []struct {
+		name     string
+		email    string
+		expected string
+	}{
+		{
+			name:     "normal_email",
+			email:    "user@example.com",
+			expected: "verify_code:user@example.com",
+		},
+		{
+			name:     "empty_email",
+			email:    "",
+			expected: "verify_code:",
+		},
+		{
+			name:     "email_with_plus",
+			email:    "user+tag@example.com",
+			expected: "verify_code:user+tag@example.com",
+		},
+		{
+			name:     "email_with_special_chars",
+			email:    "user.name+tag@sub.domain.com",
+			expected: "verify_code:user.name+tag@sub.domain.com",
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			got := verifyCodeKey(tc.email)
+			require.Equal(t, tc.expected, got)
+		})
+	}
+}
diff --git a/backend/internal/repository/ent.go b/backend/internal/repository/ent.go
new file mode 100644
index 00000000..8005f114
--- /dev/null
+++ b/backend/internal/repository/ent.go
@@ -0,0 +1,69 @@
+// Package repository provides the application's infrastructure-layer components,
+// including database connection setup, ORM client management, Redis connections,
+// and database migrations.
+package repository
+
+import (
+	"context"
+	"database/sql"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
+	"github.com/Wei-Shaw/sub2api/migrations"
+
+	"entgo.io/ent/dialect"
+	entsql "entgo.io/ent/dialect/sql"
+	_ "github.com/lib/pq" // PostgreSQL driver, registered via side-effect import
+)
+
+// InitEnt initializes the Ent ORM client and returns it together with the underlying *sql.DB.
+//
+// The function performs the following steps:
+// 1. Initializes the global timezone setting so time handling stays consistent
+// 2. Establishes the PostgreSQL database connection
+// 3. Runs database migrations automatically so the schema stays in sync with the code
+// 4. Creates and returns the Ent client instance
+//
+// Important: the caller must close the returned ent.Client (closing it also closes the underlying driver/db).
+//
+// Parameters:
+//   - cfg: application configuration, including database connection info and timezone settings
+//
+// Returns:
+//   - *ent.Client: the Ent ORM client used for database operations
+//   - *sql.DB: the underlying SQL connection, usable for raw SQL
+//   - error: any error encountered during initialization
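+//
+// A minimal caller sketch (the wiring is illustrative):
+//
+//	client, sqlDB, err := InitEnt(cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close() // also closes the underlying driver/db
+//	repo := NewDashboardAggregationRepository(sqlDB)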
+func InitEnt(cfg *config.Config) (*ent.Client, *sql.DB, error) {
+	// Initialize the timezone first so every time operation uses the same zone.
+	// This matters for cross-timezone deployments and consistent log timestamps.
+	if err := timezone.Init(cfg.Timezone); err != nil {
+		return nil, nil, err
+	}
+
+	// Build the database connection string (DSN) with timezone information.
+	// The timezone is passed through to PostgreSQL so time handling is correct at the database level.
+	dsn := cfg.Database.DSNWithTimezone(cfg.Timezone)
+
+	// Open the PostgreSQL connection through Ent's SQL driver.
+	// dialect.Postgres selects the PostgreSQL dialect for SQL generation.
+	drv, err := entsql.Open(dialect.Postgres, dsn)
+	if err != nil {
+		return nil, nil, err
+	}
+	applyDBPoolSettings(drv.DB(), cfg)
+
+	// Make sure the database schema is ready.
+	// The SQL migration files are the source of truth for the schema.
+	// This is more controllable than Ent's auto-migration and supports complex migration scenarios.
+	migrationCtx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	defer cancel()
+	if err := applyMigrationsFS(migrationCtx, drv.DB(), migrations.FS); err != nil {
+		_ = drv.Close() // close the driver on migration failure to avoid leaking resources
+		return nil, nil, err
+	}
+
+	// Create the Ent client bound to the configured database driver.
+	client := ent.NewClient(ent.Driver(drv))
+	return client, drv.DB(), nil
+}
diff --git a/backend/internal/repository/error_translate.go b/backend/internal/repository/error_translate.go
new file mode 100644
index 00000000..b8065ffe
--- /dev/null
+++ b/backend/internal/repository/error_translate.go
@@ -0,0 +1,97 @@
+package repository
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"strings"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/lib/pq"
+)
+
+// clientFromContext returns the transactional client from the context, or the default client if none exists.
+//
+// This helper lets repository methods participate in transactions:
+//   - if the context carries a transaction (set via ent.NewTxContext), the transaction's client is returned
+//   - otherwise the provided default client is returned
+//
+// Usage example:
+//
+//	func (r *someRepo) SomeMethod(ctx context.Context) error {
+//		client := clientFromContext(ctx, r.client)
+//		return client.SomeEntity.Create().Save(ctx)
+//	}
+func clientFromContext(ctx context.Context, defaultClient *dbent.Client) *dbent.Client {
+	if tx := dbent.TxFromContext(ctx); tx != nil {
+		return tx.Client()
+	}
+	return defaultClient
+}
+
+// translatePersistenceError translates database-layer errors into business-layer errors.
+//
+// This is the repository layer's central error-handling function; it keeps database
+// details from leaking into the business layer. With a single translation point, the
+// business layer can rely on semantically meaningful error types (such as
+// ErrUserNotFound) instead of database-specific errors (such as sql.ErrNoRows).
+//
+// Parameters:
+//   - err: the original database error
+//   - notFound: the business error returned when the record does not exist (nil to skip)
+//   - conflict: the business error returned on a unique-constraint violation (nil to skip)
+//
+// Returns:
+//   - the translated business error, or the original error if no rule matched
+//
+// Example:
+//
+//	err := translatePersistenceError(dbErr, service.ErrUserNotFound, service.ErrEmailExists)
+func translatePersistenceError(err error, notFound, conflict *infraerrors.ApplicationError) error {
+	if err == nil {
+		return nil
+	}
+
+	// Cover the NotFound behavior of both the Ent ORM and standard database/sql.
+	// Ent uses its own NotFoundError while the standard library uses sql.ErrNoRows;
+	// handling both keeps the business-error mapping consistent.
+	if notFound != nil && (errors.Is(err, sql.ErrNoRows) || dbent.IsNotFound(err)) {
+		return notFound.WithCause(err)
+	}
+
+	// Handle unique-constraint violations (e.g. email already exists, duplicate name)
+	if conflict != nil && isUniqueConstraintViolation(err) {
+		return conflict.WithCause(err)
+	}
+
+	// No rule matched; return the original error
+	return err
+}
+
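+// A combined sketch of how a repository method typically uses both helpers
+// (the entity, predicate, and error names are illustrative):
+//
+//	func (r *userRepo) GetByEmail(ctx context.Context, email string) (*dbent.User, error) {
+//		client := clientFromContext(ctx, r.client)
+//		u, err := client.User.Query().Where(user.EmailEQ(email)).Only(ctx)
+//		if err != nil {
+//			return nil, translatePersistenceError(err, service.ErrUserNotFound, nil)
+//		}
+//		return u, nil
+//	}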
+
+// isUniqueConstraintViolation reports whether the error is a unique-constraint violation.
+//
+// Several detection strategies are supported:
+// 1. The PostgreSQL-specific error code 23505 (unique_violation)
+// 2. Generic keywords contained in the error message
+//
+// This layered detection keeps the check compatible with different database drivers and ORMs.
+func isUniqueConstraintViolation(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// Prefer the PostgreSQL-specific error code (most precise).
+	// Error code 23505 corresponds to unique_violation.
+	// Reference: https://www.postgresql.org/docs/current/errcodes-appendix.html
+	var pgErr *pq.Error
+	if errors.As(err, &pgErr) {
+		return pgErr.Code == "23505"
+	}
+
+	// Fall back to message-based detection (covers other scenarios).
+	// These keywords cover the error messages of mainstream databases such as PostgreSQL and MySQL.
+	msg := strings.ToLower(err.Error())
+	return strings.Contains(msg, "duplicate key") ||
+		strings.Contains(msg, "unique constraint") ||
+		strings.Contains(msg, "duplicate entry")
+}
diff --git a/backend/internal/repository/fixtures_integration_test.go b/backend/internal/repository/fixtures_integration_test.go
new file mode 100644
index 00000000..23adb4e4
--- /dev/null
+++ b/backend/internal/repository/fixtures_integration_test.go
@@ -0,0 +1,391 @@
+//go:build integration
+
+package repository
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/stretchr/testify/require"
+)
+
+func mustCreateUser(t *testing.T, client *dbent.Client, u *service.User) *service.User {
+	t.Helper()
+	ctx := context.Background()
+
+	if u.Email == "" {
+		u.Email = "user-" + time.Now().Format(time.RFC3339Nano) + "@example.com"
+	}
+	if u.PasswordHash == "" {
+		u.PasswordHash = "test-password-hash"
+	}
+	if u.Role == "" {
+		u.Role = service.RoleUser
+	}
+	if u.Status == "" {
+		u.Status = service.StatusActive
+	}
+	if u.Concurrency == 0 {
+		u.Concurrency = 5
+	}
+
+	create := client.User.Create().
+		SetEmail(u.Email).
+		SetPasswordHash(u.PasswordHash).
+		SetRole(u.Role).
+		SetStatus(u.Status).
+		SetBalance(u.Balance).
+		SetConcurrency(u.Concurrency).
+		SetUsername(u.Username).
+		SetNotes(u.Notes)
+	if !u.CreatedAt.IsZero() {
+		create.SetCreatedAt(u.CreatedAt)
+	}
+	if !u.UpdatedAt.IsZero() {
+		create.SetUpdatedAt(u.UpdatedAt)
+	}
+
+	created, err := create.Save(ctx)
+	require.NoError(t, err, "create user")
+
+	u.ID = created.ID
+	u.CreatedAt = created.CreatedAt
+	u.UpdatedAt = created.UpdatedAt
+
+	if len(u.AllowedGroups) > 0 {
+		for _, groupID := range u.AllowedGroups {
+			_, err := client.UserAllowedGroup.Create().
+				SetUserID(u.ID).
+				SetGroupID(groupID).
+				Save(ctx)
+			require.NoError(t, err, "create user_allowed_groups row")
+		}
+	}
+
+	return u
+}
+
+func mustCreateGroup(t *testing.T, client *dbent.Client, g *service.Group) *service.Group {
+	t.Helper()
+	ctx := context.Background()
+
+	if g.Platform == "" {
+		g.Platform = service.PlatformAnthropic
+	}
+	if g.Status == "" {
+		g.Status = service.StatusActive
+	}
+	if g.SubscriptionType == "" {
+		g.SubscriptionType = service.SubscriptionTypeStandard
+	}
+
+	create := client.Group.Create().
+		SetName(g.Name).
+		SetPlatform(g.Platform).
+		SetStatus(g.Status).
+		SetSubscriptionType(g.SubscriptionType).
+		SetRateMultiplier(g.RateMultiplier).
+ SetIsExclusive(g.IsExclusive) + if g.Description != "" { + create.SetDescription(g.Description) + } + if g.DailyLimitUSD != nil { + create.SetDailyLimitUsd(*g.DailyLimitUSD) + } + if g.WeeklyLimitUSD != nil { + create.SetWeeklyLimitUsd(*g.WeeklyLimitUSD) + } + if g.MonthlyLimitUSD != nil { + create.SetMonthlyLimitUsd(*g.MonthlyLimitUSD) + } + if !g.CreatedAt.IsZero() { + create.SetCreatedAt(g.CreatedAt) + } + if !g.UpdatedAt.IsZero() { + create.SetUpdatedAt(g.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create group") + + g.ID = created.ID + g.CreatedAt = created.CreatedAt + g.UpdatedAt = created.UpdatedAt + return g +} + +func mustCreateProxy(t *testing.T, client *dbent.Client, p *service.Proxy) *service.Proxy { + t.Helper() + ctx := context.Background() + + if p.Protocol == "" { + p.Protocol = "http" + } + if p.Host == "" { + p.Host = "127.0.0.1" + } + if p.Port == 0 { + p.Port = 8080 + } + if p.Status == "" { + p.Status = service.StatusActive + } + + create := client.Proxy.Create(). + SetName(p.Name). + SetProtocol(p.Protocol). + SetHost(p.Host). + SetPort(p.Port). + SetStatus(p.Status) + if p.Username != "" { + create.SetUsername(p.Username) + } + if p.Password != "" { + create.SetPassword(p.Password) + } + if !p.CreatedAt.IsZero() { + create.SetCreatedAt(p.CreatedAt) + } + if !p.UpdatedAt.IsZero() { + create.SetUpdatedAt(p.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create proxy") + + p.ID = created.ID + p.CreatedAt = created.CreatedAt + p.UpdatedAt = created.UpdatedAt + return p +} + +func mustCreateAccount(t *testing.T, client *dbent.Client, a *service.Account) *service.Account { + t.Helper() + ctx := context.Background() + + if a.Platform == "" { + a.Platform = service.PlatformAnthropic + } + if a.Type == "" { + a.Type = service.AccountTypeOAuth + } + if a.Status == "" { + a.Status = service.StatusActive + } + if a.Concurrency == 0 { + a.Concurrency = 3 + } + if a.Priority == 0 { + a.Priority = 50 + } + if !a.Schedulable { + a.Schedulable = true + } + if a.Credentials == nil { + a.Credentials = map[string]any{} + } + if a.Extra == nil { + a.Extra = map[string]any{} + } + + create := client.Account.Create(). + SetName(a.Name). + SetPlatform(a.Platform). + SetType(a.Type). + SetCredentials(a.Credentials). + SetExtra(a.Extra). + SetConcurrency(a.Concurrency). + SetPriority(a.Priority). + SetStatus(a.Status). + SetSchedulable(a.Schedulable). 
+ SetErrorMessage(a.ErrorMessage) + + if a.ProxyID != nil { + create.SetProxyID(*a.ProxyID) + } + if a.LastUsedAt != nil { + create.SetLastUsedAt(*a.LastUsedAt) + } + if a.RateLimitedAt != nil { + create.SetRateLimitedAt(*a.RateLimitedAt) + } + if a.RateLimitResetAt != nil { + create.SetRateLimitResetAt(*a.RateLimitResetAt) + } + if a.OverloadUntil != nil { + create.SetOverloadUntil(*a.OverloadUntil) + } + if a.SessionWindowStart != nil { + create.SetSessionWindowStart(*a.SessionWindowStart) + } + if a.SessionWindowEnd != nil { + create.SetSessionWindowEnd(*a.SessionWindowEnd) + } + if a.SessionWindowStatus != "" { + create.SetSessionWindowStatus(a.SessionWindowStatus) + } + if !a.CreatedAt.IsZero() { + create.SetCreatedAt(a.CreatedAt) + } + if !a.UpdatedAt.IsZero() { + create.SetUpdatedAt(a.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create account") + + a.ID = created.ID + a.CreatedAt = created.CreatedAt + a.UpdatedAt = created.UpdatedAt + return a +} + +func mustCreateApiKey(t *testing.T, client *dbent.Client, k *service.APIKey) *service.APIKey { + t.Helper() + ctx := context.Background() + + if k.Status == "" { + k.Status = service.StatusActive + } + if k.Key == "" { + k.Key = "sk-" + time.Now().Format("150405.000000") + } + if k.Name == "" { + k.Name = "default" + } + + create := client.APIKey.Create(). + SetUserID(k.UserID). + SetKey(k.Key). + SetName(k.Name). + SetStatus(k.Status) + if k.GroupID != nil { + create.SetGroupID(*k.GroupID) + } + if !k.CreatedAt.IsZero() { + create.SetCreatedAt(k.CreatedAt) + } + if !k.UpdatedAt.IsZero() { + create.SetUpdatedAt(k.UpdatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create api key") + + k.ID = created.ID + k.CreatedAt = created.CreatedAt + k.UpdatedAt = created.UpdatedAt + return k +} + +func mustCreateRedeemCode(t *testing.T, client *dbent.Client, c *service.RedeemCode) *service.RedeemCode { + t.Helper() + ctx := context.Background() + + if c.Status == "" { + c.Status = service.StatusUnused + } + if c.Type == "" { + c.Type = service.RedeemTypeBalance + } + if c.Code == "" { + c.Code = "rc-" + time.Now().Format("150405.000000") + } + + create := client.RedeemCode.Create(). + SetCode(c.Code). + SetType(c.Type). + SetValue(c.Value). + SetStatus(c.Status). + SetNotes(c.Notes). + SetValidityDays(c.ValidityDays) + if c.UsedBy != nil { + create.SetUsedBy(*c.UsedBy) + } + if c.UsedAt != nil { + create.SetUsedAt(*c.UsedAt) + } + if c.GroupID != nil { + create.SetGroupID(*c.GroupID) + } + if !c.CreatedAt.IsZero() { + create.SetCreatedAt(c.CreatedAt) + } + + created, err := create.Save(ctx) + require.NoError(t, err, "create redeem code") + + c.ID = created.ID + c.CreatedAt = created.CreatedAt + return c +} + +func mustCreateSubscription(t *testing.T, client *dbent.Client, s *service.UserSubscription) *service.UserSubscription { + t.Helper() + ctx := context.Background() + + if s.Status == "" { + s.Status = service.SubscriptionStatusActive + } + now := time.Now() + if s.StartsAt.IsZero() { + s.StartsAt = now.Add(-1 * time.Hour) + } + if s.ExpiresAt.IsZero() { + s.ExpiresAt = now.Add(24 * time.Hour) + } + if s.AssignedAt.IsZero() { + s.AssignedAt = now + } + if s.CreatedAt.IsZero() { + s.CreatedAt = now + } + if s.UpdatedAt.IsZero() { + s.UpdatedAt = now + } + + create := client.UserSubscription.Create(). + SetUserID(s.UserID). + SetGroupID(s.GroupID). + SetStartsAt(s.StartsAt). + SetExpiresAt(s.ExpiresAt). + SetStatus(s.Status). + SetAssignedAt(s.AssignedAt). + SetNotes(s.Notes). 
+		SetDailyUsageUsd(s.DailyUsageUSD).
+		SetWeeklyUsageUsd(s.WeeklyUsageUSD).
+		SetMonthlyUsageUsd(s.MonthlyUsageUSD)
+
+	if s.AssignedBy != nil {
+		create.SetAssignedBy(*s.AssignedBy)
+	}
+	if !s.CreatedAt.IsZero() {
+		create.SetCreatedAt(s.CreatedAt)
+	}
+	if !s.UpdatedAt.IsZero() {
+		create.SetUpdatedAt(s.UpdatedAt)
+	}
+
+	created, err := create.Save(ctx)
+	require.NoError(t, err, "create user subscription")
+
+	s.ID = created.ID
+	s.CreatedAt = created.CreatedAt
+	s.UpdatedAt = created.UpdatedAt
+	return s
+}
+
+func mustBindAccountToGroup(t *testing.T, client *dbent.Client, accountID, groupID int64, priority int) {
+	t.Helper()
+	ctx := context.Background()
+
+	_, err := client.AccountGroup.Create().
+		SetAccountID(accountID).
+		SetGroupID(groupID).
+		SetPriority(priority).
+		Save(ctx)
+	require.NoError(t, err, "create account_group")
+}
diff --git a/backend/internal/repository/gateway_cache.go b/backend/internal/repository/gateway_cache.go
new file mode 100644
index 00000000..40a9ad05
--- /dev/null
+++ b/backend/internal/repository/gateway_cache.go
@@ -0,0 +1,41 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+)
+
+const stickySessionPrefix = "sticky_session:"
+
+type gatewayCache struct {
+	rdb *redis.Client
+}
+
+func NewGatewayCache(rdb *redis.Client) service.GatewayCache {
+	return &gatewayCache{rdb: rdb}
+}
+
+// buildSessionKey builds the session key; it includes groupID to isolate sessions per group.
+// Format: sticky_session:{groupID}:{sessionHash}
+func buildSessionKey(groupID int64, sessionHash string) string {
+	return fmt.Sprintf("%s%d:%s", stickySessionPrefix, groupID, sessionHash)
+}
+
+func (c *gatewayCache) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) {
+	key := buildSessionKey(groupID, sessionHash)
+	return c.rdb.Get(ctx, key).Int64()
+}
+
+func (c *gatewayCache) SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error {
+	key := buildSessionKey(groupID, sessionHash)
+	return c.rdb.Set(ctx, key, accountID, ttl).Err()
+}
+
+func (c *gatewayCache) RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error {
+	key := buildSessionKey(groupID, sessionHash)
+	return c.rdb.Expire(ctx, key, ttl).Err()
+}
diff --git a/backend/internal/repository/gateway_cache_integration_test.go b/backend/internal/repository/gateway_cache_integration_test.go
new file mode 100644
index 00000000..d8885bca
--- /dev/null
+++ b/backend/internal/repository/gateway_cache_integration_test.go
@@ -0,0 +1,96 @@
+//go:build integration
+
+package repository
+
+import (
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+type GatewayCacheSuite struct {
+	IntegrationRedisSuite
+	cache service.GatewayCache
+}
+
+func (s *GatewayCacheSuite) SetupTest() {
+	s.IntegrationRedisSuite.SetupTest()
+	s.cache = NewGatewayCache(s.rdb)
+}
+
+func (s *GatewayCacheSuite) TestGetSessionAccountID_Missing() {
+	_, err := s.cache.GetSessionAccountID(s.ctx, 1, "nonexistent")
+	require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil for missing session")
+}
+
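+// The cache backs sticky routing: the gateway looks up the session's pinned
+// account, falls back to the scheduler on a miss, and pins the chosen account.
+// A minimal sketch (the scheduler call is illustrative):
+//
+//	accountID, err := cache.GetSessionAccountID(ctx, groupID, hash)
+//	if errors.Is(err, redis.Nil) {
+//		accountID = pickAccount(groupID) // hypothetical scheduler call
+//		_ = cache.SetSessionAccountID(ctx, groupID, hash, accountID, ttl)
+//	} else if err == nil {
+//		_ = cache.RefreshSessionTTL(ctx, groupID, hash, ttl) // sliding expiration
+//	}
+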
+func (s *GatewayCacheSuite) TestSetAndGetSessionAccountID() {
+	sessionID := "s1"
+	accountID := int64(99)
+	groupID := int64(1)
+	sessionTTL := 1 * time.Minute
+
+	require.NoError(s.T(), s.cache.SetSessionAccountID(s.ctx, groupID, sessionID, accountID, sessionTTL), "SetSessionAccountID")
+
+	sid, err := s.cache.GetSessionAccountID(s.ctx, groupID, sessionID)
+	require.NoError(s.T(), err, "GetSessionAccountID")
+	require.Equal(s.T(), accountID, sid, "session id mismatch")
+}
+
+func (s *GatewayCacheSuite) TestSessionAccountID_TTL() {
+	sessionID := "s2"
+	accountID := int64(100)
+	groupID := int64(1)
+	sessionTTL := 1 * time.Minute
+
+	require.NoError(s.T(), s.cache.SetSessionAccountID(s.ctx, groupID, sessionID, accountID, sessionTTL), "SetSessionAccountID")
+
+	sessionKey := buildSessionKey(groupID, sessionID)
+	ttl, err := s.rdb.TTL(s.ctx, sessionKey).Result()
+	require.NoError(s.T(), err, "TTL sessionKey after Set")
+	s.AssertTTLWithin(ttl, 1*time.Second, sessionTTL)
+}
+
+func (s *GatewayCacheSuite) TestRefreshSessionTTL() {
+	sessionID := "s3"
+	accountID := int64(101)
+	groupID := int64(1)
+	initialTTL := 1 * time.Minute
+	refreshTTL := 3 * time.Minute
+
+	require.NoError(s.T(), s.cache.SetSessionAccountID(s.ctx, groupID, sessionID, accountID, initialTTL), "SetSessionAccountID")
+
+	require.NoError(s.T(), s.cache.RefreshSessionTTL(s.ctx, groupID, sessionID, refreshTTL), "RefreshSessionTTL")
+
+	sessionKey := buildSessionKey(groupID, sessionID)
+	ttl, err := s.rdb.TTL(s.ctx, sessionKey).Result()
+	require.NoError(s.T(), err, "TTL after Refresh")
+	s.AssertTTLWithin(ttl, 1*time.Second, refreshTTL)
+}
+
+func (s *GatewayCacheSuite) TestRefreshSessionTTL_MissingKey() {
+	// RefreshSessionTTL on a missing key should not error (no-op)
+	err := s.cache.RefreshSessionTTL(s.ctx, 1, "missing-session", 1*time.Minute)
+	require.NoError(s.T(), err, "RefreshSessionTTL on missing key should not error")
+}
+
+func (s *GatewayCacheSuite) TestGetSessionAccountID_CorruptedValue() {
+	sessionID := "corrupted"
+	groupID := int64(1)
+	sessionKey := buildSessionKey(groupID, sessionID)
+
+	// Set a non-integer value
+	require.NoError(s.T(), s.rdb.Set(s.ctx, sessionKey, "not-a-number", 1*time.Minute).Err(), "Set invalid value")
+
+	_, err := s.cache.GetSessionAccountID(s.ctx, groupID, sessionID)
+	require.Error(s.T(), err, "expected error for corrupted value")
+	require.False(s.T(), errors.Is(err, redis.Nil), "expected parsing error, not redis.Nil")
+}
+
+func TestGatewayCacheSuite(t *testing.T) {
+	suite.Run(t, new(GatewayCacheSuite))
+}
diff --git a/backend/internal/repository/gateway_routing_integration_test.go b/backend/internal/repository/gateway_routing_integration_test.go
new file mode 100644
index 00000000..5566d2e9
--- /dev/null
+++ b/backend/internal/repository/gateway_routing_integration_test.go
@@ -0,0 +1,250 @@
+//go:build integration
+
+package repository
+
+import (
+	"context"
+	"testing"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/stretchr/testify/suite"
+)
+
+// GatewayRoutingSuite tests the database queries behind gateway routing.
+// It verifies account selection and traffic-splitting logic against a real database.
type GatewayRoutingSuite struct {
+	suite.Suite
+	ctx         context.Context
+	client      *dbent.Client
+	accountRepo *accountRepository
+}
+
+func (s *GatewayRoutingSuite) SetupTest() {
+	s.ctx = context.Background()
+	tx := testEntTx(s.T())
+	s.client = tx.Client()
+	s.accountRepo = newAccountRepositoryWithSQL(s.client, tx)
+}
+
+func TestGatewayRoutingSuite(t *testing.T) {
+	suite.Run(t, new(GatewayRoutingSuite))
+}
+
+// TestListSchedulableByPlatforms_GeminiAndAntigravity verifies multi-platform account queries
+func (s *GatewayRoutingSuite) TestListSchedulableByPlatforms_GeminiAndAntigravity() {
+	// Create accounts on each platform
+	geminiAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "gemini-oauth",
+		Platform:    service.PlatformGemini,
+		Type:        service.AccountTypeOAuth,
+		Status:      service.StatusActive,
+		Schedulable: true,
+		Priority:    1,
+	})
+
+	antigravityAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "antigravity-oauth",
+		Platform:    service.PlatformAntigravity,
+		Type:        service.AccountTypeOAuth,
+		Status:      service.StatusActive,
+		Schedulable: true,
+		Priority:    2,
+		Credentials: map[string]any{
+			"access_token":  "test-token",
+			"refresh_token": "test-refresh",
+			"project_id":    "test-project",
+		},
+	})
+
+	// Create an anthropic account that must not be selected
+	mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "anthropic-oauth",
+		Platform:    service.PlatformAnthropic,
+		Type:        service.AccountTypeOAuth,
+		Status:      service.StatusActive,
+		Schedulable: true,
+		Priority:    0,
+	})
+
+	// Query the gemini + antigravity platforms
+	accounts, err := s.accountRepo.ListSchedulableByPlatforms(s.ctx, []string{
+		service.PlatformGemini,
+		service.PlatformAntigravity,
+	})
+
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 2, "should return the gemini and antigravity accounts")
+
+	// Verify the platforms of the returned accounts
+	platforms := make(map[string]bool)
+	for _, acc := range accounts {
+		platforms[acc.Platform] = true
+	}
+	s.Require().True(platforms[service.PlatformGemini], "should include the gemini account")
+	s.Require().True(platforms[service.PlatformAntigravity], "should include the antigravity account")
+	s.Require().False(platforms[service.PlatformAnthropic], "should not include the anthropic account")
+
+	// Verify the account IDs match
+	ids := make(map[int64]bool)
+	for _, acc := range accounts {
+		ids[acc.ID] = true
+	}
+	s.Require().True(ids[geminiAcc.ID])
+	s.Require().True(ids[antigravityAcc.ID])
+}
+
+// TestListSchedulableByGroupIDAndPlatforms_WithGroupBinding verifies filtering by group
+func (s *GatewayRoutingSuite) TestListSchedulableByGroupIDAndPlatforms_WithGroupBinding() {
+	// Create a gemini group
+	group := mustCreateGroup(s.T(), s.client, &service.Group{
+		Name:     "gemini-group",
+		Platform: service.PlatformGemini,
+		Status:   service.StatusActive,
+	})
+
+	// Create accounts
+	boundAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "bound-antigravity",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+	unboundAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "unbound-antigravity",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	// Bind only one account to the group
+	mustBindAccountToGroup(s.T(), s.client, boundAcc.ID, group.ID, 1)
+
+	// Query the accounts within the group
+	accounts, err := s.accountRepo.ListSchedulableByGroupIDAndPlatforms(s.ctx, group.ID, []string{
+		service.PlatformGemini,
+		service.PlatformAntigravity,
+	})
+
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 1, "should only return accounts bound to the group")
+	s.Require().Equal(boundAcc.ID, accounts[0].ID)
+
+	// Confirm the unbound account is not in the result
+	for _, acc := range accounts {
+		s.Require().NotEqual(unboundAcc.ID, acc.ID, "should not include the unbound account")
+	}
+}
+
+// TestListSchedulableByPlatform_Antigravity verifies single-platform queries
+func (s *GatewayRoutingSuite) TestListSchedulableByPlatform_Antigravity() {
+	// Create accounts on several platforms
+	mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "gemini-1",
+		Platform:    service.PlatformGemini,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	antigravity := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "antigravity-1",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	// Query only the antigravity platform
+	accounts, err := s.accountRepo.ListSchedulableByPlatform(s.ctx, service.PlatformAntigravity)
+
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 1)
+	s.Require().Equal(antigravity.ID, accounts[0].ID)
+	s.Require().Equal(service.PlatformAntigravity, accounts[0].Platform)
+}
+
+// TestSchedulableFilter_ExcludesInactive verifies that non-schedulable accounts are filtered out
+func (s *GatewayRoutingSuite) TestSchedulableFilter_ExcludesInactive() {
+	// Create a schedulable account
+	activeAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "active-antigravity",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	// Create a non-schedulable account (create first, then update, because the fixture defaults Schedulable to true)
+	inactiveAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:     "inactive-antigravity",
+		Platform: service.PlatformAntigravity,
+		Status:   service.StatusActive,
+	})
+	s.Require().NoError(s.client.Account.UpdateOneID(inactiveAcc.ID).SetSchedulable(false).Exec(s.ctx))
+
+	// Create an account in error status
+	mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "error-antigravity",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusError,
+		Schedulable: true,
+	})
+
+	accounts, err := s.accountRepo.ListSchedulableByPlatform(s.ctx, service.PlatformAntigravity)
+
+	s.Require().NoError(err)
+	s.Require().Len(accounts, 1, "should only return the schedulable active account")
+	s.Require().Equal(activeAcc.ID, accounts[0].ID)
+}
+
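+// The decision exercised by the next test could be factored into a small
+// helper; a sketch (this function is illustrative, not part of the handlers):
+//
+//	func routeForPlatform(platform string) string {
+//		if platform == service.PlatformAntigravity {
+//			return "AntigravityGatewayService.ForwardGemini"
+//		}
+//		return "GeminiMessagesCompatService.ForwardNative"
+//	}
+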
+// TestPlatformRoutingDecision verifies the platform routing decision.
+// It simulates the handler layer's routing logic after an account has been selected.
+func (s *GatewayRoutingSuite) TestPlatformRoutingDecision() {
+	// Create accounts on both platforms
+	geminiAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "gemini-route-test",
+		Platform:    service.PlatformGemini,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	antigravityAcc := mustCreateAccount(s.T(), s.client, &service.Account{
+		Name:        "antigravity-route-test",
+		Platform:    service.PlatformAntigravity,
+		Status:      service.StatusActive,
+		Schedulable: true,
+	})
+
+	tests := []struct {
+		name            string
+		accountID       int64
+		expectedService string
+	}{
+		{
+			name:            "gemini account routes to ForwardNative",
+			accountID:       geminiAcc.ID,
+			expectedService: "GeminiMessagesCompatService.ForwardNative",
+		},
+		{
+			name:            "antigravity account routes to ForwardGemini",
+			accountID:       antigravityAcc.ID,
+			expectedService: "AntigravityGatewayService.ForwardGemini",
+		},
+	}
+
+	for _, tt := range tests {
+		s.Run(tt.name, func() {
+			// Fetch the account from the database
+			account, err := s.accountRepo.GetByID(s.ctx, tt.accountID)
+			s.Require().NoError(err)
+
+			// Simulate the handler layer's routing decision
+			var routedService string
+			if account.Platform == service.PlatformAntigravity {
+				routedService = "AntigravityGatewayService.ForwardGemini"
+			} else {
+				routedService = "GeminiMessagesCompatService.ForwardNative"
+			}
+
+			s.Require().Equal(tt.expectedService, routedService)
+		})
+	}
+}
diff --git a/backend/internal/repository/gemini_oauth_client.go b/backend/internal/repository/gemini_oauth_client.go
new file mode 100644
index 00000000..8b7fe625
--- /dev/null
+++ b/backend/internal/repository/gemini_oauth_client.go
@@ -0,0 +1,119 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/geminicli"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/imroc/req/v3"
+)
+
+type geminiOAuthClient struct {
+	tokenURL string
+	cfg      *config.Config
+}
+
+func NewGeminiOAuthClient(cfg *config.Config) service.GeminiOAuthClient {
+	return &geminiOAuthClient{
+		tokenURL: geminicli.TokenURL,
+		cfg:      cfg,
+	}
+}
+
+func (c *geminiOAuthClient) ExchangeCode(ctx context.Context, oauthType, code,
codeVerifier, redirectURI, proxyURL string) (*geminicli.TokenResponse, error) { + client := createGeminiReqClient(proxyURL) + + // Use different OAuth clients based on oauthType: + // - code_assist: always use built-in Gemini CLI OAuth client (public) + // - google_one: always use built-in Gemini CLI OAuth client (public) + // - ai_studio: requires a user-provided OAuth client + oauthCfgInput := geminicli.OAuthConfig{ + ClientID: c.cfg.Gemini.OAuth.ClientID, + ClientSecret: c.cfg.Gemini.OAuth.ClientSecret, + Scopes: c.cfg.Gemini.OAuth.Scopes, + } + if oauthType == "code_assist" || oauthType == "google_one" { + // Force use of built-in Gemini CLI OAuth client + oauthCfgInput.ClientID = "" + oauthCfgInput.ClientSecret = "" + } + + oauthCfg, err := geminicli.EffectiveOAuthConfig(oauthCfgInput, oauthType) + if err != nil { + return nil, err + } + + formData := url.Values{} + formData.Set("grant_type", "authorization_code") + formData.Set("client_id", oauthCfg.ClientID) + formData.Set("client_secret", oauthCfg.ClientSecret) + formData.Set("code", code) + formData.Set("code_verifier", codeVerifier) + formData.Set("redirect_uri", redirectURI) + + var tokenResp geminicli.TokenResponse + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). + Post(c.tokenURL) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token exchange failed: status %d, body: %s", resp.StatusCode, geminicli.SanitizeBodyForLogs(resp.String())) + } + return &tokenResp, nil +} + +func (c *geminiOAuthClient) RefreshToken(ctx context.Context, oauthType, refreshToken, proxyURL string) (*geminicli.TokenResponse, error) { + client := createGeminiReqClient(proxyURL) + + oauthCfgInput := geminicli.OAuthConfig{ + ClientID: c.cfg.Gemini.OAuth.ClientID, + ClientSecret: c.cfg.Gemini.OAuth.ClientSecret, + Scopes: c.cfg.Gemini.OAuth.Scopes, + } + if oauthType == "code_assist" || oauthType == "google_one" { + // Force use of built-in Gemini CLI OAuth client + oauthCfgInput.ClientID = "" + oauthCfgInput.ClientSecret = "" + } + + oauthCfg, err := geminicli.EffectiveOAuthConfig(oauthCfgInput, oauthType) + if err != nil { + return nil, err + } + + formData := url.Values{} + formData.Set("grant_type", "refresh_token") + formData.Set("refresh_token", refreshToken) + formData.Set("client_id", oauthCfg.ClientID) + formData.Set("client_secret", oauthCfg.ClientSecret) + + var tokenResp geminicli.TokenResponse + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). 
+ Post(c.tokenURL) + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token refresh failed: status %d, body: %s", resp.StatusCode, geminicli.SanitizeBodyForLogs(resp.String())) + } + return &tokenResp, nil +} + +func createGeminiReqClient(proxyURL string) *req.Client { + return getSharedReqClient(reqClientOptions{ + ProxyURL: proxyURL, + Timeout: 60 * time.Second, + }) +} diff --git a/backend/internal/repository/gemini_token_cache.go b/backend/internal/repository/gemini_token_cache.go new file mode 100644 index 00000000..82c14def --- /dev/null +++ b/backend/internal/repository/gemini_token_cache.go @@ -0,0 +1,49 @@ +package repository + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/redis/go-redis/v9" +) + +const ( + geminiTokenKeyPrefix = "gemini:token:" + geminiRefreshLockKeyPrefix = "gemini:refresh_lock:" +) + +type geminiTokenCache struct { + rdb *redis.Client +} + +func NewGeminiTokenCache(rdb *redis.Client) service.GeminiTokenCache { + return &geminiTokenCache{rdb: rdb} +} + +func (c *geminiTokenCache) GetAccessToken(ctx context.Context, cacheKey string) (string, error) { + key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey) + return c.rdb.Get(ctx, key).Result() +} + +func (c *geminiTokenCache) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error { + key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey) + return c.rdb.Set(ctx, key, token, ttl).Err() +} + +func (c *geminiTokenCache) DeleteAccessToken(ctx context.Context, cacheKey string) error { + key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey) + return c.rdb.Del(ctx, key).Err() +} + +func (c *geminiTokenCache) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) { + key := fmt.Sprintf("%s%s", geminiRefreshLockKeyPrefix, cacheKey) + return c.rdb.SetNX(ctx, key, 1, ttl).Result() +} + +func (c *geminiTokenCache) ReleaseRefreshLock(ctx context.Context, cacheKey string) error { + key := fmt.Sprintf("%s%s", geminiRefreshLockKeyPrefix, cacheKey) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/gemini_token_cache_integration_test.go b/backend/internal/repository/gemini_token_cache_integration_test.go new file mode 100644 index 00000000..4fe89865 --- /dev/null +++ b/backend/internal/repository/gemini_token_cache_integration_test.go @@ -0,0 +1,47 @@ +//go:build integration + +package repository + +import ( + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type GeminiTokenCacheSuite struct { + IntegrationRedisSuite + cache service.GeminiTokenCache +} + +func (s *GeminiTokenCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewGeminiTokenCache(s.rdb) +} + +func (s *GeminiTokenCacheSuite) TestDeleteAccessToken() { + cacheKey := "project-123" + token := "token-value" + require.NoError(s.T(), s.cache.SetAccessToken(s.ctx, cacheKey, token, time.Minute)) + + got, err := s.cache.GetAccessToken(s.ctx, cacheKey) + require.NoError(s.T(), err) + require.Equal(s.T(), token, got) + + require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, cacheKey)) + + _, err = s.cache.GetAccessToken(s.ctx, cacheKey) + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil after delete") +} + +func (s *GeminiTokenCacheSuite) 
TestDeleteAccessToken_MissingKey() { + require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, "missing-key")) +} + +func TestGeminiTokenCacheSuite(t *testing.T) { + suite.Run(t, new(GeminiTokenCacheSuite)) +} diff --git a/backend/internal/repository/gemini_token_cache_test.go b/backend/internal/repository/gemini_token_cache_test.go new file mode 100644 index 00000000..4fcebfdd --- /dev/null +++ b/backend/internal/repository/gemini_token_cache_test.go @@ -0,0 +1,28 @@ +//go:build unit + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +func TestGeminiTokenCache_DeleteAccessToken_RedisError(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Addr: "127.0.0.1:1", + DialTimeout: 50 * time.Millisecond, + ReadTimeout: 50 * time.Millisecond, + WriteTimeout: 50 * time.Millisecond, + }) + t.Cleanup(func() { + _ = rdb.Close() + }) + + cache := NewGeminiTokenCache(rdb) + err := cache.DeleteAccessToken(context.Background(), "broken") + require.Error(t, err) +} diff --git a/backend/internal/repository/geminicli_codeassist_client.go b/backend/internal/repository/geminicli_codeassist_client.go new file mode 100644 index 00000000..d7f54e85 --- /dev/null +++ b/backend/internal/repository/geminicli_codeassist_client.go @@ -0,0 +1,104 @@ +package repository + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/imroc/req/v3" +) + +type geminiCliCodeAssistClient struct { + baseURL string +} + +func NewGeminiCliCodeAssistClient() service.GeminiCliCodeAssistClient { + return &geminiCliCodeAssistClient{baseURL: geminicli.GeminiCliBaseURL} +} + +func (c *geminiCliCodeAssistClient) LoadCodeAssist(ctx context.Context, accessToken, proxyURL string, reqBody *geminicli.LoadCodeAssistRequest) (*geminicli.LoadCodeAssistResponse, error) { + if reqBody == nil { + reqBody = defaultLoadCodeAssistRequest() + } + + var out geminicli.LoadCodeAssistResponse + resp, err := createGeminiCliReqClient(proxyURL).R(). + SetContext(ctx). + SetHeader("Authorization", "Bearer "+accessToken). + SetHeader("Content-Type", "application/json"). + SetHeader("User-Agent", geminicli.GeminiCLIUserAgent). + SetBody(reqBody). + SetSuccessResult(&out). + Post(c.baseURL + "/v1internal:loadCodeAssist") + if err != nil { + fmt.Printf("[CodeAssist] LoadCodeAssist request error: %v\n", err) + return nil, fmt.Errorf("request failed: %w", err) + } + if !resp.IsSuccessState() { + body := geminicli.SanitizeBodyForLogs(resp.String()) + fmt.Printf("[CodeAssist] LoadCodeAssist failed: status %d, body: %s\n", resp.StatusCode, body) + return nil, fmt.Errorf("loadCodeAssist failed: status %d, body: %s", resp.StatusCode, body) + } + fmt.Printf("[CodeAssist] LoadCodeAssist success: status %d, response: %+v\n", resp.StatusCode, out) + return &out, nil +} + +func (c *geminiCliCodeAssistClient) OnboardUser(ctx context.Context, accessToken, proxyURL string, reqBody *geminicli.OnboardUserRequest) (*geminicli.OnboardUserResponse, error) { + if reqBody == nil { + reqBody = defaultOnboardUserRequest() + } + + fmt.Printf("[CodeAssist] OnboardUser request body: %+v\n", reqBody) + + var out geminicli.OnboardUserResponse + resp, err := createGeminiCliReqClient(proxyURL).R(). + SetContext(ctx). + SetHeader("Authorization", "Bearer "+accessToken). + SetHeader("Content-Type", "application/json"). + SetHeader("User-Agent", geminicli.GeminiCLIUserAgent). + SetBody(reqBody). 
+		SetSuccessResult(&out).
+		Post(c.baseURL + "/v1internal:onboardUser")
+	if err != nil {
+		fmt.Printf("[CodeAssist] OnboardUser request error: %v\n", err)
+		return nil, fmt.Errorf("request failed: %w", err)
+	}
+	if !resp.IsSuccessState() {
+		body := geminicli.SanitizeBodyForLogs(resp.String())
+		fmt.Printf("[CodeAssist] OnboardUser failed: status %d, body: %s\n", resp.StatusCode, body)
+		return nil, fmt.Errorf("onboardUser failed: status %d, body: %s", resp.StatusCode, body)
+	}
+	fmt.Printf("[CodeAssist] OnboardUser success: status %d, response: %+v\n", resp.StatusCode, out)
+	return &out, nil
+}
+
+func createGeminiCliReqClient(proxyURL string) *req.Client {
+	return getSharedReqClient(reqClientOptions{
+		ProxyURL: proxyURL,
+		Timeout:  30 * time.Second,
+	})
+}
+
+func defaultLoadCodeAssistRequest() *geminicli.LoadCodeAssistRequest {
+	return &geminicli.LoadCodeAssistRequest{
+		Metadata: geminicli.LoadCodeAssistMetadata{
+			IDEType:    "ANTIGRAVITY",
+			Platform:   "PLATFORM_UNSPECIFIED",
+			PluginType: "GEMINI",
+		},
+	}
+}
+
+func defaultOnboardUserRequest() *geminicli.OnboardUserRequest {
+	return &geminicli.OnboardUserRequest{
+		TierID: "LEGACY",
+		Metadata: geminicli.LoadCodeAssistMetadata{
+			IDEType:    "ANTIGRAVITY",
+			Platform:   "PLATFORM_UNSPECIFIED",
+			PluginType: "GEMINI",
+		},
+	}
+}
diff --git a/backend/internal/repository/github_release_service.go b/backend/internal/repository/github_release_service.go
new file mode 100644
index 00000000..77839626
--- /dev/null
+++ b/backend/internal/repository/github_release_service.go
@@ -0,0 +1,136 @@
+package repository
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/httpclient"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+type githubReleaseClient struct {
+	httpClient         *http.Client
+	downloadHTTPClient *http.Client
+}
+
+// NewGitHubReleaseClient creates a GitHub Release client.
+// An empty proxyURL connects to GitHub directly; http/https/socks5/socks5h proxies are supported.
+func NewGitHubReleaseClient(proxyURL string) service.GitHubReleaseClient {
+	sharedClient, err := httpclient.GetClient(httpclient.Options{
+		Timeout:  30 * time.Second,
+		ProxyURL: proxyURL,
+	})
+	if err != nil {
+		sharedClient = &http.Client{Timeout: 30 * time.Second}
+	}
+
+	// The download client needs a longer timeout
+	downloadClient, err := httpclient.GetClient(httpclient.Options{
+		Timeout:  10 * time.Minute,
+		ProxyURL: proxyURL,
+	})
+	if err != nil {
+		downloadClient = &http.Client{Timeout: 10 * time.Minute}
+	}
+
+	return &githubReleaseClient{
+		httpClient:         sharedClient,
+		downloadHTTPClient: downloadClient,
+	}
+}
+
+func (c *githubReleaseClient) FetchLatestRelease(ctx context.Context, repo string) (*service.GitHubRelease, error) {
+	url := fmt.Sprintf("https://api.github.com/repos/%s/releases/latest", repo)
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Accept", "application/vnd.github.v3+json")
+	req.Header.Set("User-Agent", "Sub2API-Updater")
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("GitHub API returned %d", resp.StatusCode)
+	}
+
+	var release service.GitHubRelease
+	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
+		return nil, err
+	}
+
+	return &release, nil
+}
+
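+// FetchChecksumFile (below) pairs with DownloadFile for integrity checking; a
+// minimal verification sketch (the expected-sum lookup is illustrative):
+//
+//	sums, _ := c.FetchChecksumFile(ctx, checksumURL)
+//	f, _ := os.Open(dest)
+//	defer f.Close()
+//	h := sha256.New()
+//	_, _ = io.Copy(h, f)
+//	got := hex.EncodeToString(h.Sum(nil))
+//	// compare got against the asset's entry parsed out of sums
+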
+func (c *githubReleaseClient) DownloadFile(ctx context.Context, url, dest string, maxSize int64) error {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return err
+	}
+
+	// Use the pre-configured download client (proxy settings already applied)
+	resp, err := c.downloadHTTPClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("download returned %d", resp.StatusCode)
+	}
+
+	// SECURITY: Check Content-Length if available
+	if resp.ContentLength > maxSize {
+		return fmt.Errorf("file too large: %d bytes (max %d)", resp.ContentLength, maxSize)
+	}
+
+	out, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = out.Close() }()
+
+	// SECURITY: Use LimitReader to enforce max download size even if Content-Length is missing/wrong
+	limited := io.LimitReader(resp.Body, maxSize+1)
+	written, err := io.Copy(out, limited)
+	if err != nil {
+		return err
+	}
+
+	// Check if we hit the limit (downloaded more than maxSize)
+	if written > maxSize {
+		_ = os.Remove(dest) // Clean up partial file (best-effort)
+		return fmt.Errorf("download exceeded maximum size of %d bytes", maxSize)
+	}
+
+	return nil
+}
+
+func (c *githubReleaseClient) FetchChecksumFile(ctx context.Context, url string) ([]byte, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
+	}
+
+	return io.ReadAll(resp.Body)
+}
diff --git a/backend/internal/repository/github_release_service_test.go b/backend/internal/repository/github_release_service_test.go
new file mode 100644
index 00000000..d375a193
--- /dev/null
+++ b/backend/internal/repository/github_release_service_test.go
@@ -0,0 +1,317 @@
+package repository
+
+import (
+	"bytes"
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+type GitHubReleaseServiceSuite struct {
+	suite.Suite
+	srv     *httptest.Server
+	client  *githubReleaseClient
+	tempDir string
+}
+
+// testTransport redirects requests to the test server
+type testTransport struct {
+	testServerURL string
+}
+
+func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	// Rewrite the URL to point to our test server
+	testURL := t.testServerURL + req.URL.Path
+	newReq, err := http.NewRequestWithContext(req.Context(), req.Method, testURL, req.Body)
+	if err != nil {
+		return nil, err
+	}
+	newReq.Header = req.Header
+	return http.DefaultTransport.RoundTrip(newReq)
+}
+
+func newTestGitHubReleaseClient() *githubReleaseClient {
+	return &githubReleaseClient{
+		httpClient:         &http.Client{},
+		downloadHTTPClient: &http.Client{},
+	}
+}
+
+func (s *GitHubReleaseServiceSuite) SetupTest() {
+	s.tempDir = s.T().TempDir()
+}
+
+func (s *GitHubReleaseServiceSuite) TearDownTest() {
+	if s.srv != nil {
+		s.srv.Close()
+		s.srv = nil
+	}
+}
+
+func (s *GitHubReleaseServiceSuite) TestDownloadFile_EnforcesMaxSize_ContentLength() {
+	s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Length", "100")
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write(bytes.Repeat([]byte("a"), 100))
+	}))
+
+	s.client = newTestGitHubReleaseClient()
+
+	dest := filepath.Join(s.tempDir, "file1.bin")
+	err := s.client.DownloadFile(context.Background(), s.srv.URL, dest, 10)
+	
require.Error(s.T(), err, "expected error for oversized download with Content-Length") + + _, statErr := os.Stat(dest) + require.Error(s.T(), statErr, "expected file to not exist for rejected download") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_EnforcesMaxSize_Chunked() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Force chunked encoding (unknown Content-Length) by flushing headers before writing. + w.WriteHeader(http.StatusOK) + if fl, ok := w.(http.Flusher); ok { + fl.Flush() + } + for i := 0; i < 10; i++ { + _, _ = w.Write(bytes.Repeat([]byte("b"), 10)) + if fl, ok := w.(http.Flusher); ok { + fl.Flush() + } + } + })) + + s.client = newTestGitHubReleaseClient() + + dest := filepath.Join(s.tempDir, "file2.bin") + err := s.client.DownloadFile(context.Background(), s.srv.URL, dest, 10) + require.Error(s.T(), err, "expected error for oversized chunked download") + + _, statErr := os.Stat(dest) + require.Error(s.T(), statErr, "expected file to be cleaned up for oversized chunked download") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_Success() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + if fl, ok := w.(http.Flusher); ok { + fl.Flush() + } + for i := 0; i < 10; i++ { + _, _ = w.Write(bytes.Repeat([]byte("b"), 10)) + if fl, ok := w.(http.Flusher); ok { + fl.Flush() + } + } + })) + + s.client = newTestGitHubReleaseClient() + + dest := filepath.Join(s.tempDir, "file3.bin") + err := s.client.DownloadFile(context.Background(), s.srv.URL, dest, 200) + require.NoError(s.T(), err, "expected success") + + b, err := os.ReadFile(dest) + require.NoError(s.T(), err, "read") + require.True(s.T(), strings.HasPrefix(string(b), "b"), "downloaded content should start with 'b'") + require.Len(s.T(), b, 100, "downloaded content length mismatch") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_404() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + s.client = newTestGitHubReleaseClient() + + dest := filepath.Join(s.tempDir, "notfound.bin") + err := s.client.DownloadFile(context.Background(), s.srv.URL, dest, 100) + require.Error(s.T(), err, "expected error for 404") + + _, statErr := os.Stat(dest) + require.Error(s.T(), statErr, "expected file to not exist for 404") +} + +func (s *GitHubReleaseServiceSuite) TestFetchChecksumFile_Success() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("sum")) + })) + + s.client = newTestGitHubReleaseClient() + + body, err := s.client.FetchChecksumFile(context.Background(), s.srv.URL) + require.NoError(s.T(), err, "FetchChecksumFile") + require.Equal(s.T(), "sum", string(body), "checksum body mismatch") +} + +func (s *GitHubReleaseServiceSuite) TestFetchChecksumFile_Non200() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + + s.client = newTestGitHubReleaseClient() + + _, err := s.client.FetchChecksumFile(context.Background(), s.srv.URL) + require.Error(s.T(), err, "expected error for non-200") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_ContextCancel() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + 
<-r.Context().Done() + })) + + s.client = newTestGitHubReleaseClient() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + dest := filepath.Join(s.tempDir, "cancelled.bin") + err := s.client.DownloadFile(ctx, s.srv.URL, dest, 100) + require.Error(s.T(), err, "expected error for cancelled context") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_InvalidURL() { + s.client = newTestGitHubReleaseClient() + + dest := filepath.Join(s.tempDir, "invalid.bin") + err := s.client.DownloadFile(context.Background(), "://invalid-url", dest, 100) + require.Error(s.T(), err, "expected error for invalid URL") +} + +func (s *GitHubReleaseServiceSuite) TestDownloadFile_InvalidDestPath() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("content")) + })) + + s.client = newTestGitHubReleaseClient() + + // Use a path that cannot be created (directory doesn't exist) + dest := filepath.Join(s.tempDir, "nonexistent", "subdir", "file.bin") + err := s.client.DownloadFile(context.Background(), s.srv.URL, dest, 100) + require.Error(s.T(), err, "expected error for invalid destination path") +} + +func (s *GitHubReleaseServiceSuite) TestFetchChecksumFile_InvalidURL() { + s.client = newTestGitHubReleaseClient() + + _, err := s.client.FetchChecksumFile(context.Background(), "://invalid-url") + require.Error(s.T(), err, "expected error for invalid URL") +} + +func (s *GitHubReleaseServiceSuite) TestFetchLatestRelease_Success() { + releaseJSON := `{ + "tag_name": "v1.0.0", + "name": "Release 1.0.0", + "body": "Release notes", + "html_url": "https://github.com/test/repo/releases/v1.0.0", + "assets": [ + { + "name": "app-linux-amd64.tar.gz", + "browser_download_url": "https://github.com/test/repo/releases/download/v1.0.0/app-linux-amd64.tar.gz" + } + ] + }` + + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(s.T(), "/repos/test/repo/releases/latest", r.URL.Path) + require.Equal(s.T(), "application/vnd.github.v3+json", r.Header.Get("Accept")) + require.Equal(s.T(), "Sub2API-Updater", r.Header.Get("User-Agent")) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(releaseJSON)) + })) + + // Use custom transport to redirect requests to test server + s.client = &githubReleaseClient{ + httpClient: &http.Client{ + Transport: &testTransport{testServerURL: s.srv.URL}, + }, + downloadHTTPClient: &http.Client{}, + } + + release, err := s.client.FetchLatestRelease(context.Background(), "test/repo") + require.NoError(s.T(), err) + require.Equal(s.T(), "v1.0.0", release.TagName) + require.Equal(s.T(), "Release 1.0.0", release.Name) + require.Len(s.T(), release.Assets, 1) + require.Equal(s.T(), "app-linux-amd64.tar.gz", release.Assets[0].Name) +} + +func (s *GitHubReleaseServiceSuite) TestFetchLatestRelease_Non200() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + s.client = &githubReleaseClient{ + httpClient: &http.Client{ + Transport: &testTransport{testServerURL: s.srv.URL}, + }, + downloadHTTPClient: &http.Client{}, + } + + _, err := s.client.FetchLatestRelease(context.Background(), "test/repo") + require.Error(s.T(), err) + require.Contains(s.T(), err.Error(), "404") +} + +func (s *GitHubReleaseServiceSuite) TestFetchLatestRelease_InvalidJSON() { + s.srv = 
newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("not valid json")) + })) + + s.client = &githubReleaseClient{ + httpClient: &http.Client{ + Transport: &testTransport{testServerURL: s.srv.URL}, + }, + downloadHTTPClient: &http.Client{}, + } + + _, err := s.client.FetchLatestRelease(context.Background(), "test/repo") + require.Error(s.T(), err) +} + +func (s *GitHubReleaseServiceSuite) TestFetchLatestRelease_ContextCancel() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-r.Context().Done() + })) + + s.client = &githubReleaseClient{ + httpClient: &http.Client{ + Transport: &testTransport{testServerURL: s.srv.URL}, + }, + downloadHTTPClient: &http.Client{}, + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + _, err := s.client.FetchLatestRelease(ctx, "test/repo") + require.Error(s.T(), err) +} + +func (s *GitHubReleaseServiceSuite) TestFetchChecksumFile_ContextCancel() { + s.srv = newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + <-r.Context().Done() + })) + + s.client = newTestGitHubReleaseClient() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + _, err := s.client.FetchChecksumFile(ctx, s.srv.URL) + require.Error(s.T(), err) +} + +func TestGitHubReleaseServiceSuite(t *testing.T) { + suite.Run(t, new(GitHubReleaseServiceSuite)) +} diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go new file mode 100644 index 00000000..9f3c1a57 --- /dev/null +++ b/backend/internal/repository/group_repo.go @@ -0,0 +1,413 @@ +package repository + +import ( + "context" + "database/sql" + "errors" + "log" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" +) + +type sqlExecutor interface { + ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) +} + +type groupRepository struct { + client *dbent.Client + sql sqlExecutor +} + +func NewGroupRepository(client *dbent.Client, sqlDB *sql.DB) service.GroupRepository { + return newGroupRepositoryWithSQL(client, sqlDB) +} + +func newGroupRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *groupRepository { + return &groupRepository{client: client, sql: sqlq} +} + +func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) error { + builder := r.client.Group.Create(). + SetName(groupIn.Name). + SetDescription(groupIn.Description). + SetPlatform(groupIn.Platform). + SetRateMultiplier(groupIn.RateMultiplier). + SetIsExclusive(groupIn.IsExclusive). + SetStatus(groupIn.Status). + SetSubscriptionType(groupIn.SubscriptionType). + SetNillableDailyLimitUsd(groupIn.DailyLimitUSD). + SetNillableWeeklyLimitUsd(groupIn.WeeklyLimitUSD). + SetNillableMonthlyLimitUsd(groupIn.MonthlyLimitUSD). + SetNillableImagePrice1k(groupIn.ImagePrice1K). + SetNillableImagePrice2k(groupIn.ImagePrice2K). + SetNillableImagePrice4k(groupIn.ImagePrice4K). + SetDefaultValidityDays(groupIn.DefaultValidityDays). + SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). 
+ SetNillableFallbackGroupID(groupIn.FallbackGroupID) + + created, err := builder.Save(ctx) + if err == nil { + groupIn.ID = created.ID + groupIn.CreatedAt = created.CreatedAt + groupIn.UpdatedAt = created.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupIn.ID, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group create failed: group=%d err=%v", groupIn.ID, err) + } + } + return translatePersistenceError(err, nil, service.ErrGroupExists) +} + +func (r *groupRepository) GetByID(ctx context.Context, id int64) (*service.Group, error) { + out, err := r.GetByIDLite(ctx, id) + if err != nil { + return nil, err + } + count, _ := r.GetAccountCount(ctx, out.ID) + out.AccountCount = count + return out, nil +} + +func (r *groupRepository) GetByIDLite(ctx context.Context, id int64) (*service.Group, error) { + // AccountCount is intentionally not loaded here; use GetByID when needed. + m, err := r.client.Group.Query(). + Where(group.IDEQ(id)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil) + } + + return groupEntityToService(m), nil +} + +func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) error { + builder := r.client.Group.UpdateOneID(groupIn.ID). + SetName(groupIn.Name). + SetDescription(groupIn.Description). + SetPlatform(groupIn.Platform). + SetRateMultiplier(groupIn.RateMultiplier). + SetIsExclusive(groupIn.IsExclusive). + SetStatus(groupIn.Status). + SetSubscriptionType(groupIn.SubscriptionType). + SetNillableDailyLimitUsd(groupIn.DailyLimitUSD). + SetNillableWeeklyLimitUsd(groupIn.WeeklyLimitUSD). + SetNillableMonthlyLimitUsd(groupIn.MonthlyLimitUSD). + SetNillableImagePrice1k(groupIn.ImagePrice1K). + SetNillableImagePrice2k(groupIn.ImagePrice2K). + SetNillableImagePrice4k(groupIn.ImagePrice4K). + SetDefaultValidityDays(groupIn.DefaultValidityDays). 
+		SetClaudeCodeOnly(groupIn.ClaudeCodeOnly)
+
+	// Handle FallbackGroupID: clear it when nil, otherwise set it
+	if groupIn.FallbackGroupID != nil {
+		builder = builder.SetFallbackGroupID(*groupIn.FallbackGroupID)
+	} else {
+		builder = builder.ClearFallbackGroupID()
+	}
+
+	updated, err := builder.Save(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrGroupNotFound, service.ErrGroupExists)
+	}
+	groupIn.UpdatedAt = updated.UpdatedAt
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupIn.ID, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue group update failed: group=%d err=%v", groupIn.ID, err)
+	}
+	return nil
+}
+
+func (r *groupRepository) Delete(ctx context.Context, id int64) error {
+	_, err := r.client.Group.Delete().Where(group.IDEQ(id)).Exec(ctx)
+	if err != nil {
+		return translatePersistenceError(err, service.ErrGroupNotFound, nil)
+	}
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &id, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue group delete failed: group=%d err=%v", id, err)
+	}
+	return nil
+}
+
+func (r *groupRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Group, *pagination.PaginationResult, error) {
+	return r.ListWithFilters(ctx, params, "", "", "", nil)
+}
+
+func (r *groupRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]service.Group, *pagination.PaginationResult, error) {
+	q := r.client.Group.Query()
+
+	if platform != "" {
+		q = q.Where(group.PlatformEQ(platform))
+	}
+	if status != "" {
+		q = q.Where(group.StatusEQ(status))
+	}
+	if search != "" {
+		q = q.Where(group.Or(
+			group.NameContainsFold(search),
+			group.DescriptionContainsFold(search),
+		))
+	}
+	if isExclusive != nil {
+		q = q.Where(group.IsExclusiveEQ(*isExclusive))
+	}
+
+	total, err := q.Count(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	groups, err := q.
+		Offset(params.Offset()).
+		Limit(params.Limit()).
+		Order(dbent.Asc(group.FieldID)).
+		All(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	groupIDs := make([]int64, 0, len(groups))
+	outGroups := make([]service.Group, 0, len(groups))
+	for i := range groups {
+		g := groupEntityToService(groups[i])
+		outGroups = append(outGroups, *g)
+		groupIDs = append(groupIDs, g.ID)
+	}
+
+	counts, err := r.loadAccountCounts(ctx, groupIDs)
+	if err == nil {
+		for i := range outGroups {
+			outGroups[i].AccountCount = counts[outGroups[i].ID]
+		}
+	}
+
+	return outGroups, paginationResultFromTotal(int64(total), params), nil
+}
+
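+// Query sketch for ListWithFilters above (illustrative filter values; every
+// non-empty filter narrows the result, and a nil isExclusive means "don't
+// filter on exclusivity"):
+//
+//	exclusive := false
+//	groups, page, err := repo.ListWithFilters(ctx,
+//		pagination.PaginationParams{Page: 1, PageSize: 20},
+//		service.PlatformAnthropic, service.StatusActive, "team", &exclusive)
+//	_ = page.Total // total matches before pagination is applied
+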
+func (r *groupRepository) ListActive(ctx context.Context) ([]service.Group, error) {
+	groups, err := r.client.Group.Query().
+		Where(group.StatusEQ(service.StatusActive)).
+		Order(dbent.Asc(group.FieldID)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	groupIDs := make([]int64, 0, len(groups))
+	outGroups := make([]service.Group, 0, len(groups))
+	for i := range groups {
+		g := groupEntityToService(groups[i])
+		outGroups = append(outGroups, *g)
+		groupIDs = append(groupIDs, g.ID)
+	}
+
+	counts, err := r.loadAccountCounts(ctx, groupIDs)
+	if err == nil {
+		for i := range outGroups {
+			outGroups[i].AccountCount = counts[outGroups[i].ID]
+		}
+	}
+
+	return outGroups, nil
+}
+
+func (r *groupRepository) ListActiveByPlatform(ctx context.Context, platform string) ([]service.Group, error) {
+	groups, err := r.client.Group.Query().
+		Where(group.StatusEQ(service.StatusActive), group.PlatformEQ(platform)).
+		Order(dbent.Asc(group.FieldID)).
+		All(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	groupIDs := make([]int64, 0, len(groups))
+	outGroups := make([]service.Group, 0, len(groups))
+	for i := range groups {
+		g := groupEntityToService(groups[i])
+		outGroups = append(outGroups, *g)
+		groupIDs = append(groupIDs, g.ID)
+	}
+
+	counts, err := r.loadAccountCounts(ctx, groupIDs)
+	if err == nil {
+		for i := range outGroups {
+			outGroups[i].AccountCount = counts[outGroups[i].ID]
+		}
+	}
+
+	return outGroups, nil
+}
+
+func (r *groupRepository) ExistsByName(ctx context.Context, name string) (bool, error) {
+	return r.client.Group.Query().Where(group.NameEQ(name)).Exist(ctx)
+}
+
+func (r *groupRepository) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
+	var count int64
+	if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM account_groups WHERE group_id = $1", []any{groupID}, &count); err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+func (r *groupRepository) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
+	res, err := r.sql.ExecContext(ctx, "DELETE FROM account_groups WHERE group_id = $1", groupID)
+	if err != nil {
+		return 0, err
+	}
+	affected, _ := res.RowsAffected()
+	if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupID, nil); err != nil {
+		log.Printf("[SchedulerOutbox] enqueue group account clear failed: group=%d err=%v", groupID, err)
+	}
+	return affected, nil
+}
+
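+// Caller-side sketch for the cascade below (illustrative only; the
+// notification helper is hypothetical and would live in the service layer):
+//
+//	userIDs, err := repo.DeleteCascade(ctx, groupID)
+//	if err != nil {
+//		return err
+//	}
+//	notifySubscriptionGroupRemoved(userIDs) // hypothetical follow-up step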
+func (r *groupRepository) DeleteCascade(ctx context.Context, id int64) ([]int64, error) {
+	g, err := r.client.Group.Query().Where(group.IDEQ(id)).Only(ctx)
+	if err != nil {
+		return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil)
+	}
+	groupSvc := groupEntityToService(g)
+
+	// Wrap everything in a single ent transaction: this avoids the driver-assertion
+	// issues of hand-building an ent client on top of *sql.Tx, and it keeps the
+	// cascade delete atomic.
+	tx, err := r.client.Tx(ctx)
+	if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
+		return nil, err
+	}
+	exec := r.client
+	txClient := r.client
+	if err == nil {
+		defer func() { _ = tx.Rollback() }()
+		exec = tx.Client()
+		txClient = exec
+	}
+	// When err is dbent.ErrTxStarted, reuse the current client so we join the
+	// transaction already in progress.
+
+	// Lock the group row to avoid concurrent writes while we cascade.
+	// Scan manually via exec.QueryContext so the lock is taken inside the same
+	// transaction and "not found" can be distinguished from other errors.
+	rows, err := exec.QueryContext(ctx, "SELECT id FROM groups WHERE id = $1 AND deleted_at IS NULL FOR UPDATE", id)
+	if err != nil {
+		return nil, err
+	}
+	var lockedID int64
+	if rows.Next() {
+		if err := rows.Scan(&lockedID); err != nil {
+			_ = rows.Close()
+			return nil, err
+		}
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	if lockedID == 0 {
+		return nil, service.ErrGroupNotFound
+	}
+
+	var affectedUserIDs []int64
+	if groupSvc.IsSubscriptionType() {
+		// 1. Soft-delete subscriptions and collect the affected users.
+		// Only select subscriptions that are not soft-deleted, so users whose
+		// subscriptions were already cancelled are not notified.
+		rows, err := exec.QueryContext(ctx, "SELECT user_id FROM user_subscriptions WHERE group_id = $1 AND deleted_at IS NULL", id)
+		if err != nil {
+			return nil, err
+		}
+		for rows.Next() {
+			var userID int64
+			if scanErr := rows.Scan(&userID); scanErr != nil {
+				_ = rows.Close()
+				return nil, scanErr
+			}
+			affectedUserIDs = append(affectedUserIDs, userID)
+		}
+		if err := rows.Close(); err != nil {
+			return nil, err
+		}
+		if err := rows.Err(); err != nil {
+			return nil, err
+		}
+
+		// Soft-delete the subscriptions: set deleted_at rather than hard-deleting
+		if _, err := exec.ExecContext(ctx, "UPDATE user_subscriptions SET deleted_at = NOW() WHERE group_id = $1 AND deleted_at IS NULL", id); err != nil {
+			return nil, err
+		}
+	}
+
+	// 2. Clear group_id for api keys bound to this group.
+	// Only touch records that are not soft-deleted, so deleted data stays intact
+	// and audit/history remain consistent. This matches APIKeyRepository's
+	// soft-delete semantics and avoids cross-module behavior drift.
+	if _, err := txClient.APIKey.Update().
+		Where(apikey.GroupIDEQ(id), apikey.DeletedAtIsNil()).
+		ClearGroupID().
+		Save(ctx); err != nil {
+		return nil, err
+	}
+
+	// 3. Remove the group id from the user_allowed_groups join table.
+	// The legacy users.allowed_groups column is deprecated and no longer kept in sync.
+	if _, err := exec.ExecContext(ctx, "DELETE FROM user_allowed_groups WHERE group_id = $1", id); err != nil {
+		return nil, err
+	}
+
+	// 4. Delete account_groups join rows.
+	if _, err := exec.ExecContext(ctx, "DELETE FROM account_groups WHERE group_id = $1", id); err != nil {
+		return nil, err
+	}
+
+	// 5. Soft-delete the group itself.
+ if _, err := txClient.Group.Delete().Where(group.IDEQ(id)).Exec(ctx); err != nil { + return nil, err + } + + if tx != nil { + if err := tx.Commit(); err != nil { + return nil, err + } + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &id, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group cascade delete failed: group=%d err=%v", id, err) + } + + return affectedUserIDs, nil +} + +func (r *groupRepository) loadAccountCounts(ctx context.Context, groupIDs []int64) (counts map[int64]int64, err error) { + counts = make(map[int64]int64, len(groupIDs)) + if len(groupIDs) == 0 { + return counts, nil + } + + rows, err := r.sql.QueryContext( + ctx, + "SELECT group_id, COUNT(*) FROM account_groups WHERE group_id = ANY($1) GROUP BY group_id", + pq.Array(groupIDs), + ) + if err != nil { + return nil, err + } + defer func() { + if closeErr := rows.Close(); closeErr != nil && err == nil { + err = closeErr + counts = nil + } + }() + + for rows.Next() { + var groupID int64 + var count int64 + if err = rows.Scan(&groupID, &count); err != nil { + return nil, err + } + counts[groupID] = count + } + if err = rows.Err(); err != nil { + return nil, err + } + + return counts, nil +} diff --git a/backend/internal/repository/group_repo_integration_test.go b/backend/internal/repository/group_repo_integration_test.go new file mode 100644 index 00000000..c31a9ec4 --- /dev/null +++ b/backend/internal/repository/group_repo_integration_test.go @@ -0,0 +1,677 @@ +//go:build integration + +package repository + +import ( + "context" + "database/sql" + "errors" + "testing" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type GroupRepoSuite struct { + suite.Suite + ctx context.Context + tx *dbent.Tx + repo *groupRepository +} + +type forbidSQLExecutor struct { + called bool +} + +func (s *forbidSQLExecutor) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + s.called = true + return nil, errors.New("unexpected sql exec") +} + +func (s *forbidSQLExecutor) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + s.called = true + return nil, errors.New("unexpected sql query") +} + +func (s *GroupRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.tx = tx + s.repo = newGroupRepositoryWithSQL(tx.Client(), tx) +} + +func TestGroupRepoSuite(t *testing.T) { + suite.Run(t, new(GroupRepoSuite)) +} + +// --- Create / GetByID / Update / Delete --- + +func (s *GroupRepoSuite) TestCreate() { + group := &service.Group{ + Name: "test-create", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + + err := s.repo.Create(s.ctx, group) + s.Require().NoError(err, "Create") + s.Require().NotZero(group.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, group.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("test-create", got.Name) +} + +func (s *GroupRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") + s.Require().ErrorIs(err, service.ErrGroupNotFound) +} + +func (s *GroupRepoSuite) TestGetByIDLite_DoesNotUseAccountCount() { + group := &service.Group{ + Name: "lite-group", + Platform: service.PlatformAnthropic, + 
RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + spy := &forbidSQLExecutor{} + repo := newGroupRepositoryWithSQL(s.tx.Client(), spy) + + got, err := repo.GetByIDLite(s.ctx, group.ID) + s.Require().NoError(err) + s.Require().Equal(group.ID, got.ID) + s.Require().False(spy.called, "expected no direct sql executor usage") +} + +func (s *GroupRepoSuite) TestUpdate() { + group := &service.Group{ + Name: "original", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + group.Name = "updated" + err := s.repo.Update(s.ctx, group) + s.Require().NoError(err, "Update") + + got, err := s.repo.GetByID(s.ctx, group.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("updated", got.Name) +} + +func (s *GroupRepoSuite) TestDelete() { + group := &service.Group{ + Name: "to-delete", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + err := s.repo.Delete(s.ctx, group.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, group.ID) + s.Require().Error(err, "expected error after delete") + s.Require().ErrorIs(err, service.ErrGroupNotFound) +} + +// --- List / ListWithFilters --- + +func (s *GroupRepoSuite) TestList() { + baseGroups, basePage, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "List base") + + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + groups, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "List") + s.Require().Len(groups, len(baseGroups)+2) + s.Require().Equal(basePage.Total+2, page.Total) +} + +func (s *GroupRepoSuite) TestListWithFilters_Platform() { + baseGroups, _, err := s.repo.ListWithFilters( + s.ctx, + pagination.PaginationParams{Page: 1, PageSize: 10}, + service.PlatformOpenAI, + "", + "", + nil, + ) + s.Require().NoError(err, "ListWithFilters base") + + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformOpenAI, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.PlatformOpenAI, "", "", nil) + s.Require().NoError(err) + s.Require().Len(groups, len(baseGroups)+1) + // Verify all groups are OpenAI platform + for 
_, g := range groups { + s.Require().Equal(service.PlatformOpenAI, g.Platform) + } +} + +func (s *GroupRepoSuite) TestListWithFilters_Status() { + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusDisabled, "", nil) + s.Require().NoError(err) + s.Require().Len(groups, 1) + s.Require().Equal(service.StatusDisabled, groups[0].Status) +} + +func (s *GroupRepoSuite) TestListWithFilters_IsExclusive() { + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: true, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + isExclusive := true + groups, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "", &isExclusive) + s.Require().NoError(err) + s.Require().Len(groups, 1) + s.Require().True(groups[0].IsExclusive) +} + +func (s *GroupRepoSuite) TestListWithFilters_Search() { + newRepo := func() (*groupRepository, context.Context) { + tx := testEntTx(s.T()) + return newGroupRepositoryWithSQL(tx.Client(), tx), context.Background() + } + + containsID := func(groups []service.Group, id int64) bool { + for i := range groups { + if groups[i].ID == id { + return true + } + } + return false + } + + mustCreate := func(repo *groupRepository, ctx context.Context, g *service.Group) *service.Group { + s.Require().NoError(repo.Create(ctx, g)) + s.Require().NotZero(g.ID) + return g + } + + newGroup := func(name string) *service.Group { + return &service.Group{ + Name: name, + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + } + + s.Run("search_name_should_match", func() { + repo, ctx := newRepo() + + target := mustCreate(repo, ctx, newGroup("it-group-search-name-target")) + other := mustCreate(repo, ctx, newGroup("it-group-search-name-other")) + + groups, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", "name-target", nil) + s.Require().NoError(err) + s.Require().True(containsID(groups, target.ID), "expected target group to match by name") + s.Require().False(containsID(groups, other.ID), "expected other group to be filtered out") + }) + + s.Run("search_description_should_match", func() { + repo, ctx := newRepo() + + target := newGroup("it-group-search-desc-target") + target.Description = "something about desc-needle in here" + target = mustCreate(repo, ctx, target) + + other := newGroup("it-group-search-desc-other") + other.Description = "nothing to see here" + other = mustCreate(repo, ctx, other) + + groups, _, err := repo.ListWithFilters(ctx, 
pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", "desc-needle", nil) + s.Require().NoError(err) + s.Require().True(containsID(groups, target.ID), "expected target group to match by description") + s.Require().False(containsID(groups, other.ID), "expected other group to be filtered out") + }) + + s.Run("search_nonexistent_should_return_empty", func() { + repo, ctx := newRepo() + + _ = mustCreate(repo, ctx, newGroup("it-group-search-nonexistent-baseline")) + + search := s.T().Name() + "__no_such_group__" + groups, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", search, nil) + s.Require().NoError(err) + s.Require().Empty(groups) + }) + + s.Run("search_should_be_case_insensitive", func() { + repo, ctx := newRepo() + + target := mustCreate(repo, ctx, newGroup("MiXeDCaSe-Needle")) + other := mustCreate(repo, ctx, newGroup("it-group-search-case-other")) + + groups, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", "mixedcase-needle", nil) + s.Require().NoError(err) + s.Require().True(containsID(groups, target.ID), "expected case-insensitive match") + s.Require().False(containsID(groups, other.ID), "expected other group to be filtered out") + }) + + s.Run("search_should_escape_like_wildcards", func() { + repo, ctx := newRepo() + + percentTarget := mustCreate(repo, ctx, newGroup("it-group-search-100%-target")) + percentOther := mustCreate(repo, ctx, newGroup("it-group-search-100X-other")) + + groups, _, err := repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", "100%", nil) + s.Require().NoError(err) + s.Require().True(containsID(groups, percentTarget.ID), "expected literal %% match") + s.Require().False(containsID(groups, percentOther.ID), "expected %% not to act as wildcard") + + underscoreTarget := mustCreate(repo, ctx, newGroup("it-group-search-ab_cd-target")) + underscoreOther := mustCreate(repo, ctx, newGroup("it-group-search-abXcd-other")) + + groups, _, err = repo.ListWithFilters(ctx, pagination.PaginationParams{Page: 1, PageSize: 50}, "", "", "ab_cd", nil) + s.Require().NoError(err) + s.Require().True(containsID(groups, underscoreTarget.ID), "expected literal _ match") + s.Require().False(containsID(groups, underscoreOther.ID), "expected _ not to act as wildcard") + }) +} + +func (s *GroupRepoSuite) TestListWithFilters_AccountCount() { + g1 := &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + g2 := &service.Group{ + Name: "g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: true, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g1)) + s.Require().NoError(s.repo.Create(s.ctx, g2)) + + var accountID int64 + s.Require().NoError(scanSingleRow( + s.ctx, + s.tx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + []any{"acc1", service.PlatformAnthropic, service.AccountTypeOAuth}, + &accountID, + )) + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", accountID, g1.ID, 1) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", accountID, g2.ID, 1) + s.Require().NoError(err) 
+ + isExclusive := true + groups, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.PlatformAnthropic, service.StatusActive, "", &isExclusive) + s.Require().NoError(err, "ListWithFilters") + s.Require().Equal(int64(1), page.Total) + s.Require().Len(groups, 1) + s.Require().Equal(g2.ID, groups[0].ID, "ListWithFilters returned wrong group") + s.Require().Equal(int64(1), groups[0].AccountCount, "AccountCount mismatch") +} + +// --- ListActive / ListActiveByPlatform --- + +func (s *GroupRepoSuite) TestListActive() { + baseGroups, err := s.repo.ListActive(s.ctx) + s.Require().NoError(err, "ListActive base") + + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "active1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "inactive1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + groups, err := s.repo.ListActive(s.ctx) + s.Require().NoError(err, "ListActive") + s.Require().Len(groups, len(baseGroups)+1) + // Verify our test group is in the results + var found bool + for _, g := range groups { + if g.Name == "active1" { + found = true + break + } + } + s.Require().True(found, "active1 group should be in results") +} + +func (s *GroupRepoSuite) TestListActiveByPlatform() { + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g2", + Platform: service.PlatformOpenAI, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "g3", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusDisabled, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + groups, err := s.repo.ListActiveByPlatform(s.ctx, service.PlatformAnthropic) + s.Require().NoError(err, "ListActiveByPlatform") + // 1 default anthropic group + 1 test active anthropic group = 2 total + s.Require().Len(groups, 2) + // Verify our test group is in the results + var found bool + for _, g := range groups { + if g.Name == "g1" { + found = true + break + } + } + s.Require().True(found, "g1 group should be in results") +} + +// --- ExistsByName --- + +func (s *GroupRepoSuite) TestExistsByName() { + s.Require().NoError(s.repo.Create(s.ctx, &service.Group{ + Name: "existing-group", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + })) + + exists, err := s.repo.ExistsByName(s.ctx, "existing-group") + s.Require().NoError(err, "ExistsByName") + s.Require().True(exists) + + notExists, err := s.repo.ExistsByName(s.ctx, "non-existing") + s.Require().NoError(err) + s.Require().False(notExists) +} + +// --- GetAccountCount --- + +func (s *GroupRepoSuite) TestGetAccountCount() { + group := &service.Group{ + Name: "g-count", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + 
IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + var a1 int64 + s.Require().NoError(scanSingleRow( + s.ctx, + s.tx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + []any{"a1", service.PlatformAnthropic, service.AccountTypeOAuth}, + &a1, + )) + var a2 int64 + s.Require().NoError(scanSingleRow( + s.ctx, + s.tx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + []any{"a2", service.PlatformAnthropic, service.AccountTypeOAuth}, + &a2, + )) + + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a1, group.ID, 1) + s.Require().NoError(err) + _, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a2, group.ID, 2) + s.Require().NoError(err) + + count, err := s.repo.GetAccountCount(s.ctx, group.ID) + s.Require().NoError(err, "GetAccountCount") + s.Require().Equal(int64(2), count) +} + +func (s *GroupRepoSuite) TestGetAccountCount_Empty() { + group := &service.Group{ + Name: "g-empty", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + count, err := s.repo.GetAccountCount(s.ctx, group.ID) + s.Require().NoError(err) + s.Require().Zero(count) +} + +// --- DeleteAccountGroupsByGroupID --- + +func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID() { + g := &service.Group{ + Name: "g-del", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g)) + var accountID int64 + s.Require().NoError(scanSingleRow( + s.ctx, + s.tx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + []any{"acc-del", service.PlatformAnthropic, service.AccountTypeOAuth}, + &accountID, + )) + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", accountID, g.ID, 1) + s.Require().NoError(err) + + affected, err := s.repo.DeleteAccountGroupsByGroupID(s.ctx, g.ID) + s.Require().NoError(err, "DeleteAccountGroupsByGroupID") + s.Require().Equal(int64(1), affected, "expected 1 affected row") + + count, err := s.repo.GetAccountCount(s.ctx, g.ID) + s.Require().NoError(err, "GetAccountCount") + s.Require().Equal(int64(0), count, "expected 0 account groups") +} + +func (s *GroupRepoSuite) TestDeleteAccountGroupsByGroupID_MultipleAccounts() { + g := &service.Group{ + Name: "g-multi", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g)) + + insertAccount := func(name string) int64 { + var id int64 + s.Require().NoError(scanSingleRow( + s.ctx, + s.tx, + "INSERT INTO accounts (name, platform, type) VALUES ($1, $2, $3) RETURNING id", + []any{name, service.PlatformAnthropic, service.AccountTypeOAuth}, + &id, + )) + return id + } + a1 := insertAccount("a1") + a2 := insertAccount("a2") + a3 := insertAccount("a3") + _, err := s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, 
group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a1, g.ID, 1)
+	s.Require().NoError(err)
+	_, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a2, g.ID, 2)
+	s.Require().NoError(err)
+	_, err = s.tx.ExecContext(s.ctx, "INSERT INTO account_groups (account_id, group_id, priority, created_at) VALUES ($1, $2, $3, NOW())", a3, g.ID, 3)
+	s.Require().NoError(err)
+
+	affected, err := s.repo.DeleteAccountGroupsByGroupID(s.ctx, g.ID)
+	s.Require().NoError(err)
+	s.Require().Equal(int64(3), affected)
+
+	count, _ := s.repo.GetAccountCount(s.ctx, g.ID)
+	s.Require().Zero(count)
+}
+
+// --- Soft-delete filtering tests ---
+
+func (s *GroupRepoSuite) TestDelete_SoftDelete_NotVisibleInList() {
+	group := &service.Group{
+		Name:             "to-soft-delete",
+		Platform:         service.PlatformAnthropic,
+		RateMultiplier:   1.0,
+		IsExclusive:      false,
+		Status:           service.StatusActive,
+		SubscriptionType: service.SubscriptionTypeStandard,
+	}
+	s.Require().NoError(s.repo.Create(s.ctx, group))
+
+	// Capture the list size before deletion
+	listBefore, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 100})
+	s.Require().NoError(err)
+	beforeCount := len(listBefore)
+
+	// Soft delete
+	err = s.repo.Delete(s.ctx, group.ID)
+	s.Require().NoError(err, "Delete (soft delete)")
+
+	// Verify the soft-deleted group no longer shows up in the list
+	listAfter, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 100})
+	s.Require().NoError(err)
+	s.Require().Len(listAfter, beforeCount-1, "soft deleted group should not appear in list")
+
+	// Verify GetByID cannot find it either
+	_, err = s.repo.GetByID(s.ctx, group.ID)
+	s.Require().Error(err)
+	s.Require().ErrorIs(err, service.ErrGroupNotFound)
+}
+
+func (s *GroupRepoSuite) TestDelete_SoftDeletedGroup_lockForUpdate() {
+	group := &service.Group{
+		Name:             "lock-soft-delete",
+		Platform:         service.PlatformAnthropic,
+		RateMultiplier:   1.0,
+		IsExclusive:      false,
+		Status:           service.StatusActive,
+		SubscriptionType: service.SubscriptionTypeStandard,
+	}
+	s.Require().NoError(s.repo.Create(s.ctx, group))
+
+	// Soft delete
+	err := s.repo.Delete(s.ctx, group.ID)
+	s.Require().NoError(err)
+
+	// Verify GetByID returns ErrGroupNotFound for the soft-deleted group.
+	// This shows the deleted_at IS NULL filter in lockForUpdate is working.
+	_, err = s.repo.GetByID(s.ctx, group.ID)
+	s.Require().Error(err, "should fail to get soft-deleted group")
+	s.Require().ErrorIs(err, service.ErrGroupNotFound)
+}
diff --git a/backend/internal/repository/http_upstream.go b/backend/internal/repository/http_upstream.go
new file mode 100644
index 00000000..feb32541
--- /dev/null
+++ b/backend/internal/repository/http_upstream.go
@@ -0,0 +1,653 @@
+package repository
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/proxyutil"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
+)
+
+// Default configuration constants.
+// These values act as fallbacks when the config file does not set them.
+const (
+	// directProxyKey: cache-key marker used when no proxy is configured
+	directProxyKey = "direct"
+	// defaultMaxIdleConns: default cap on total idle connections.
+	// With HTTP/2, a single connection is multiplexed, so 240 sustains high concurrency.
+	defaultMaxIdleConns = 240
+	// defaultMaxIdleConnsPerHost: default cap on idle connections per host
+	defaultMaxIdleConnsPerHost = 120
+	// defaultMaxConnsPerHost: default cap on connections per host (active included).
+	// Once the cap is reached, new requests wait instead of opening connections without bound.
+	defaultMaxConnsPerHost = 240
+	// defaultIdleConnTimeout: default idle-connection timeout (90s).
+	// Idle connections are closed afterwards to free system resources
+	// (keep this below the upstream LB timeout).
+	defaultIdleConnTimeout = 90 * time.Second
+	// defaultResponseHeaderTimeout: default wait for response headers (5 minutes).
+	// LLM requests can sit in a queue for a while, so this needs to be generous.
+	defaultResponseHeaderTimeout = 300 * time.Second
+	// defaultMaxUpstreamClients: default cap on cached clients.
+	// Past this, the least recently used client is evicted.
+	defaultMaxUpstreamClients = 5000
+	// defaultClientIdleTTLSeconds: default idle-reclaim threshold for clients (15 minutes)
+	defaultClientIdleTTLSeconds = 900
+)
+
+var errUpstreamClientLimitReached = errors.New("upstream client cache limit reached")
+
+// poolSettings holds the connection-pool parameters.
+// It bundles everything the Transport needs for pooling.
+type poolSettings struct {
+	maxIdleConns          int           // cap on total idle connections
+	maxIdleConnsPerHost   int           // cap on idle connections per host
+	maxConnsPerHost       int           // cap on connections per host (active included)
+	idleConnTimeout       time.Duration // idle-connection timeout
+	responseHeaderTimeout time.Duration // wait for response headers
+}
+
+// upstreamClientEntry is a cache entry for an upstream client.
+// It records the client instance plus the metadata used for pool management
+// and eviction.
+type upstreamClientEntry struct {
+	client   *http.Client // the HTTP client instance
+	proxyKey string       // proxy identity (detects proxy changes)
+	poolKey  string       // pool-config identity (detects config changes)
+	lastUsed int64        // last-used timestamp (ns), drives LRU eviction
+	inFlight int64        // requests in flight; not evictable while > 0
+}
+
+// httpUpstreamService is a generic HTTP upstream service.
+// It sends requests to arbitrary HTTP APIs (Claude, OpenAI, ...) with optional proxying.
+//
+// Architecture:
+//   - Client instances are cached per isolation strategy (proxy/account/account_proxy)
+//   - Each client owns an independent Transport connection pool
+//   - Eviction combines LRU with an idle-TTL policy
+//
+// Performance notes:
+//  1. Caching clients per isolation strategy avoids re-creating http.Client constantly
+//  2. Reusing the Transport pool cuts TCP handshake and TLS negotiation overhead
+//  3. Account-level isolation plus idle reclaim reduces connection-level correlation risk
+//  4. Once the connection cap is reached, requests wait for a free connection instead of creating more
+//  5. Only idle clients are reclaimed, so active requests are never interrupted
+//  6. With HTTP/2 multiplexing, the connection cap is not a cap on concurrent requests
+//  7. When the proxy changes, the old pool is dropped so the wrong proxy is never reused
+//  8. Under account isolation, the pool size tracks the account's concurrency limit
+type httpUpstreamService struct {
+	cfg     *config.Config                  // global configuration
+	mu      sync.RWMutex                    // guards the clients map
+	clients map[string]*upstreamClientEntry // client cache; key depends on isolation strategy
+}
+
+// NewHTTPUpstream creates the generic HTTP upstream service.
+// Transports are built from the pool parameters in the configuration.
+//
+// Parameters:
+//   - cfg: global config, carrying the pool parameters and the isolation strategy
+//
+// Returns:
+//   - a service.HTTPUpstream implementation
+func NewHTTPUpstream(cfg *config.Config) service.HTTPUpstream {
+	return &httpUpstreamService{
+		cfg:     cfg,
+		clients: make(map[string]*upstreamClientEntry),
+	}
+}
+
+// Do executes an HTTP request.
+// It picks (or creates) a client according to the isolation strategy and
+// tracks the request's lifecycle.
+//
+// Parameters:
+//   - req: the HTTP request
+//   - proxyURL: proxy address; empty string means direct connection
+//   - accountID: account ID, used for account-level isolation
+//   - accountConcurrency: the account's concurrency limit, used to size the pool
+//
+// Returns:
+//   - *http.Response: the response (Body is wrapped so closing it updates the counters)
+//   - error: request error
+//
+// Notes:
+//   - Callers must close resp.Body, otherwise the inFlight counter leaks
+//   - Clients with inFlight > 0 are never evicted, so active requests are not interrupted
+func (s *httpUpstreamService) Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) {
+	if err := s.validateRequestHost(req); err != nil {
+		return nil, err
+	}
+
+	// Fetch or create the matching client and mark the request as in flight
+	entry, err := s.acquireClient(proxyURL, accountID, accountConcurrency)
+	if err != nil {
+		return nil, err
+	}
+
+	// Execute the request
+	resp, err := entry.client.Do(req)
+	if err != nil {
+		// Request failed: decrement the counter right away
+		atomic.AddInt64(&entry.inFlight, -1)
+		atomic.StoreInt64(&entry.lastUsed, time.Now().UnixNano())
+		return nil, err
+	}
+
+	// Wrap the body so that closing it decrements the counter and refreshes
+	// the timestamp. This keeps streaming responses (e.g. SSE) safe from
+	// eviction until they are fully read.
+	resp.Body = wrapTrackedBody(resp.Body, func() {
+		atomic.AddInt64(&entry.inFlight, -1)
+		atomic.StoreInt64(&entry.lastUsed, time.Now().UnixNano())
+	})
+
+	return resp, nil
+}
+
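+// Call-site sketch for Do above (illustrative only; the account fields and
+// upstream URL are placeholders, not APIs defined in this file):
+//
+//	req, _ := http.NewRequestWithContext(ctx, http.MethodPost, upstreamURL, body)
+//	resp, err := upstream.Do(req, account.ProxyURL, account.ID, account.MaxConcurrency)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close() // required: releases the inFlight slot for eviction
+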
+func (s *httpUpstreamService) shouldValidateResolvedIP() bool {
+	if s.cfg == nil {
+		return false
+	}
+	if !s.cfg.Security.URLAllowlist.Enabled {
+		return false
+	}
+	return !s.cfg.Security.URLAllowlist.AllowPrivateHosts
+}
+
+func (s *httpUpstreamService) validateRequestHost(req *http.Request) error {
+	if !s.shouldValidateResolvedIP() {
+		return nil
+	}
+	if req == nil || req.URL == nil {
+		return errors.New("request url is nil")
+	}
+	host := strings.TrimSpace(req.URL.Hostname())
+	if host == "" {
+		return errors.New("request host is empty")
+	}
+	if err := urlvalidator.ValidateResolvedIP(host); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *httpUpstreamService) redirectChecker(req *http.Request, via []*http.Request) error {
+	if len(via) >= 10 {
+		return errors.New("stopped after 10 redirects")
+	}
+	return s.validateRequestHost(req)
+}
+
+// acquireClient fetches or creates a client and marks a request as in flight.
+// Used on the request path so the entry cannot be evicted right after acquisition.
+func (s *httpUpstreamService) acquireClient(proxyURL string, accountID int64, accountConcurrency int) (*upstreamClientEntry, error) {
+	return s.getClientEntry(proxyURL, accountID, accountConcurrency, true, true)
+}
+
+// getOrCreateClient fetches or creates a client.
+// The cache key follows the isolation strategy and the parameters; proxy and
+// pool-config changes cause the client to be rebuilt.
+//
+// Parameters:
+//   - proxyURL: proxy address
+//   - accountID: account ID
+//   - accountConcurrency: the account's concurrency limit
+//
+// Returns:
+//   - *upstreamClientEntry: the cached client entry
+//
+// Isolation strategies:
+//   - proxy: keyed by proxy address; clients are shared per proxy
+//   - account: keyed by account; clients are shared per account (rebuilt when the proxy changes)
+//   - account_proxy: keyed by account+proxy, the finest granularity
+func (s *httpUpstreamService) getOrCreateClient(proxyURL string, accountID int64, accountConcurrency int) *upstreamClientEntry {
+	entry, _ := s.getClientEntry(proxyURL, accountID, accountConcurrency, false, false)
+	return entry
+}
+
+// getClientEntry fetches or creates a client cache entry.
+// With markInFlight=true the request is counted as in flight, protecting the
+// entry from eviction on the request path.
+// With enforceLimit=true the client count is capped; if the cap is hit and
+// nothing can be evicted, an error is returned.
+func (s *httpUpstreamService) getClientEntry(proxyURL string, accountID int64, accountConcurrency int, markInFlight bool, enforceLimit bool) (*upstreamClientEntry, error) {
+	// Resolve the isolation mode
+	isolation := s.getIsolationMode()
+	// Normalize and parse the proxy URL
+	proxyKey, parsedProxy := normalizeProxyURL(proxyURL)
+	// Build the cache key (depends on the isolation strategy)
+	cacheKey := buildCacheKey(isolation, proxyKey, accountID)
+	// Build the pool-config key (detects configuration changes)
+	poolKey := s.buildPoolKey(isolation, accountConcurrency)
+
+	now := time.Now()
+	nowUnix := now.UnixNano()
+
+	// Read-lock fast path: return cache hits directly to reduce lock contention
+	s.mu.RLock()
+	if entry, ok := s.clients[cacheKey]; ok && s.shouldReuseEntry(entry, isolation, proxyKey, poolKey) {
+		atomic.StoreInt64(&entry.lastUsed, nowUnix)
+		if markInFlight {
+			atomic.AddInt64(&entry.inFlight, 1)
+		}
+		s.mu.RUnlock()
+		return entry, nil
+	}
+	s.mu.RUnlock()
+
+	// Write-lock slow path: create or rebuild the client
+	s.mu.Lock()
+	if entry, ok := s.clients[cacheKey]; ok {
+		if s.shouldReuseEntry(entry, isolation, proxyKey, poolKey) {
+			atomic.StoreInt64(&entry.lastUsed, nowUnix)
+			if markInFlight {
+				atomic.AddInt64(&entry.inFlight, 1)
+			}
+			s.mu.Unlock()
+			return entry, nil
+		}
+		s.removeClientLocked(cacheKey, entry)
+	}
+
+	// When over the cache cap, try to evict; refuse to create if nothing is evictable
+	if enforceLimit && s.maxUpstreamClients() > 0 {
+		s.evictIdleLocked(now)
+		if len(s.clients) >= s.maxUpstreamClients() {
+			if !s.evictOldestIdleLocked() {
+				s.mu.Unlock()
+				return nil, errUpstreamClientLimitReached
+			}
+		}
+	}
+
+	// Cache miss or rebuild required: create a new client
+	settings := s.resolvePoolSettings(isolation, accountConcurrency)
+	transport, err := buildUpstreamTransport(settings, parsedProxy)
+	if err != nil {
+		s.mu.Unlock()
+		return nil, fmt.Errorf("build transport: %w", err)
+	}
+	client := &http.Client{Transport: transport}
+	if s.shouldValidateResolvedIP() {
+		client.CheckRedirect = s.redirectChecker
+	}
+	entry := &upstreamClientEntry{
+		client:   client,
+		proxyKey: proxyKey,
+		poolKey:  poolKey,
+	}
+	atomic.StoreInt64(&entry.lastUsed, nowUnix)
+	if markInFlight {
+		atomic.StoreInt64(&entry.inFlight, 1)
+	}
+	s.clients[cacheKey] = entry
+
+	// Run eviction: first drop idle-expired entries, then enforce the count cap
+	s.evictIdleLocked(now)
+	s.evictOverLimitLocked()
+	s.mu.Unlock()
+	return entry, nil
+}
+
+// shouldReuseEntry reports whether a cached entry can be reused.
+// If the proxy or the pool configuration changed, the client must be rebuilt.
+func (s *httpUpstreamService) shouldReuseEntry(entry *upstreamClientEntry, isolation, proxyKey, poolKey string) bool {
+	if entry == nil {
+		return false
+	}
+	if isolation == config.ConnectionPoolIsolationAccount && entry.proxyKey != proxyKey {
+		return false
+	}
+	if entry.poolKey != poolKey {
+		return false
+	}
+	return true
+}
+
+// removeClientLocked removes a client (caller must hold the lock).
+// It deletes the entry from the cache and closes idle connections.
+//
+// Parameters:
+//   - key: cache key
+//   - entry: the client entry
+func (s *httpUpstreamService) removeClientLocked(key string, entry *upstreamClientEntry) {
+	delete(s.clients, key)
+	if entry != nil && entry.client != nil {
+		// Close idle connections to release system resources.
+		// Note: this does not interrupt active connections.
+		entry.client.CloseIdleConnections()
+	}
+}
+
+// evictIdleLocked evicts clients idle past the TTL (caller must hold the lock).
+// It walks all clients and removes entries that exceeded the TTL and have no
+// active requests.
+//
+// Parameters:
+//   - now: the current time
+func (s *httpUpstreamService) evictIdleLocked(now time.Time) {
+	ttl := s.clientIdleTTL()
+	if ttl <= 0 {
+		return
+	}
+	// Compute the eviction cutoff
+	cutoff := now.Add(-ttl).UnixNano()
+	for key, entry := range s.clients {
+		// Skip clients with active requests
+		if atomic.LoadInt64(&entry.inFlight) != 0 {
+			continue
+		}
+		// Evict idle clients past the cutoff
+		if atomic.LoadInt64(&entry.lastUsed) <= cutoff {
+			s.removeClientLocked(key, entry)
+		}
+	}
+}
+
+// evictOldestIdleLocked evicts the least recently used client that has no
+// active requests (caller must hold the lock).
+func (s *httpUpstreamService) evictOldestIdleLocked() bool {
+	var (
+		oldestKey   string
+		oldestEntry *upstreamClientEntry
+		oldestTime  int64
+	)
+	// Find the least recently used client without active requests
+	for key, entry := range s.clients {
+		// Skip clients with active requests
+		if atomic.LoadInt64(&entry.inFlight) != 0 {
+			continue
+		}
+		lastUsed := atomic.LoadInt64(&entry.lastUsed)
+		if oldestEntry == nil || lastUsed < oldestTime {
+			oldestKey = key
+			oldestEntry = entry
+			oldestTime = lastUsed
+		}
+	}
+	// Every client has active requests; nothing can be evicted
+	if oldestEntry == nil {
+		return false
+	}
+	s.removeClientLocked(oldestKey, oldestEntry)
+	return true
+}
+
+// evictOverLimitLocked evicts clients beyond the count cap (caller must hold the lock).
+// LRU policy: the least recently used clients without active requests go first.
+func (s *httpUpstreamService) evictOverLimitLocked() bool {
+	maxClients := s.maxUpstreamClients()
+	if maxClients <= 0 {
+		return false
+	}
+	evicted := false
+	// Keep evicting until the count cap is satisfied
+	for len(s.clients) > maxClients {
+		if !s.evictOldestIdleLocked() {
+			return evicted
+		}
+		evicted = true
+	}
+	return evicted
+}
+
+// getIsolationMode returns the connection-pool isolation mode.
+// It reads the config and falls back to account_proxy for invalid values.
+//
+// Returns:
+//   - string: the isolation mode (proxy/account/account_proxy)
+func (s *httpUpstreamService) getIsolationMode() string {
+	if s.cfg == nil {
+		return config.ConnectionPoolIsolationAccountProxy
+	}
+	mode := strings.ToLower(strings.TrimSpace(s.cfg.Gateway.ConnectionPoolIsolation))
+	if mode == "" {
+		return config.ConnectionPoolIsolationAccountProxy
+	}
+	switch mode {
+	case config.ConnectionPoolIsolationProxy, config.ConnectionPoolIsolationAccount, config.ConnectionPoolIsolationAccountProxy:
+		return mode
+	default:
+		return config.ConnectionPoolIsolationAccountProxy
+	}
+}
+
+// maxUpstreamClients returns the cap on cached clients.
+// It reads the config and uses the default for invalid values.
+func (s *httpUpstreamService) maxUpstreamClients() int {
+	if s.cfg == nil {
+		return defaultMaxUpstreamClients
+	}
+	if s.cfg.Gateway.MaxUpstreamClients > 0 {
+		return s.cfg.Gateway.MaxUpstreamClients
+	}
+	return defaultMaxUpstreamClients
+}
+
+// clientIdleTTL returns the idle-reclaim threshold for clients.
+// It reads the config and uses the default for invalid values.
+func (s *httpUpstreamService) clientIdleTTL() time.Duration {
+	if s.cfg == nil {
+		return time.Duration(defaultClientIdleTTLSeconds) * time.Second
+	}
+	if s.cfg.Gateway.ClientIdleTTLSeconds > 0 {
+		return time.Duration(s.cfg.Gateway.ClientIdleTTLSeconds) * time.Second
+	}
+	return time.Duration(defaultClientIdleTTLSeconds) * time.Second
+}
+
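+// Concrete cache keys per isolation mode, for orientation (these follow
+// buildCacheKey below; the account ID and proxy values are examples):
+//
+//	proxy:         "proxy:http://proxy.example.com"
+//	account:       "account:42"
+//	account_proxy: "account:42|proxy:direct"
+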
time.Duration(defaultClientIdleTTLSeconds) * time.Second +} + +// resolvePoolSettings 解析连接池配置 +// 根据隔离策略和账户并发数动态调整连接池参数 +// +// 参数: +// - isolation: 隔离模式 +// - accountConcurrency: 账户并发限制 +// +// 返回: +// - poolSettings: 连接池配置 +// +// 说明: +// - 账户隔离模式下,连接池大小与账户并发数对应 +// - 这确保了单账户不会占用过多连接资源 +func (s *httpUpstreamService) resolvePoolSettings(isolation string, accountConcurrency int) poolSettings { + settings := defaultPoolSettings(s.cfg) + // 账户隔离模式下,根据账户并发数调整连接池大小 + if (isolation == config.ConnectionPoolIsolationAccount || isolation == config.ConnectionPoolIsolationAccountProxy) && accountConcurrency > 0 { + settings.maxIdleConns = accountConcurrency + settings.maxIdleConnsPerHost = accountConcurrency + settings.maxConnsPerHost = accountConcurrency + } + return settings +} + +// buildPoolKey 构建连接池配置键 +// 用于检测配置变更,配置变更时需要重建客户端 +// +// 参数: +// - isolation: 隔离模式 +// - accountConcurrency: 账户并发限制 +// +// 返回: +// - string: 配置键 +func (s *httpUpstreamService) buildPoolKey(isolation string, accountConcurrency int) string { + if isolation == config.ConnectionPoolIsolationAccount || isolation == config.ConnectionPoolIsolationAccountProxy { + if accountConcurrency > 0 { + return fmt.Sprintf("account:%d", accountConcurrency) + } + } + return "default" +} + +// buildCacheKey 构建客户端缓存键 +// 根据隔离策略决定缓存键的组成 +// +// 参数: +// - isolation: 隔离模式 +// - proxyKey: 代理标识 +// - accountID: 账户 ID +// +// 返回: +// - string: 缓存键 +// +// 缓存键格式: +// - proxy 模式: "proxy:{proxyKey}" +// - account 模式: "account:{accountID}" +// - account_proxy 模式: "account:{accountID}|proxy:{proxyKey}" +func buildCacheKey(isolation, proxyKey string, accountID int64) string { + switch isolation { + case config.ConnectionPoolIsolationAccount: + return fmt.Sprintf("account:%d", accountID) + case config.ConnectionPoolIsolationAccountProxy: + return fmt.Sprintf("account:%d|proxy:%s", accountID, proxyKey) + default: + return fmt.Sprintf("proxy:%s", proxyKey) + } +} + +// normalizeProxyURL 标准化代理 URL +// 处理空值和解析错误,返回标准化的键和解析后的 URL +// +// 参数: +// - raw: 原始代理 URL 字符串 +// +// 返回: +// - string: 标准化的代理键(空或解析失败返回 "direct") +// - *url.URL: 解析后的 URL(空或解析失败返回 nil) +func normalizeProxyURL(raw string) (string, *url.URL) { + proxyURL := strings.TrimSpace(raw) + if proxyURL == "" { + return directProxyKey, nil + } + parsed, err := url.Parse(proxyURL) + if err != nil { + return directProxyKey, nil + } + parsed.Scheme = strings.ToLower(parsed.Scheme) + parsed.Host = strings.ToLower(parsed.Host) + parsed.Path = "" + parsed.RawPath = "" + parsed.RawQuery = "" + parsed.Fragment = "" + parsed.ForceQuery = false + if hostname := parsed.Hostname(); hostname != "" { + port := parsed.Port() + if (parsed.Scheme == "http" && port == "80") || (parsed.Scheme == "https" && port == "443") { + port = "" + } + hostname = strings.ToLower(hostname) + if port != "" { + parsed.Host = net.JoinHostPort(hostname, port) + } else { + parsed.Host = hostname + } + } + return parsed.String(), parsed +} + +// defaultPoolSettings 获取默认连接池配置 +// 从全局配置中读取,无效值使用常量默认值 +// +// 参数: +// - cfg: 全局配置 +// +// 返回: +// - poolSettings: 连接池配置 +func defaultPoolSettings(cfg *config.Config) poolSettings { + maxIdleConns := defaultMaxIdleConns + maxIdleConnsPerHost := defaultMaxIdleConnsPerHost + maxConnsPerHost := defaultMaxConnsPerHost + idleConnTimeout := defaultIdleConnTimeout + responseHeaderTimeout := defaultResponseHeaderTimeout + + if cfg != nil { + if cfg.Gateway.MaxIdleConns > 0 { + maxIdleConns = cfg.Gateway.MaxIdleConns + } + if cfg.Gateway.MaxIdleConnsPerHost > 0 { + maxIdleConnsPerHost = 
cfg.Gateway.MaxIdleConnsPerHost + } + if cfg.Gateway.MaxConnsPerHost >= 0 { + maxConnsPerHost = cfg.Gateway.MaxConnsPerHost + } + if cfg.Gateway.IdleConnTimeoutSeconds > 0 { + idleConnTimeout = time.Duration(cfg.Gateway.IdleConnTimeoutSeconds) * time.Second + } + if cfg.Gateway.ResponseHeaderTimeout > 0 { + responseHeaderTimeout = time.Duration(cfg.Gateway.ResponseHeaderTimeout) * time.Second + } + } + + return poolSettings{ + maxIdleConns: maxIdleConns, + maxIdleConnsPerHost: maxIdleConnsPerHost, + maxConnsPerHost: maxConnsPerHost, + idleConnTimeout: idleConnTimeout, + responseHeaderTimeout: responseHeaderTimeout, + } +} + +// buildUpstreamTransport 构建上游请求的 Transport +// 使用配置文件中的连接池参数,支持生产环境调优 +// +// 参数: +// - settings: 连接池配置 +// - proxyURL: 代理 URL(nil 表示直连) +// +// 返回: +// - *http.Transport: 配置好的 Transport 实例 +// - error: 代理配置错误 +// +// Transport 参数说明: +// - MaxIdleConns: 所有主机的最大空闲连接总数 +// - MaxIdleConnsPerHost: 每主机最大空闲连接数(影响连接复用率) +// - MaxConnsPerHost: 每主机最大连接数(达到后新请求等待) +// - IdleConnTimeout: 空闲连接超时(超时后关闭) +// - ResponseHeaderTimeout: 等待响应头超时(不影响流式传输) +func buildUpstreamTransport(settings poolSettings, proxyURL *url.URL) (*http.Transport, error) { + transport := &http.Transport{ + MaxIdleConns: settings.maxIdleConns, + MaxIdleConnsPerHost: settings.maxIdleConnsPerHost, + MaxConnsPerHost: settings.maxConnsPerHost, + IdleConnTimeout: settings.idleConnTimeout, + ResponseHeaderTimeout: settings.responseHeaderTimeout, + } + if err := proxyutil.ConfigureTransportProxy(transport, proxyURL); err != nil { + return nil, err + } + return transport, nil +} + +// trackedBody 带跟踪功能的响应体包装器 +// 在 Close 时执行回调,用于更新请求计数 +type trackedBody struct { + io.ReadCloser // 原始响应体 + once sync.Once + onClose func() // 关闭时的回调函数 +} + +// Close 关闭响应体并执行回调 +// 使用 sync.Once 确保回调只执行一次 +func (b *trackedBody) Close() error { + err := b.ReadCloser.Close() + if b.onClose != nil { + b.once.Do(b.onClose) + } + return err +} + +// wrapTrackedBody 包装响应体以跟踪关闭事件 +// 用于在响应体关闭时更新 inFlight 计数 +// +// 参数: +// - body: 原始响应体 +// - onClose: 关闭时的回调函数 +// +// 返回: +// - io.ReadCloser: 包装后的响应体 +func wrapTrackedBody(body io.ReadCloser, onClose func()) io.ReadCloser { + if body == nil { + return body + } + return &trackedBody{ReadCloser: body, onClose: onClose} +} diff --git a/backend/internal/repository/http_upstream_benchmark_test.go b/backend/internal/repository/http_upstream_benchmark_test.go new file mode 100644 index 00000000..1e7430a3 --- /dev/null +++ b/backend/internal/repository/http_upstream_benchmark_test.go @@ -0,0 +1,70 @@ +package repository + +import ( + "net/http" + "net/url" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +// httpClientSink 用于防止编译器优化掉基准测试中的赋值操作 +// 这是 Go 基准测试的常见模式,确保测试结果准确 +var httpClientSink *http.Client + +// BenchmarkHTTPUpstreamProxyClient 对比重复创建与复用代理客户端的开销 +// +// 测试目的: +// - 验证连接池复用相比每次新建的性能提升 +// - 量化内存分配差异 +// +// 预期结果: +// - "复用" 子测试应显著快于 "新建" +// - "复用" 子测试应零内存分配 +func BenchmarkHTTPUpstreamProxyClient(b *testing.B) { + // 创建测试配置 + cfg := &config.Config{ + Gateway: config.GatewayConfig{ResponseHeaderTimeout: 300}, + } + upstream := NewHTTPUpstream(cfg) + svc, ok := upstream.(*httpUpstreamService) + if !ok { + b.Fatalf("类型断言失败,无法获取 httpUpstreamService") + } + + proxyURL := "http://127.0.0.1:8080" + b.ReportAllocs() // 报告内存分配统计 + + // 子测试:每次新建客户端 + // 模拟未优化前的行为,每次请求都创建新的 http.Client + b.Run("新建", func(b *testing.B) { + parsedProxy, err := url.Parse(proxyURL) + if err != nil { + b.Fatalf("解析代理地址失败: %v", err) + } + settings := defaultPoolSettings(cfg) + for i := 0; i < b.N; i++ { + 
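+			// No request is issued in this loop: it isolates the pure cost of
+			// constructing the Transport and http.Client, with no I/O.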
// 每次迭代都创建新客户端,包含 Transport 分配 + transport, err := buildUpstreamTransport(settings, parsedProxy) + if err != nil { + b.Fatalf("创建 Transport 失败: %v", err) + } + httpClientSink = &http.Client{ + Transport: transport, + } + } + }) + + // 子测试:复用已缓存的客户端 + // 模拟优化后的行为,从缓存获取客户端 + b.Run("复用", func(b *testing.B) { + // 预热:确保客户端已缓存 + entry := svc.getOrCreateClient(proxyURL, 1, 1) + client := entry.client + b.ResetTimer() // 重置计时器,排除预热时间 + for i := 0; i < b.N; i++ { + // 直接使用缓存的客户端,无内存分配 + httpClientSink = client + } + }) +} diff --git a/backend/internal/repository/http_upstream_test.go b/backend/internal/repository/http_upstream_test.go new file mode 100644 index 00000000..fbe44c5e --- /dev/null +++ b/backend/internal/repository/http_upstream_test.go @@ -0,0 +1,291 @@ +package repository + +import ( + "io" + "net/http" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +// HTTPUpstreamSuite HTTP 上游服务测试套件 +// 使用 testify/suite 组织测试,支持 SetupTest 初始化 +type HTTPUpstreamSuite struct { + suite.Suite + cfg *config.Config // 测试用配置 +} + +// SetupTest 每个测试用例执行前的初始化 +// 创建空配置,各测试用例可按需覆盖 +func (s *HTTPUpstreamSuite) SetupTest() { + s.cfg = &config.Config{ + Security: config.SecurityConfig{ + URLAllowlist: config.URLAllowlistConfig{ + AllowPrivateHosts: true, + }, + }, + } +} + +// newService 创建测试用的 httpUpstreamService 实例 +// 返回具体类型以便访问内部状态进行断言 +func (s *HTTPUpstreamSuite) newService() *httpUpstreamService { + up := NewHTTPUpstream(s.cfg) + svc, ok := up.(*httpUpstreamService) + require.True(s.T(), ok, "expected *httpUpstreamService") + return svc +} + +// TestDefaultResponseHeaderTimeout 测试默认响应头超时配置 +// 验证未配置时使用 300 秒默认值 +func (s *HTTPUpstreamSuite) TestDefaultResponseHeaderTimeout() { + svc := s.newService() + entry := svc.getOrCreateClient("", 0, 0) + transport, ok := entry.client.Transport.(*http.Transport) + require.True(s.T(), ok, "expected *http.Transport") + require.Equal(s.T(), 300*time.Second, transport.ResponseHeaderTimeout, "ResponseHeaderTimeout mismatch") +} + +// TestCustomResponseHeaderTimeout 测试自定义响应头超时配置 +// 验证配置值能正确应用到 Transport +func (s *HTTPUpstreamSuite) TestCustomResponseHeaderTimeout() { + s.cfg.Gateway = config.GatewayConfig{ResponseHeaderTimeout: 7} + svc := s.newService() + entry := svc.getOrCreateClient("", 0, 0) + transport, ok := entry.client.Transport.(*http.Transport) + require.True(s.T(), ok, "expected *http.Transport") + require.Equal(s.T(), 7*time.Second, transport.ResponseHeaderTimeout, "ResponseHeaderTimeout mismatch") +} + +// TestGetOrCreateClient_InvalidURLFallsBackToDirect 测试无效代理 URL 回退 +// 验证解析失败时回退到直连模式 +func (s *HTTPUpstreamSuite) TestGetOrCreateClient_InvalidURLFallsBackToDirect() { + svc := s.newService() + entry := svc.getOrCreateClient("://bad-proxy-url", 1, 1) + require.Equal(s.T(), directProxyKey, entry.proxyKey, "expected direct proxy fallback") +} + +// TestNormalizeProxyURL_Canonicalizes 测试代理 URL 规范化 +// 验证等价地址能够映射到同一缓存键 +func (s *HTTPUpstreamSuite) TestNormalizeProxyURL_Canonicalizes() { + key1, _ := normalizeProxyURL("http://proxy.local:8080") + key2, _ := normalizeProxyURL("http://proxy.local:8080/") + require.Equal(s.T(), key1, key2, "expected normalized proxy keys to match") +} + +// TestAcquireClient_OverLimitReturnsError 测试连接池缓存上限保护 +// 验证超限且无可淘汰条目时返回错误 +func (s *HTTPUpstreamSuite) TestAcquireClient_OverLimitReturnsError() { + s.cfg.Gateway = config.GatewayConfig{ + ConnectionPoolIsolation: config.ConnectionPoolIsolationAccountProxy, + 
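+		// Capping the cache at one entry makes the limit path deterministic:
+		// the first acquireClient pins its entry via the in-flight counter
+		// (never released in this test), so the second account+proxy pair can
+		// neither reuse nor evict it and must fail.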
MaxUpstreamClients: 1, + } + svc := s.newService() + entry1, err := svc.acquireClient("http://proxy-a:8080", 1, 1) + require.NoError(s.T(), err, "expected first acquire to succeed") + require.NotNil(s.T(), entry1, "expected entry") + + entry2, err := svc.acquireClient("http://proxy-b:8080", 2, 1) + require.Error(s.T(), err, "expected error when cache limit reached") + require.Nil(s.T(), entry2, "expected nil entry when cache limit reached") +} + +// TestDo_WithoutProxy_GoesDirect 测试无代理时直连 +// 验证空代理 URL 时请求直接发送到目标服务器 +func (s *HTTPUpstreamSuite) TestDo_WithoutProxy_GoesDirect() { + // 创建模拟上游服务器 + upstream := newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = io.WriteString(w, "direct") + })) + s.T().Cleanup(upstream.Close) + + up := NewHTTPUpstream(s.cfg) + + req, err := http.NewRequest(http.MethodGet, upstream.URL+"/x", nil) + require.NoError(s.T(), err, "NewRequest") + resp, err := up.Do(req, "", 1, 1) + require.NoError(s.T(), err, "Do") + defer func() { _ = resp.Body.Close() }() + b, _ := io.ReadAll(resp.Body) + require.Equal(s.T(), "direct", string(b), "unexpected body") +} + +// TestDo_WithHTTPProxy_UsesProxy 测试 HTTP 代理功能 +// 验证请求通过代理服务器转发,使用绝对 URI 格式 +func (s *HTTPUpstreamSuite) TestDo_WithHTTPProxy_UsesProxy() { + // 用于接收代理请求的通道 + seen := make(chan string, 1) + // 创建模拟代理服务器 + proxySrv := newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + seen <- r.RequestURI // 记录请求 URI + _, _ = io.WriteString(w, "proxied") + })) + s.T().Cleanup(proxySrv.Close) + + s.cfg.Gateway = config.GatewayConfig{ResponseHeaderTimeout: 1} + up := NewHTTPUpstream(s.cfg) + + // 发送请求到外部地址,应通过代理 + req, err := http.NewRequest(http.MethodGet, "http://example.com/test", nil) + require.NoError(s.T(), err, "NewRequest") + resp, err := up.Do(req, proxySrv.URL, 1, 1) + require.NoError(s.T(), err, "Do") + defer func() { _ = resp.Body.Close() }() + b, _ := io.ReadAll(resp.Body) + require.Equal(s.T(), "proxied", string(b), "unexpected body") + + // 验证代理收到的是绝对 URI 格式(HTTP 代理规范要求) + select { + case uri := <-seen: + require.Equal(s.T(), "http://example.com/test", uri, "expected absolute-form request URI") + default: + require.Fail(s.T(), "expected proxy to receive request") + } +} + +// TestDo_EmptyProxy_UsesDirect 测试空代理字符串 +// 验证空字符串代理等同于直连 +func (s *HTTPUpstreamSuite) TestDo_EmptyProxy_UsesDirect() { + upstream := newLocalTestServer(s.T(), http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, _ = io.WriteString(w, "direct-empty") + })) + s.T().Cleanup(upstream.Close) + + up := NewHTTPUpstream(s.cfg) + req, err := http.NewRequest(http.MethodGet, upstream.URL+"/y", nil) + require.NoError(s.T(), err, "NewRequest") + resp, err := up.Do(req, "", 1, 1) + require.NoError(s.T(), err, "Do with empty proxy") + defer func() { _ = resp.Body.Close() }() + b, _ := io.ReadAll(resp.Body) + require.Equal(s.T(), "direct-empty", string(b)) +} + +// TestAccountIsolation_DifferentAccounts 测试账户隔离模式 +// 验证不同账户使用独立的连接池 +func (s *HTTPUpstreamSuite) TestAccountIsolation_DifferentAccounts() { + s.cfg.Gateway = config.GatewayConfig{ConnectionPoolIsolation: config.ConnectionPoolIsolationAccount} + svc := s.newService() + // 同一代理,不同账户 + entry1 := svc.getOrCreateClient("http://proxy.local:8080", 1, 3) + entry2 := svc.getOrCreateClient("http://proxy.local:8080", 2, 3) + require.NotSame(s.T(), entry1, entry2, "不同账号不应共享连接池") + require.Equal(s.T(), 2, len(svc.clients), "账号隔离应缓存两个客户端") +} + +// TestAccountProxyIsolation_DifferentProxy 测试账户+代理组合隔离模式 +// 
验证同一账户使用不同代理时创建独立连接池 +func (s *HTTPUpstreamSuite) TestAccountProxyIsolation_DifferentProxy() { + s.cfg.Gateway = config.GatewayConfig{ConnectionPoolIsolation: config.ConnectionPoolIsolationAccountProxy} + svc := s.newService() + // 同一账户,不同代理 + entry1 := svc.getOrCreateClient("http://proxy-a:8080", 1, 3) + entry2 := svc.getOrCreateClient("http://proxy-b:8080", 1, 3) + require.NotSame(s.T(), entry1, entry2, "账号+代理隔离应区分不同代理") + require.Equal(s.T(), 2, len(svc.clients), "账号+代理隔离应缓存两个客户端") +} + +// TestAccountModeProxyChangeClearsPool 测试账户模式下代理变更 +// 验证账户切换代理时清理旧连接池,避免复用错误代理 +func (s *HTTPUpstreamSuite) TestAccountModeProxyChangeClearsPool() { + s.cfg.Gateway = config.GatewayConfig{ConnectionPoolIsolation: config.ConnectionPoolIsolationAccount} + svc := s.newService() + // 同一账户,先后使用不同代理 + entry1 := svc.getOrCreateClient("http://proxy-a:8080", 1, 3) + entry2 := svc.getOrCreateClient("http://proxy-b:8080", 1, 3) + require.NotSame(s.T(), entry1, entry2, "账号切换代理应创建新连接池") + require.Equal(s.T(), 1, len(svc.clients), "账号模式下应仅保留一个连接池") + require.False(s.T(), hasEntry(svc, entry1), "旧连接池应被清理") +} + +// TestAccountConcurrencyOverridesPoolSettings 测试账户并发数覆盖连接池配置 +// 验证账户隔离模式下,连接池大小与账户并发数对应 +func (s *HTTPUpstreamSuite) TestAccountConcurrencyOverridesPoolSettings() { + s.cfg.Gateway = config.GatewayConfig{ConnectionPoolIsolation: config.ConnectionPoolIsolationAccount} + svc := s.newService() + // 账户并发数为 12 + entry := svc.getOrCreateClient("", 1, 12) + transport, ok := entry.client.Transport.(*http.Transport) + require.True(s.T(), ok, "expected *http.Transport") + // 连接池参数应与并发数一致 + require.Equal(s.T(), 12, transport.MaxConnsPerHost, "MaxConnsPerHost mismatch") + require.Equal(s.T(), 12, transport.MaxIdleConns, "MaxIdleConns mismatch") + require.Equal(s.T(), 12, transport.MaxIdleConnsPerHost, "MaxIdleConnsPerHost mismatch") +} + +// TestAccountConcurrencyFallbackToDefault 测试账户并发数为 0 时回退到默认配置 +// 验证未指定并发数时使用全局配置值 +func (s *HTTPUpstreamSuite) TestAccountConcurrencyFallbackToDefault() { + s.cfg.Gateway = config.GatewayConfig{ + ConnectionPoolIsolation: config.ConnectionPoolIsolationAccount, + MaxIdleConns: 77, + MaxIdleConnsPerHost: 55, + MaxConnsPerHost: 66, + } + svc := s.newService() + // 账户并发数为 0,应使用全局配置 + entry := svc.getOrCreateClient("", 1, 0) + transport, ok := entry.client.Transport.(*http.Transport) + require.True(s.T(), ok, "expected *http.Transport") + require.Equal(s.T(), 66, transport.MaxConnsPerHost, "MaxConnsPerHost fallback mismatch") + require.Equal(s.T(), 77, transport.MaxIdleConns, "MaxIdleConns fallback mismatch") + require.Equal(s.T(), 55, transport.MaxIdleConnsPerHost, "MaxIdleConnsPerHost fallback mismatch") +} + +// TestEvictOverLimitRemovesOldestIdle 测试超出数量限制时的 LRU 淘汰 +// 验证优先淘汰最久未使用的空闲客户端 +func (s *HTTPUpstreamSuite) TestEvictOverLimitRemovesOldestIdle() { + s.cfg.Gateway = config.GatewayConfig{ + ConnectionPoolIsolation: config.ConnectionPoolIsolationAccountProxy, + MaxUpstreamClients: 2, // 最多缓存 2 个客户端 + } + svc := s.newService() + // 创建两个客户端,设置不同的最后使用时间 + entry1 := svc.getOrCreateClient("http://proxy-a:8080", 1, 1) + entry2 := svc.getOrCreateClient("http://proxy-b:8080", 2, 1) + atomic.StoreInt64(&entry1.lastUsed, time.Now().Add(-2*time.Hour).UnixNano()) // 最久 + atomic.StoreInt64(&entry2.lastUsed, time.Now().Add(-time.Hour).UnixNano()) + // 创建第三个客户端,触发淘汰 + _ = svc.getOrCreateClient("http://proxy-c:8080", 3, 1) + + require.LessOrEqual(s.T(), len(svc.clients), 2, "应保持在缓存上限内") + require.False(s.T(), hasEntry(svc, entry1), "最久未使用的连接池应被清理") +} + +// TestIdleTTLDoesNotEvictActive 测试活跃请求保护 
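+// (evictIdleLocked skips any entry whose inFlight counter is non-zero, no
+// matter how far its lastUsed timestamp is past the TTL cutoff.)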
+// 验证有进行中请求的客户端不会被空闲超时淘汰 +func (s *HTTPUpstreamSuite) TestIdleTTLDoesNotEvictActive() { + s.cfg.Gateway = config.GatewayConfig{ + ConnectionPoolIsolation: config.ConnectionPoolIsolationAccount, + ClientIdleTTLSeconds: 1, // 1 秒空闲超时 + } + svc := s.newService() + entry1 := svc.getOrCreateClient("", 1, 1) + // 设置为很久之前使用,但有活跃请求 + atomic.StoreInt64(&entry1.lastUsed, time.Now().Add(-2*time.Minute).UnixNano()) + atomic.StoreInt64(&entry1.inFlight, 1) // 模拟有活跃请求 + // 创建新客户端,触发淘汰检查 + _ = svc.getOrCreateClient("", 2, 1) + + require.True(s.T(), hasEntry(svc, entry1), "有活跃请求时不应回收") +} + +// TestHTTPUpstreamSuite 运行测试套件 +func TestHTTPUpstreamSuite(t *testing.T) { + suite.Run(t, new(HTTPUpstreamSuite)) +} + +// hasEntry 检查客户端是否存在于缓存中 +// 辅助函数,用于验证淘汰逻辑 +func hasEntry(svc *httpUpstreamService, target *upstreamClientEntry) bool { + for _, entry := range svc.clients { + if entry == target { + return true + } + } + return false +} diff --git a/backend/internal/repository/identity_cache.go b/backend/internal/repository/identity_cache.go new file mode 100644 index 00000000..d28477b7 --- /dev/null +++ b/backend/internal/repository/identity_cache.go @@ -0,0 +1,51 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const ( + fingerprintKeyPrefix = "fingerprint:" + fingerprintTTL = 24 * time.Hour +) + +// fingerprintKey generates the Redis key for account fingerprint cache. +func fingerprintKey(accountID int64) string { + return fmt.Sprintf("%s%d", fingerprintKeyPrefix, accountID) +} + +type identityCache struct { + rdb *redis.Client +} + +func NewIdentityCache(rdb *redis.Client) service.IdentityCache { + return &identityCache{rdb: rdb} +} + +func (c *identityCache) GetFingerprint(ctx context.Context, accountID int64) (*service.Fingerprint, error) { + key := fingerprintKey(accountID) + val, err := c.rdb.Get(ctx, key).Result() + if err != nil { + return nil, err + } + var fp service.Fingerprint + if err := json.Unmarshal([]byte(val), &fp); err != nil { + return nil, err + } + return &fp, nil +} + +func (c *identityCache) SetFingerprint(ctx context.Context, accountID int64, fp *service.Fingerprint) error { + key := fingerprintKey(accountID) + val, err := json.Marshal(fp) + if err != nil { + return err + } + return c.rdb.Set(ctx, key, val, fingerprintTTL).Err() +} diff --git a/backend/internal/repository/identity_cache_integration_test.go b/backend/internal/repository/identity_cache_integration_test.go new file mode 100644 index 00000000..48f59c13 --- /dev/null +++ b/backend/internal/repository/identity_cache_integration_test.go @@ -0,0 +1,67 @@ +//go:build integration + +package repository + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type IdentityCacheSuite struct { + IntegrationRedisSuite + cache *identityCache +} + +func (s *IdentityCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewIdentityCache(s.rdb).(*identityCache) +} + +func (s *IdentityCacheSuite) TestGetFingerprint_Missing() { + _, err := s.cache.GetFingerprint(s.ctx, 1) + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil for missing fingerprint") +} + +func (s *IdentityCacheSuite) TestSetAndGetFingerprint() { + fp := &service.Fingerprint{ClientID: "c1", UserAgent: "ua"} + require.NoError(s.T(), 
s.cache.SetFingerprint(s.ctx, 1, fp), "SetFingerprint") + gotFP, err := s.cache.GetFingerprint(s.ctx, 1) + require.NoError(s.T(), err, "GetFingerprint") + require.Equal(s.T(), "c1", gotFP.ClientID) + require.Equal(s.T(), "ua", gotFP.UserAgent) +} + +func (s *IdentityCacheSuite) TestFingerprint_TTL() { + fp := &service.Fingerprint{ClientID: "c1", UserAgent: "ua"} + require.NoError(s.T(), s.cache.SetFingerprint(s.ctx, 2, fp)) + + fpKey := fmt.Sprintf("%s%d", fingerprintKeyPrefix, 2) + ttl, err := s.rdb.TTL(s.ctx, fpKey).Result() + require.NoError(s.T(), err, "TTL fpKey") + s.AssertTTLWithin(ttl, 1*time.Second, fingerprintTTL) +} + +func (s *IdentityCacheSuite) TestGetFingerprint_JSONCorruption() { + fpKey := fmt.Sprintf("%s%d", fingerprintKeyPrefix, 999) + require.NoError(s.T(), s.rdb.Set(s.ctx, fpKey, "invalid-json-data", 1*time.Minute).Err(), "Set invalid JSON") + + _, err := s.cache.GetFingerprint(s.ctx, 999) + require.Error(s.T(), err, "expected error for corrupted JSON") + require.False(s.T(), errors.Is(err, redis.Nil), "expected decoding error, not redis.Nil") +} + +func (s *IdentityCacheSuite) TestSetFingerprint_Nil() { + err := s.cache.SetFingerprint(s.ctx, 100, nil) + require.NoError(s.T(), err, "SetFingerprint(nil) should succeed") +} + +func TestIdentityCacheSuite(t *testing.T) { + suite.Run(t, new(IdentityCacheSuite)) +} diff --git a/backend/internal/repository/identity_cache_test.go b/backend/internal/repository/identity_cache_test.go new file mode 100644 index 00000000..05921b12 --- /dev/null +++ b/backend/internal/repository/identity_cache_test.go @@ -0,0 +1,46 @@ +//go:build unit + +package repository + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFingerprintKey(t *testing.T) { + tests := []struct { + name string + accountID int64 + expected string + }{ + { + name: "normal_account_id", + accountID: 123, + expected: "fingerprint:123", + }, + { + name: "zero_account_id", + accountID: 0, + expected: "fingerprint:0", + }, + { + name: "negative_account_id", + accountID: -1, + expected: "fingerprint:-1", + }, + { + name: "max_int64", + accountID: math.MaxInt64, + expected: "fingerprint:9223372036854775807", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := fingerprintKey(tc.accountID) + require.Equal(t, tc.expected, got) + }) + } +} diff --git a/backend/internal/repository/inprocess_transport_test.go b/backend/internal/repository/inprocess_transport_test.go new file mode 100644 index 00000000..fbdf2c81 --- /dev/null +++ b/backend/internal/repository/inprocess_transport_test.go @@ -0,0 +1,63 @@ +package repository + +import ( + "bytes" + "io" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" +) + +type roundTripFunc func(*http.Request) (*http.Response, error) + +func (f roundTripFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) } + +// newInProcessTransport adapts an http.HandlerFunc into an http.RoundTripper without opening sockets. +// It captures the request body (if any) and then rewinds it before invoking the handler. 
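+//
+// A sketch of the intended wiring (names below are illustrative only):
+//
+//	var gotBody []byte
+//	client := &http.Client{Transport: newInProcessTransport(
+//		func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) },
+//		func(r *http.Request, body []byte) { gotBody = body },
+//	)}
+//	resp, _ := client.Get("http://in-process.example/path")
+//	_ = resp.Body.Close() // gotBody is nil here, since a GET has no body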
+func newInProcessTransport(handler http.HandlerFunc, capture func(r *http.Request, body []byte)) http.RoundTripper { + return roundTripFunc(func(r *http.Request) (*http.Response, error) { + var body []byte + if r.Body != nil { + body, _ = io.ReadAll(r.Body) + _ = r.Body.Close() + r.Body = io.NopCloser(bytes.NewReader(body)) + } + if capture != nil { + capture(r, body) + } + + rec := httptest.NewRecorder() + handler(rec, r) + return rec.Result(), nil + }) +} + +var ( + canListenOnce sync.Once + canListen bool + canListenErr error +) + +func localListenerAvailable() bool { + canListenOnce.Do(func() { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + canListenErr = err + canListen = false + return + } + _ = ln.Close() + canListen = true + }) + return canListen +} + +func newLocalTestServer(tb testing.TB, handler http.Handler) *httptest.Server { + tb.Helper() + if !localListenerAvailable() { + tb.Skipf("local listeners are not permitted in this environment: %v", canListenErr) + } + return httptest.NewServer(handler) +} diff --git a/backend/internal/repository/integration_harness_test.go b/backend/internal/repository/integration_harness_test.go new file mode 100644 index 00000000..fb9c26c4 --- /dev/null +++ b/backend/internal/repository/integration_harness_test.go @@ -0,0 +1,408 @@ +//go:build integration + +package repository + +import ( + "context" + "database/sql" + "fmt" + "log" + "os" + "os/exec" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + _ "github.com/Wei-Shaw/sub2api/ent/runtime" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "github.com/lib/pq" + redisclient "github.com/redis/go-redis/v9" + tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres" + tcredis "github.com/testcontainers/testcontainers-go/modules/redis" +) + +const ( + redisImageTag = "redis:8.4-alpine" + postgresImageTag = "postgres:18.1-alpine3.23" +) + +var ( + integrationDB *sql.DB + integrationEntClient *dbent.Client + integrationRedis *redisclient.Client + + redisNamespaceSeq uint64 +) + +func TestMain(m *testing.M) { + ctx := context.Background() + + if err := timezone.Init("UTC"); err != nil { + log.Printf("failed to init timezone: %v", err) + os.Exit(1) + } + + if !dockerIsAvailable(ctx) { + // In CI we expect Docker to be available so integration tests should fail loudly. 
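+		// Locally (CI unset) the package exits 0 below, which `go test`
+		// reports as a pass; the skip is logged so the gap stays visible.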
+		if os.Getenv("CI") != "" {
+			log.Printf("docker is not available (CI=true); failing integration tests")
+			os.Exit(1)
+		}
+		log.Printf("docker is not available; skipping integration tests (start Docker to enable)")
+		os.Exit(0)
+	}
+
+	postgresImage := selectDockerImage(ctx, postgresImageTag)
+	pgContainer, err := tcpostgres.Run(
+		ctx,
+		postgresImage,
+		tcpostgres.WithDatabase("sub2api_test"),
+		tcpostgres.WithUsername("postgres"),
+		tcpostgres.WithPassword("postgres"),
+		tcpostgres.BasicWaitStrategies(),
+	)
+	if err != nil {
+		log.Printf("failed to start postgres container: %v", err)
+		os.Exit(1)
+	}
+	defer func() { _ = pgContainer.Terminate(ctx) }()
+
+	redisContainer, err := tcredis.Run(
+		ctx,
+		redisImageTag,
+	)
+	if err != nil {
+		log.Printf("failed to start redis container: %v", err)
+		os.Exit(1)
+	}
+	defer func() { _ = redisContainer.Terminate(ctx) }()
+
+	dsn, err := pgContainer.ConnectionString(ctx, "sslmode=disable", "TimeZone=UTC")
+	if err != nil {
+		log.Printf("failed to get postgres dsn: %v", err)
+		os.Exit(1)
+	}
+
+	integrationDB, err = openSQLWithRetry(ctx, dsn, 30*time.Second)
+	if err != nil {
+		log.Printf("failed to open sql db: %v", err)
+		os.Exit(1)
+	}
+	if err := ApplyMigrations(ctx, integrationDB); err != nil {
+		log.Printf("failed to apply db migrations: %v", err)
+		os.Exit(1)
+	}
+
+	// Create the ent client used by the integration tests.
+	drv := entsql.OpenDB(dialect.Postgres, integrationDB)
+	integrationEntClient = dbent.NewClient(dbent.Driver(drv))
+
+	redisHost, err := redisContainer.Host(ctx)
+	if err != nil {
+		log.Printf("failed to get redis host: %v", err)
+		os.Exit(1)
+	}
+	redisPort, err := redisContainer.MappedPort(ctx, "6379/tcp")
+	if err != nil {
+		log.Printf("failed to get redis port: %v", err)
+		os.Exit(1)
+	}
+
+	integrationRedis = redisclient.NewClient(&redisclient.Options{
+		Addr: fmt.Sprintf("%s:%d", redisHost, redisPort.Int()),
+		DB:   0,
+	})
+	if err := integrationRedis.Ping(ctx).Err(); err != nil {
+		log.Printf("failed to ping redis: %v", err)
+		os.Exit(1)
+	}
+
+	code := m.Run()
+
+	_ = integrationEntClient.Close()
+	_ = integrationRedis.Close()
+	_ = integrationDB.Close()
+
+	os.Exit(code)
+}
+
+func dockerIsAvailable(ctx context.Context) bool {
+	cmd := exec.CommandContext(ctx, "docker", "info")
+	cmd.Env = os.Environ()
+	return cmd.Run() == nil
+}
+
+// selectDockerImage picks the container image tag to run. The local-cache
+// probe is currently advisory: the preferred tag is returned either way, and
+// the branch is kept only as an extension point for a registry-mirror
+// fallback.
+func selectDockerImage(ctx context.Context, preferred string) string {
+	if dockerImageExists(ctx, preferred) {
+		return preferred
+	}
+	return preferred
+}
+
+func dockerImageExists(ctx context.Context, image string) bool {
+	cmd := exec.CommandContext(ctx, "docker", "image", "inspect", image)
+	cmd.Env = os.Environ()
+	cmd.Stdout = nil
+	cmd.Stderr = nil
+	return cmd.Run() == nil
+}
+
+func openSQLWithRetry(ctx context.Context, dsn string, timeout time.Duration) (*sql.DB, error) {
+	deadline := time.Now().Add(timeout)
+	var lastErr error
+
+	for time.Now().Before(deadline) {
+		db, err := sql.Open("postgres", dsn)
+		if err != nil {
+			lastErr = err
+			time.Sleep(250 * time.Millisecond)
+			continue
+		}
+
+		if err := pingWithTimeout(ctx, db, 2*time.Second); err != nil {
+			lastErr = err
+			_ = db.Close()
+			time.Sleep(250 * time.Millisecond)
+			continue
+		}
+
+		return db, nil
+	}
+
+	return nil, fmt.Errorf("db not ready after %s: %w", timeout, lastErr)
+}
+
+func pingWithTimeout(ctx context.Context, db *sql.DB, timeout time.Duration) error {
+	pingCtx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	return db.PingContext(pingCtx)
+}
+
+func testTx(t *testing.T) *sql.Tx {
+	t.Helper()
+
+	tx, err := integrationDB.BeginTx(context.Background(), nil)
+	require.NoError(t, err, "begin tx")
+	t.Cleanup(func() {
+		_ = tx.Rollback()
+	})
+	return tx
+}
+
+// testEntClient returns the global ent client, for tests that exercise code
+// which manages transactions internally (e.g. Create/Update methods).
+// Note: operations through this client really write to the database and are
+// not rolled back automatically when the test finishes.
+func testEntClient(t *testing.T) *dbent.Client {
+	t.Helper()
+	return integrationEntClient
+}
+
+// testEntTx returns an ent transaction for tests that need transactional
+// isolation. It is rolled back automatically when the test finishes, leaving
+// the database state untouched.
+func testEntTx(t *testing.T) *dbent.Tx {
+	t.Helper()
+
+	tx, err := integrationEntClient.Tx(context.Background())
+	require.NoError(t, err, "begin ent tx")
+	t.Cleanup(func() {
+		_ = tx.Rollback()
+	})
+	return tx
+}
+
+// testEntSQLTx is deprecated: do not use it in new tests.
+// An ent client built on top of a *sql.Tx panics as soon as client.Tx() is
+// called. For code that manages its own transactions, use testEntClient; for
+// tests that need transactional isolation, use testEntTx.
+//
+// Deprecated: Use testEntClient or testEntTx instead.
+func testEntSQLTx(t *testing.T) (*dbent.Client, *sql.Tx) {
+	t.Helper()
+
+	// Fail immediately so misuse in old tests cannot reintroduce the
+	// nested-transaction panic.
+	t.Fatalf("testEntSQLTx is deprecated: use testEntClient or testEntTx instead")
+	return nil, nil
+}
+
+func testRedis(t *testing.T) *redisclient.Client {
+	t.Helper()
+
+	prefix := fmt.Sprintf(
+		"it:%s:%d:%d:",
+		sanitizeRedisNamespace(t.Name()),
+		time.Now().UnixNano(),
+		atomic.AddUint64(&redisNamespaceSeq, 1),
+	)
+
+	opts := *integrationRedis.Options()
+	rdb := redisclient.NewClient(&opts)
+	rdb.AddHook(prefixHook{prefix: prefix})
+
+	t.Cleanup(func() {
+		ctx := context.Background()
+
+		var cursor uint64
+		for {
+			keys, nextCursor, err := integrationRedis.Scan(ctx, cursor, prefix+"*", 500).Result()
+			require.NoError(t, err, "scan redis keys for cleanup")
+			if len(keys) > 0 {
+				require.NoError(t, integrationRedis.Unlink(ctx, keys...).Err(), "unlink redis keys for cleanup")
+			}
+
+			cursor = nextCursor
+			if cursor == 0 {
+				break
+			}
+		}
+
+		_ = rdb.Close()
+	})
+
+	return rdb
+}
+
+func assertTTLWithin(t *testing.T, ttl time.Duration, min, max time.Duration) {
+	t.Helper()
+	require.GreaterOrEqual(t, ttl, min, "ttl should be >= min")
+	require.LessOrEqual(t, ttl, max, "ttl should be <= max")
+}
+
+func sanitizeRedisNamespace(name string) string {
+	name = strings.ReplaceAll(name, "/", "_")
+	name = strings.ReplaceAll(name, " ", "_")
+	return name
+}
+
+type prefixHook struct {
+	prefix string
+}
+
+func (h prefixHook) DialHook(next redisclient.DialHook) redisclient.DialHook { return next }
+
+func (h prefixHook) ProcessHook(next redisclient.ProcessHook) redisclient.ProcessHook {
+	return func(ctx context.Context, cmd redisclient.Cmder) error {
+		h.prefixCmd(cmd)
+		return next(ctx, cmd)
+	}
+}
+
+func (h prefixHook) ProcessPipelineHook(next redisclient.ProcessPipelineHook) redisclient.ProcessPipelineHook {
+	return func(ctx context.Context, cmds []redisclient.Cmder) error {
+		for _, cmd := range cmds {
+			h.prefixCmd(cmd)
+		}
+		return next(ctx, cmds)
+	}
+}
+
+func (h prefixHook) prefixCmd(cmd redisclient.Cmder) {
+	args := cmd.Args()
+	if len(args) < 2 {
+		return
+	}
+
+	prefixOne := func(i int) {
+		if i < 0 || i >= len(args) {
+			return
+		}
+
+		switch v := args[i].(type) {
+		case string:
+			if v != "" && !strings.HasPrefix(v, h.prefix) {
+				args[i] = h.prefix + v
+			}
+		case []byte:
+			s := string(v)
+			if s != "" && !strings.HasPrefix(s, h.prefix) {
+				args[i] = []byte(h.prefix + s)
+			}
+		}
+	}
+
+	switch strings.ToLower(cmd.Name()) {
+	case "get", "set", "setnx", "setex", "psetex", "incr", "decr", "incrby", "expire", "pexpire", "ttl", "pttl",
+		"hgetall", "hget", "hset", "hdel", "hincrbyfloat", "exists",
+		"zadd", "zcard", "zrange", "zrangebyscore",
"zrem", "zremrangebyscore", "zrevrange", "zrevrangebyscore", "zscore": + prefixOne(1) + case "del", "unlink": + for i := 1; i < len(args); i++ { + prefixOne(i) + } + case "eval", "evalsha", "eval_ro", "evalsha_ro": + if len(args) < 3 { + return + } + numKeys, err := strconv.Atoi(fmt.Sprint(args[2])) + if err != nil || numKeys <= 0 { + return + } + for i := 0; i < numKeys && 3+i < len(args); i++ { + prefixOne(3 + i) + } + case "scan": + for i := 2; i+1 < len(args); i++ { + if strings.EqualFold(fmt.Sprint(args[i]), "match") { + prefixOne(i + 1) + break + } + } + } +} + +// IntegrationRedisSuite provides a base suite for Redis integration tests. +// Embedding suites should call SetupTest to initialize ctx and rdb. +type IntegrationRedisSuite struct { + suite.Suite + ctx context.Context + rdb *redisclient.Client +} + +// SetupTest initializes ctx and rdb for each test method. +func (s *IntegrationRedisSuite) SetupTest() { + s.ctx = context.Background() + s.rdb = testRedis(s.T()) +} + +// RequireNoError is a convenience method wrapping require.NoError with s.T(). +func (s *IntegrationRedisSuite) RequireNoError(err error, msgAndArgs ...any) { + s.T().Helper() + require.NoError(s.T(), err, msgAndArgs...) +} + +// AssertTTLWithin asserts that ttl is within [min, max]. +func (s *IntegrationRedisSuite) AssertTTLWithin(ttl, min, max time.Duration) { + s.T().Helper() + assertTTLWithin(s.T(), ttl, min, max) +} + +// IntegrationDBSuite provides a base suite for DB integration tests. +// Embedding suites should call SetupTest to initialize ctx and client. +type IntegrationDBSuite struct { + suite.Suite + ctx context.Context + client *dbent.Client + tx *dbent.Tx +} + +// SetupTest initializes ctx and client for each test method. +func (s *IntegrationDBSuite) SetupTest() { + s.ctx = context.Background() + // 统一使用 ent.Tx,确保每个测试都有独立事务并自动回滚。 + tx := testEntTx(s.T()) + s.tx = tx + s.client = tx.Client() +} + +// RequireNoError is a convenience method wrapping require.NoError with s.T(). +func (s *IntegrationDBSuite) RequireNoError(err error, msgAndArgs ...any) { + s.T().Helper() + require.NoError(s.T(), err, msgAndArgs...) 
+} diff --git a/backend/internal/repository/migrations_runner.go b/backend/internal/repository/migrations_runner.go new file mode 100644 index 00000000..5912e50f --- /dev/null +++ b/backend/internal/repository/migrations_runner.go @@ -0,0 +1,302 @@ +package repository + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "io/fs" + "sort" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/migrations" +) + +// schemaMigrationsTableDDL 定义迁移记录表的 DDL。 +// 该表用于跟踪已应用的迁移文件及其校验和。 +// - filename: 迁移文件名,作为主键唯一标识每个迁移 +// - checksum: 文件内容的 SHA256 哈希值,用于检测迁移文件是否被篡改 +// - applied_at: 迁移应用时间戳 +const schemaMigrationsTableDDL = ` +CREATE TABLE IF NOT EXISTS schema_migrations ( + filename TEXT PRIMARY KEY, + checksum TEXT NOT NULL, + applied_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); +` + +const atlasSchemaRevisionsTableDDL = ` +CREATE TABLE IF NOT EXISTS atlas_schema_revisions ( + version TEXT PRIMARY KEY, + description TEXT NOT NULL, + type INTEGER NOT NULL, + applied INTEGER NOT NULL DEFAULT 0, + total INTEGER NOT NULL DEFAULT 0, + executed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + execution_time BIGINT NOT NULL DEFAULT 0, + error TEXT NULL, + error_stmt TEXT NULL, + hash TEXT NOT NULL DEFAULT '', + partial_hashes TEXT[] NULL, + operator_version TEXT NULL +); +` + +// migrationsAdvisoryLockID 是用于序列化迁移操作的 PostgreSQL Advisory Lock ID。 +// 在多实例部署场景下,该锁确保同一时间只有一个实例执行迁移。 +// 任何稳定的 int64 值都可以,只要不与同一数据库中的其他锁冲突即可。 +const migrationsAdvisoryLockID int64 = 694208311321144027 +const migrationsLockRetryInterval = 500 * time.Millisecond + +// ApplyMigrations 将嵌入的 SQL 迁移文件应用到指定的数据库。 +// +// 该函数可以在每次应用启动时安全调用: +// - 已应用的迁移会被自动跳过(通过校验 filename 判断) +// - 如果迁移文件内容被修改(checksum 不匹配),会返回错误 +// - 使用 PostgreSQL Advisory Lock 确保多实例并发安全 +// +// 参数: +// - ctx: 上下文,用于超时控制和取消 +// - db: 数据库连接 +// +// 返回: +// - error: 迁移过程中的任何错误 +func ApplyMigrations(ctx context.Context, db *sql.DB) error { + if db == nil { + return errors.New("nil sql db") + } + return applyMigrationsFS(ctx, db, migrations.FS) +} + +// applyMigrationsFS 是迁移执行的核心实现。 +// 它从指定的文件系统读取 SQL 迁移文件并按顺序应用。 +// +// 迁移执行流程: +// 1. 获取 PostgreSQL Advisory Lock,防止多实例并发迁移 +// 2. 确保 schema_migrations 表存在 +// 3. 按文件名排序读取所有 .sql 文件 +// 4. 对于每个迁移文件: +// - 计算文件内容的 SHA256 校验和 +// - 检查该迁移是否已应用(通过 filename 查询) +// - 如果已应用,验证校验和是否匹配 +// - 如果未应用,在事务中执行迁移并记录 +// 5. 
释放 Advisory Lock +// +// 参数: +// - ctx: 上下文 +// - db: 数据库连接 +// - fsys: 包含迁移文件的文件系统(通常是 embed.FS) +func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { + if db == nil { + return errors.New("nil sql db") + } + + // 获取分布式锁,确保多实例部署时只有一个实例执行迁移。 + // 这是 PostgreSQL 特有的 Advisory Lock 机制。 + if err := pgAdvisoryLock(ctx, db); err != nil { + return err + } + defer func() { + // 无论迁移是否成功,都要释放锁。 + // 使用 context.Background() 确保即使原 ctx 已取消也能释放锁。 + _ = pgAdvisoryUnlock(context.Background(), db) + }() + + // 创建迁移记录表(如果不存在)。 + // 该表记录所有已应用的迁移及其校验和。 + if _, err := db.ExecContext(ctx, schemaMigrationsTableDDL); err != nil { + return fmt.Errorf("create schema_migrations: %w", err) + } + + // 自动对齐 Atlas 基线(如果检测到 legacy schema_migrations 且缺失 atlas_schema_revisions)。 + if err := ensureAtlasBaselineAligned(ctx, db, fsys); err != nil { + return err + } + + // 获取所有 .sql 迁移文件并按文件名排序。 + // 命名规范:使用零填充数字前缀(如 001_init.sql, 002_add_users.sql)。 + files, err := fs.Glob(fsys, "*.sql") + if err != nil { + return fmt.Errorf("list migrations: %w", err) + } + sort.Strings(files) // 确保按文件名顺序执行迁移 + + for _, name := range files { + // 读取迁移文件内容 + contentBytes, err := fs.ReadFile(fsys, name) + if err != nil { + return fmt.Errorf("read migration %s: %w", name, err) + } + + content := strings.TrimSpace(string(contentBytes)) + if content == "" { + continue // 跳过空文件 + } + + // 计算文件内容的 SHA256 校验和,用于检测文件是否被修改。 + // 这是一种防篡改机制:如果有人修改了已应用的迁移文件,系统会拒绝启动。 + sum := sha256.Sum256([]byte(content)) + checksum := hex.EncodeToString(sum[:]) + + // 检查该迁移是否已经应用 + var existing string + rowErr := db.QueryRowContext(ctx, "SELECT checksum FROM schema_migrations WHERE filename = $1", name).Scan(&existing) + if rowErr == nil { + // 迁移已应用,验证校验和是否匹配 + if existing != checksum { + // 校验和不匹配意味着迁移文件在应用后被修改,这是危险的。 + // 正确的做法是创建新的迁移文件来进行变更。 + return fmt.Errorf( + "migration %s checksum mismatch (db=%s file=%s)\n"+ + "This means the migration file was modified after being applied to the database.\n"+ + "Solutions:\n"+ + " 1. Revert to original: git log --oneline -- migrations/%s && git checkout -- migrations/%s\n"+ + " 2. 
For new changes, create a new migration file instead of modifying existing ones\n"+ + "Note: Modifying applied migrations breaks the immutability principle and can cause inconsistencies across environments", + name, existing, checksum, name, name, + ) + } + continue // 迁移已应用且校验和匹配,跳过 + } + if !errors.Is(rowErr, sql.ErrNoRows) { + return fmt.Errorf("check migration %s: %w", name, rowErr) + } + + // 迁移未应用,在事务中执行。 + // 使用事务确保迁移的原子性:要么完全成功,要么完全回滚。 + tx, err := db.BeginTx(ctx, nil) + if err != nil { + return fmt.Errorf("begin migration %s: %w", name, err) + } + + // 执行迁移 SQL + if _, err := tx.ExecContext(ctx, content); err != nil { + _ = tx.Rollback() + return fmt.Errorf("apply migration %s: %w", name, err) + } + + // 记录迁移已完成,保存文件名和校验和 + if _, err := tx.ExecContext(ctx, "INSERT INTO schema_migrations (filename, checksum) VALUES ($1, $2)", name, checksum); err != nil { + _ = tx.Rollback() + return fmt.Errorf("record migration %s: %w", name, err) + } + + // 提交事务 + if err := tx.Commit(); err != nil { + _ = tx.Rollback() + return fmt.Errorf("commit migration %s: %w", name, err) + } + } + + return nil +} + +func ensureAtlasBaselineAligned(ctx context.Context, db *sql.DB, fsys fs.FS) error { + hasLegacy, err := tableExists(ctx, db, "schema_migrations") + if err != nil { + return fmt.Errorf("check schema_migrations: %w", err) + } + if !hasLegacy { + return nil + } + + hasAtlas, err := tableExists(ctx, db, "atlas_schema_revisions") + if err != nil { + return fmt.Errorf("check atlas_schema_revisions: %w", err) + } + if !hasAtlas { + if _, err := db.ExecContext(ctx, atlasSchemaRevisionsTableDDL); err != nil { + return fmt.Errorf("create atlas_schema_revisions: %w", err) + } + } + + var count int + if err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM atlas_schema_revisions").Scan(&count); err != nil { + return fmt.Errorf("count atlas_schema_revisions: %w", err) + } + if count > 0 { + return nil + } + + version, description, hash, err := latestMigrationBaseline(fsys) + if err != nil { + return fmt.Errorf("atlas baseline version: %w", err) + } + + if _, err := db.ExecContext(ctx, ` + INSERT INTO atlas_schema_revisions (version, description, type, applied, total, executed_at, execution_time, hash) + VALUES ($1, $2, $3, 0, 0, NOW(), 0, $4) + `, version, description, 1, hash); err != nil { + return fmt.Errorf("insert atlas baseline: %w", err) + } + return nil +} + +func tableExists(ctx context.Context, db *sql.DB, tableName string) (bool, error) { + var exists bool + err := db.QueryRowContext(ctx, ` + SELECT EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = $1 + ) + `, tableName).Scan(&exists) + return exists, err +} + +func latestMigrationBaseline(fsys fs.FS) (string, string, string, error) { + files, err := fs.Glob(fsys, "*.sql") + if err != nil { + return "", "", "", err + } + if len(files) == 0 { + return "baseline", "baseline", "", nil + } + sort.Strings(files) + name := files[len(files)-1] + contentBytes, err := fs.ReadFile(fsys, name) + if err != nil { + return "", "", "", err + } + content := strings.TrimSpace(string(contentBytes)) + sum := sha256.Sum256([]byte(content)) + hash := hex.EncodeToString(sum[:]) + version := strings.TrimSuffix(name, ".sql") + return version, version, hash, nil +} + +// pgAdvisoryLock 获取 PostgreSQL Advisory Lock。 +// Advisory Lock 是一种轻量级的锁机制,不与任何特定的数据库对象关联。 +// 它非常适合用于应用层面的分布式锁场景,如迁移序列化。 +func pgAdvisoryLock(ctx context.Context, db *sql.DB) error { + ticker := time.NewTicker(migrationsLockRetryInterval) + defer 
ticker.Stop() + + for { + var locked bool + if err := db.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", migrationsAdvisoryLockID).Scan(&locked); err != nil { + return fmt.Errorf("acquire migrations lock: %w", err) + } + if locked { + return nil + } + select { + case <-ctx.Done(): + return fmt.Errorf("acquire migrations lock: %w", ctx.Err()) + case <-ticker.C: + } + } +} + +// pgAdvisoryUnlock 释放 PostgreSQL Advisory Lock。 +// 必须在获取锁后确保释放,否则会阻塞其他实例的迁移操作。 +func pgAdvisoryUnlock(ctx context.Context, db *sql.DB) error { + _, err := db.ExecContext(ctx, "SELECT pg_advisory_unlock($1)", migrationsAdvisoryLockID) + if err != nil { + return fmt.Errorf("release migrations lock: %w", err) + } + return nil +} diff --git a/backend/internal/repository/migrations_schema_integration_test.go b/backend/internal/repository/migrations_schema_integration_test.go new file mode 100644 index 00000000..bc37ee72 --- /dev/null +++ b/backend/internal/repository/migrations_schema_integration_test.go @@ -0,0 +1,103 @@ +//go:build integration + +package repository + +import ( + "context" + "database/sql" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMigrationsRunner_IsIdempotent_AndSchemaIsUpToDate(t *testing.T) { + tx := testTx(t) + + // Re-apply migrations to verify idempotency (no errors, no duplicate rows). + require.NoError(t, ApplyMigrations(context.Background(), integrationDB)) + + // schema_migrations should have at least the current migration set. + var applied int + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT COUNT(*) FROM schema_migrations").Scan(&applied)) + require.GreaterOrEqual(t, applied, 7, "expected schema_migrations to contain applied migrations") + + // users: columns required by repository queries + requireColumn(t, tx, "users", "username", "character varying", 100, false) + requireColumn(t, tx, "users", "notes", "text", 0, false) + + // accounts: schedulable and rate-limit fields + requireColumn(t, tx, "accounts", "notes", "text", 0, true) + requireColumn(t, tx, "accounts", "schedulable", "boolean", 0, false) + requireColumn(t, tx, "accounts", "rate_limited_at", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "rate_limit_reset_at", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "overload_until", "timestamp with time zone", 0, true) + requireColumn(t, tx, "accounts", "session_window_status", "character varying", 20, true) + + // api_keys: key length should be 128 + requireColumn(t, tx, "api_keys", "key", "character varying", 128, false) + + // redeem_codes: subscription fields + requireColumn(t, tx, "redeem_codes", "group_id", "bigint", 0, true) + requireColumn(t, tx, "redeem_codes", "validity_days", "integer", 0, false) + + // usage_logs: billing_type used by filters/stats + requireColumn(t, tx, "usage_logs", "billing_type", "smallint", 0, false) + + // settings table should exist + var settingsRegclass sql.NullString + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT to_regclass('public.settings')").Scan(&settingsRegclass)) + require.True(t, settingsRegclass.Valid, "expected settings table to exist") + + // user_allowed_groups table should exist + var uagRegclass sql.NullString + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT to_regclass('public.user_allowed_groups')").Scan(&uagRegclass)) + require.True(t, uagRegclass.Valid, "expected user_allowed_groups table to exist") + + // user_subscriptions: deleted_at for soft delete support (migration 012) + 
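+	// requireColumn asserts table, column and data_type, then the maximum
+	// length (a maxLen of 0 skips that check) and whether the column must be
+	// nullable.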
requireColumn(t, tx, "user_subscriptions", "deleted_at", "timestamp with time zone", 0, true) + + // orphan_allowed_groups_audit table should exist (migration 013) + var orphanAuditRegclass sql.NullString + require.NoError(t, tx.QueryRowContext(context.Background(), "SELECT to_regclass('public.orphan_allowed_groups_audit')").Scan(&orphanAuditRegclass)) + require.True(t, orphanAuditRegclass.Valid, "expected orphan_allowed_groups_audit table to exist") + + // account_groups: created_at should be timestamptz + requireColumn(t, tx, "account_groups", "created_at", "timestamp with time zone", 0, false) + + // user_allowed_groups: created_at should be timestamptz + requireColumn(t, tx, "user_allowed_groups", "created_at", "timestamp with time zone", 0, false) +} + +func requireColumn(t *testing.T, tx *sql.Tx, table, column, dataType string, maxLen int, nullable bool) { + t.Helper() + + var row struct { + DataType string + MaxLen sql.NullInt64 + Nullable string + } + + err := tx.QueryRowContext(context.Background(), ` +SELECT + data_type, + character_maximum_length, + is_nullable +FROM information_schema.columns +WHERE table_schema = 'public' + AND table_name = $1 + AND column_name = $2 +`, table, column).Scan(&row.DataType, &row.MaxLen, &row.Nullable) + require.NoError(t, err, "query information_schema.columns for %s.%s", table, column) + require.Equal(t, dataType, row.DataType, "data_type mismatch for %s.%s", table, column) + + if maxLen > 0 { + require.True(t, row.MaxLen.Valid, "expected maxLen for %s.%s", table, column) + require.Equal(t, int64(maxLen), row.MaxLen.Int64, "maxLen mismatch for %s.%s", table, column) + } + + if nullable { + require.Equal(t, "YES", row.Nullable, "nullable mismatch for %s.%s", table, column) + } else { + require.Equal(t, "NO", row.Nullable, "nullable mismatch for %s.%s", table, column) + } +} diff --git a/backend/internal/repository/openai_oauth_service.go b/backend/internal/repository/openai_oauth_service.go new file mode 100644 index 00000000..07d57410 --- /dev/null +++ b/backend/internal/repository/openai_oauth_service.go @@ -0,0 +1,89 @@ +package repository + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/imroc/req/v3" +) + +// NewOpenAIOAuthClient creates a new OpenAI OAuth client +func NewOpenAIOAuthClient() service.OpenAIOAuthClient { + return &openaiOAuthService{tokenURL: openai.TokenURL} +} + +type openaiOAuthService struct { + tokenURL string +} + +func (s *openaiOAuthService) ExchangeCode(ctx context.Context, code, codeVerifier, redirectURI, proxyURL string) (*openai.TokenResponse, error) { + client := createOpenAIReqClient(proxyURL) + + if redirectURI == "" { + redirectURI = openai.DefaultRedirectURI + } + + formData := url.Values{} + formData.Set("grant_type", "authorization_code") + formData.Set("client_id", openai.ClientID) + formData.Set("code", code) + formData.Set("redirect_uri", redirectURI) + formData.Set("code_verifier", codeVerifier) + + var tokenResp openai.TokenResponse + + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). 
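+		// req unmarshals the JSON body into tokenResp only for responses in
+		// success state (2xx); error bodies are surfaced via resp.String().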
+ Post(s.tokenURL) + + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token exchange failed: status %d, body: %s", resp.StatusCode, resp.String()) + } + + return &tokenResp, nil +} + +func (s *openaiOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*openai.TokenResponse, error) { + client := createOpenAIReqClient(proxyURL) + + formData := url.Values{} + formData.Set("grant_type", "refresh_token") + formData.Set("refresh_token", refreshToken) + formData.Set("client_id", openai.ClientID) + formData.Set("scope", openai.RefreshScopes) + + var tokenResp openai.TokenResponse + + resp, err := client.R(). + SetContext(ctx). + SetFormDataFromValues(formData). + SetSuccessResult(&tokenResp). + Post(s.tokenURL) + + if err != nil { + return nil, fmt.Errorf("request failed: %w", err) + } + + if !resp.IsSuccessState() { + return nil, fmt.Errorf("token refresh failed: status %d, body: %s", resp.StatusCode, resp.String()) + } + + return &tokenResp, nil +} + +func createOpenAIReqClient(proxyURL string) *req.Client { + return getSharedReqClient(reqClientOptions{ + ProxyURL: proxyURL, + Timeout: 60 * time.Second, + }) +} diff --git a/backend/internal/repository/openai_oauth_service_test.go b/backend/internal/repository/openai_oauth_service_test.go new file mode 100644 index 00000000..51142306 --- /dev/null +++ b/backend/internal/repository/openai_oauth_service_test.go @@ -0,0 +1,249 @@ +package repository + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type OpenAIOAuthServiceSuite struct { + suite.Suite + ctx context.Context + srv *httptest.Server + svc *openaiOAuthService + received chan url.Values +} + +func (s *OpenAIOAuthServiceSuite) SetupTest() { + s.ctx = context.Background() + s.received = make(chan url.Values, 1) +} + +func (s *OpenAIOAuthServiceSuite) TearDownTest() { + if s.srv != nil { + s.srv.Close() + s.srv = nil + } +} + +func (s *OpenAIOAuthServiceSuite) setupServer(handler http.HandlerFunc) { + s.srv = newLocalTestServer(s.T(), handler) + s.svc = &openaiOAuthService{tokenURL: s.srv.URL} +} + +func (s *OpenAIOAuthServiceSuite) TestExchangeCode_DefaultRedirectURI() { + errCh := make(chan string, 1) + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + errCh <- "method mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if err := r.ParseForm(); err != nil { + errCh <- "ParseForm failed" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("grant_type"); got != "authorization_code" { + errCh <- "grant_type mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("client_id"); got != openai.ClientID { + errCh <- "client_id mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("code"); got != "code" { + errCh <- "code mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("redirect_uri"); got != openai.DefaultRedirectURI { + errCh <- "redirect_uri mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("code_verifier"); got != "ver" { + errCh <- "code_verifier mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + _, 
_ = io.WriteString(w, `{"access_token":"at","refresh_token":"rt","token_type":"bearer","expires_in":3600}`) + })) + + resp, err := s.svc.ExchangeCode(s.ctx, "code", "ver", "", "") + require.NoError(s.T(), err, "ExchangeCode") + select { + case msg := <-errCh: + require.Fail(s.T(), msg) + default: + } + require.Equal(s.T(), "at", resp.AccessToken) + require.Equal(s.T(), "rt", resp.RefreshToken) +} + +func (s *OpenAIOAuthServiceSuite) TestRefreshToken_FormFields() { + errCh := make(chan string, 1) + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + errCh <- "ParseForm failed" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("grant_type"); got != "refresh_token" { + errCh <- "grant_type mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("refresh_token"); got != "rt" { + errCh <- "refresh_token mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("client_id"); got != openai.ClientID { + errCh <- "client_id mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + if got := r.PostForm.Get("scope"); got != openai.RefreshScopes { + errCh <- "scope mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, `{"access_token":"at2","refresh_token":"rt2","token_type":"bearer","expires_in":3600}`) + })) + + resp, err := s.svc.RefreshToken(s.ctx, "rt", "") + require.NoError(s.T(), err, "RefreshToken") + select { + case msg := <-errCh: + require.Fail(s.T(), msg) + default: + } + require.Equal(s.T(), "at2", resp.AccessToken) + require.Equal(s.T(), "rt2", resp.RefreshToken) +} + +func (s *OpenAIOAuthServiceSuite) TestNonSuccessStatus_IncludesBody() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + _, _ = io.WriteString(w, "bad") + })) + + _, err := s.svc.ExchangeCode(s.ctx, "code", "ver", openai.DefaultRedirectURI, "") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "status 400") + require.ErrorContains(s.T(), err, "bad") +} + +func (s *OpenAIOAuthServiceSuite) TestRequestError_ClosedServer() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + s.srv.Close() + + _, err := s.svc.ExchangeCode(s.ctx, "code", "ver", openai.DefaultRedirectURI, "") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "request failed") +} + +func (s *OpenAIOAuthServiceSuite) TestContextCancel() { + started := make(chan struct{}) + block := make(chan struct{}) + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + close(started) + <-block + })) + + ctx, cancel := context.WithCancel(s.ctx) + + done := make(chan error, 1) + go func() { + _, err := s.svc.ExchangeCode(ctx, "code", "ver", openai.DefaultRedirectURI, "") + done <- err + }() + + <-started + cancel() + close(block) + + err := <-done + require.Error(s.T(), err) +} + +func (s *OpenAIOAuthServiceSuite) TestExchangeCode_UsesProvidedRedirectURI() { + want := "http://localhost:9999/cb" + errCh := make(chan string, 1) + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _ = r.ParseForm() + if got := r.PostForm.Get("redirect_uri"); got != want { + errCh <- "redirect_uri mismatch" + w.WriteHeader(http.StatusBadRequest) + return + } + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, 
`{"access_token":"at","token_type":"bearer","expires_in":1}`) + })) + + _, err := s.svc.ExchangeCode(s.ctx, "code", "ver", want, "") + require.NoError(s.T(), err, "ExchangeCode") + select { + case msg := <-errCh: + require.Fail(s.T(), msg) + default: + } +} + +func (s *OpenAIOAuthServiceSuite) TestTokenURL_CanBeOverriddenWithQuery() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _ = r.ParseForm() + s.received <- r.PostForm + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, `{"access_token":"at","token_type":"bearer","expires_in":1}`) + })) + s.svc.tokenURL = s.srv.URL + "?x=1" + + _, err := s.svc.ExchangeCode(s.ctx, "code", "ver", openai.DefaultRedirectURI, "") + require.NoError(s.T(), err, "ExchangeCode") + select { + case <-s.received: + default: + require.Fail(s.T(), "expected server to receive request") + } +} + +func (s *OpenAIOAuthServiceSuite) TestExchangeCode_SuccessButInvalidJSON() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = io.WriteString(w, "not-valid-json") + })) + + _, err := s.svc.ExchangeCode(s.ctx, "code", "ver", openai.DefaultRedirectURI, "") + require.Error(s.T(), err, "expected error for invalid JSON response") +} + +func (s *OpenAIOAuthServiceSuite) TestRefreshToken_NonSuccessStatus() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + _, _ = io.WriteString(w, "unauthorized") + })) + + _, err := s.svc.RefreshToken(s.ctx, "rt", "") + require.Error(s.T(), err, "expected error for non-2xx status") + require.ErrorContains(s.T(), err, "status 401") +} + +func TestOpenAIOAuthServiceSuite(t *testing.T) { + suite.Run(t, new(OpenAIOAuthServiceSuite)) +} diff --git a/backend/internal/repository/ops_repo.go b/backend/internal/repository/ops_repo.go new file mode 100644 index 00000000..613c5bd5 --- /dev/null +++ b/backend/internal/repository/ops_repo.go @@ -0,0 +1,1098 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" +) + +type opsRepository struct { + db *sql.DB +} + +func NewOpsRepository(db *sql.DB) service.OpsRepository { + return &opsRepository{db: db} +} + +func (r *opsRepository) InsertErrorLog(ctx context.Context, input *service.OpsInsertErrorLogInput) (int64, error) { + if r == nil || r.db == nil { + return 0, fmt.Errorf("nil ops repository") + } + if input == nil { + return 0, fmt.Errorf("nil input") + } + + q := ` +INSERT INTO ops_error_logs ( + request_id, + client_request_id, + user_id, + api_key_id, + account_id, + group_id, + client_ip, + platform, + model, + request_path, + stream, + user_agent, + error_phase, + error_type, + severity, + status_code, + is_business_limited, + is_count_tokens, + error_message, + error_body, + error_source, + error_owner, + upstream_status_code, + upstream_error_message, + upstream_error_detail, + upstream_errors, + time_to_first_token_ms, + request_body, + request_body_truncated, + request_body_bytes, + request_headers, + is_retryable, + retry_count, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30,$31,$32,$33,$34 +) RETURNING id` + + var id int64 + err := r.db.QueryRowContext( + ctx, + q, + opsNullString(input.RequestID), + opsNullString(input.ClientRequestID), + 
opsNullInt64(input.UserID),
+		opsNullInt64(input.APIKeyID),
+		opsNullInt64(input.AccountID),
+		opsNullInt64(input.GroupID),
+		opsNullString(input.ClientIP),
+		opsNullString(input.Platform),
+		opsNullString(input.Model),
+		opsNullString(input.RequestPath),
+		input.Stream,
+		opsNullString(input.UserAgent),
+		input.ErrorPhase,
+		input.ErrorType,
+		opsNullString(input.Severity),
+		opsNullInt(input.StatusCode),
+		input.IsBusinessLimited,
+		input.IsCountTokens,
+		opsNullString(input.ErrorMessage),
+		opsNullString(input.ErrorBody),
+		opsNullString(input.ErrorSource),
+		opsNullString(input.ErrorOwner),
+		opsNullInt(input.UpstreamStatusCode),
+		opsNullString(input.UpstreamErrorMessage),
+		opsNullString(input.UpstreamErrorDetail),
+		opsNullString(input.UpstreamErrorsJSON),
+		opsNullInt64(input.TimeToFirstTokenMs),
+		opsNullString(input.RequestBodyJSON),
+		input.RequestBodyTruncated,
+		opsNullInt(input.RequestBodyBytes),
+		opsNullString(input.RequestHeadersJSON),
+		input.IsRetryable,
+		input.RetryCount,
+		input.CreatedAt,
+	).Scan(&id)
+	if err != nil {
+		return 0, err
+	}
+	return id, nil
+}
+
+func (r *opsRepository) ListErrorLogs(ctx context.Context, filter *service.OpsErrorLogFilter) (*service.OpsErrorLogList, error) {
+	if r == nil || r.db == nil {
+		return nil, fmt.Errorf("nil ops repository")
+	}
+	if filter == nil {
+		filter = &service.OpsErrorLogFilter{}
+	}
+
+	page := filter.Page
+	if page <= 0 {
+		page = 1
+	}
+	pageSize := filter.PageSize
+	if pageSize <= 0 {
+		pageSize = 20
+	}
+	if pageSize > 500 {
+		pageSize = 500
+	}
+
+	where, args := buildOpsErrorLogsWhere(filter)
+	// The users join is required here as well: buildOpsErrorLogsWhere may emit a
+	// "u.email ILIKE" predicate (UserQuery), which would fail without the join.
+	countSQL := "SELECT COUNT(*) FROM ops_error_logs e LEFT JOIN users u ON e.user_id = u.id " + where
+
+	var total int
+	if err := r.db.QueryRowContext(ctx, countSQL, args...).Scan(&total); err != nil {
+		return nil, err
+	}
+
+	offset := (page - 1) * pageSize
+	argsWithLimit := append(args, pageSize, offset)
+	selectSQL := `
+SELECT
+	e.id,
+	e.created_at,
+	e.error_phase,
+	e.error_type,
+	COALESCE(e.error_owner, ''),
+	COALESCE(e.error_source, ''),
+	e.severity,
+	COALESCE(e.upstream_status_code, e.status_code, 0),
+	COALESCE(e.platform, ''),
+	COALESCE(e.model, ''),
+	COALESCE(e.is_retryable, false),
+	COALESCE(e.retry_count, 0),
+	COALESCE(e.resolved, false),
+	e.resolved_at,
+	e.resolved_by_user_id,
+	COALESCE(u2.email, ''),
+	e.resolved_retry_id,
+	COALESCE(e.client_request_id, ''),
+	COALESCE(e.request_id, ''),
+	COALESCE(e.error_message, ''),
+	e.user_id,
+	COALESCE(u.email, ''),
+	e.api_key_id,
+	e.account_id,
+	COALESCE(a.name, ''),
+	e.group_id,
+	COALESCE(g.name, ''),
+	CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END,
+	COALESCE(e.request_path, ''),
+	e.stream
+FROM ops_error_logs e
+LEFT JOIN accounts a ON e.account_id = a.id
+LEFT JOIN groups g ON e.group_id = g.id
+LEFT JOIN users u ON e.user_id = u.id
+LEFT JOIN users u2 ON e.resolved_by_user_id = u2.id
+` + where + `
+ORDER BY e.created_at DESC
+LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2)
+
+	rows, err := r.db.QueryContext(ctx, selectSQL, argsWithLimit...)
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsErrorLog, 0, pageSize) + for rows.Next() { + var item service.OpsErrorLog + var statusCode sql.NullInt64 + var clientIP sql.NullString + var userID sql.NullInt64 + var apiKeyID sql.NullInt64 + var accountID sql.NullInt64 + var accountName string + var groupID sql.NullInt64 + var groupName string + var userEmail string + var resolvedAt sql.NullTime + var resolvedBy sql.NullInt64 + var resolvedByName string + var resolvedRetryID sql.NullInt64 + if err := rows.Scan( + &item.ID, + &item.CreatedAt, + &item.Phase, + &item.Type, + &item.Owner, + &item.Source, + &item.Severity, + &statusCode, + &item.Platform, + &item.Model, + &item.IsRetryable, + &item.RetryCount, + &item.Resolved, + &resolvedAt, + &resolvedBy, + &resolvedByName, + &resolvedRetryID, + &item.ClientRequestID, + &item.RequestID, + &item.Message, + &userID, + &userEmail, + &apiKeyID, + &accountID, + &accountName, + &groupID, + &groupName, + &clientIP, + &item.RequestPath, + &item.Stream, + ); err != nil { + return nil, err + } + if resolvedAt.Valid { + t := resolvedAt.Time + item.ResolvedAt = &t + } + if resolvedBy.Valid { + v := resolvedBy.Int64 + item.ResolvedByUserID = &v + } + item.ResolvedByUserName = resolvedByName + if resolvedRetryID.Valid { + v := resolvedRetryID.Int64 + item.ResolvedRetryID = &v + } + item.StatusCode = int(statusCode.Int64) + if clientIP.Valid { + s := clientIP.String + item.ClientIP = &s + } + if userID.Valid { + v := userID.Int64 + item.UserID = &v + } + item.UserEmail = userEmail + if apiKeyID.Valid { + v := apiKeyID.Int64 + item.APIKeyID = &v + } + if accountID.Valid { + v := accountID.Int64 + item.AccountID = &v + } + item.AccountName = accountName + if groupID.Valid { + v := groupID.Int64 + item.GroupID = &v + } + item.GroupName = groupName + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return &service.OpsErrorLogList{ + Errors: out, + Total: total, + Page: page, + PageSize: pageSize, + }, nil +} + +func (r *opsRepository) GetErrorLogByID(ctx context.Context, id int64) (*service.OpsErrorLogDetail, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if id <= 0 { + return nil, fmt.Errorf("invalid id") + } + + q := ` +SELECT + e.id, + e.created_at, + e.error_phase, + e.error_type, + COALESCE(e.error_owner, ''), + COALESCE(e.error_source, ''), + e.severity, + COALESCE(e.upstream_status_code, e.status_code, 0), + COALESCE(e.platform, ''), + COALESCE(e.model, ''), + COALESCE(e.is_retryable, false), + COALESCE(e.retry_count, 0), + COALESCE(e.resolved, false), + e.resolved_at, + e.resolved_by_user_id, + e.resolved_retry_id, + COALESCE(e.client_request_id, ''), + COALESCE(e.request_id, ''), + COALESCE(e.error_message, ''), + COALESCE(e.error_body, ''), + e.upstream_status_code, + COALESCE(e.upstream_error_message, ''), + COALESCE(e.upstream_error_detail, ''), + COALESCE(e.upstream_errors::text, ''), + e.is_business_limited, + e.user_id, + COALESCE(u.email, ''), + e.api_key_id, + e.account_id, + COALESCE(a.name, ''), + e.group_id, + COALESCE(g.name, ''), + CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END, + COALESCE(e.request_path, ''), + e.stream, + COALESCE(e.user_agent, ''), + e.auth_latency_ms, + e.routing_latency_ms, + e.upstream_latency_ms, + e.response_latency_ms, + e.time_to_first_token_ms, + COALESCE(e.request_body::text, ''), + e.request_body_truncated, + e.request_body_bytes, + 
COALESCE(e.request_headers::text, '') +FROM ops_error_logs e +LEFT JOIN users u ON e.user_id = u.id +LEFT JOIN accounts a ON e.account_id = a.id +LEFT JOIN groups g ON e.group_id = g.id +WHERE e.id = $1 +LIMIT 1` + + var out service.OpsErrorLogDetail + var statusCode sql.NullInt64 + var upstreamStatusCode sql.NullInt64 + var resolvedAt sql.NullTime + var resolvedBy sql.NullInt64 + var resolvedRetryID sql.NullInt64 + var clientIP sql.NullString + var userID sql.NullInt64 + var apiKeyID sql.NullInt64 + var accountID sql.NullInt64 + var groupID sql.NullInt64 + var authLatency sql.NullInt64 + var routingLatency sql.NullInt64 + var upstreamLatency sql.NullInt64 + var responseLatency sql.NullInt64 + var ttft sql.NullInt64 + var requestBodyBytes sql.NullInt64 + + err := r.db.QueryRowContext(ctx, q, id).Scan( + &out.ID, + &out.CreatedAt, + &out.Phase, + &out.Type, + &out.Owner, + &out.Source, + &out.Severity, + &statusCode, + &out.Platform, + &out.Model, + &out.IsRetryable, + &out.RetryCount, + &out.Resolved, + &resolvedAt, + &resolvedBy, + &resolvedRetryID, + &out.ClientRequestID, + &out.RequestID, + &out.Message, + &out.ErrorBody, + &upstreamStatusCode, + &out.UpstreamErrorMessage, + &out.UpstreamErrorDetail, + &out.UpstreamErrors, + &out.IsBusinessLimited, + &userID, + &out.UserEmail, + &apiKeyID, + &accountID, + &out.AccountName, + &groupID, + &out.GroupName, + &clientIP, + &out.RequestPath, + &out.Stream, + &out.UserAgent, + &authLatency, + &routingLatency, + &upstreamLatency, + &responseLatency, + &ttft, + &out.RequestBody, + &out.RequestBodyTruncated, + &requestBodyBytes, + &out.RequestHeaders, + ) + if err != nil { + return nil, err + } + + out.StatusCode = int(statusCode.Int64) + if resolvedAt.Valid { + t := resolvedAt.Time + out.ResolvedAt = &t + } + if resolvedBy.Valid { + v := resolvedBy.Int64 + out.ResolvedByUserID = &v + } + if resolvedRetryID.Valid { + v := resolvedRetryID.Int64 + out.ResolvedRetryID = &v + } + if clientIP.Valid { + s := clientIP.String + out.ClientIP = &s + } + if upstreamStatusCode.Valid && upstreamStatusCode.Int64 > 0 { + v := int(upstreamStatusCode.Int64) + out.UpstreamStatusCode = &v + } + if userID.Valid { + v := userID.Int64 + out.UserID = &v + } + if apiKeyID.Valid { + v := apiKeyID.Int64 + out.APIKeyID = &v + } + if accountID.Valid { + v := accountID.Int64 + out.AccountID = &v + } + if groupID.Valid { + v := groupID.Int64 + out.GroupID = &v + } + if authLatency.Valid { + v := authLatency.Int64 + out.AuthLatencyMs = &v + } + if routingLatency.Valid { + v := routingLatency.Int64 + out.RoutingLatencyMs = &v + } + if upstreamLatency.Valid { + v := upstreamLatency.Int64 + out.UpstreamLatencyMs = &v + } + if responseLatency.Valid { + v := responseLatency.Int64 + out.ResponseLatencyMs = &v + } + if ttft.Valid { + v := ttft.Int64 + out.TimeToFirstTokenMs = &v + } + if requestBodyBytes.Valid { + v := int(requestBodyBytes.Int64) + out.RequestBodyBytes = &v + } + + // Normalize request_body to empty string when stored as JSON null. + out.RequestBody = strings.TrimSpace(out.RequestBody) + if out.RequestBody == "null" { + out.RequestBody = "" + } + // Normalize request_headers to empty string when stored as JSON null. + out.RequestHeaders = strings.TrimSpace(out.RequestHeaders) + if out.RequestHeaders == "null" { + out.RequestHeaders = "" + } + // Normalize upstream_errors to empty string when stored as JSON null. 
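+	// (COALESCE in the query only catches SQL NULL; a JSON null stored in a jsonb
+	// column casts to the literal string "null" via ::text, hence these checks.)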
+ out.UpstreamErrors = strings.TrimSpace(out.UpstreamErrors) + if out.UpstreamErrors == "null" { + out.UpstreamErrors = "" + } + + return &out, nil +} + +func (r *opsRepository) InsertRetryAttempt(ctx context.Context, input *service.OpsInsertRetryAttemptInput) (int64, error) { + if r == nil || r.db == nil { + return 0, fmt.Errorf("nil ops repository") + } + if input == nil { + return 0, fmt.Errorf("nil input") + } + if input.SourceErrorID <= 0 { + return 0, fmt.Errorf("invalid source_error_id") + } + if strings.TrimSpace(input.Mode) == "" { + return 0, fmt.Errorf("invalid mode") + } + + q := ` +INSERT INTO ops_retry_attempts ( + requested_by_user_id, + source_error_id, + mode, + pinned_account_id, + status, + started_at +) VALUES ( + $1,$2,$3,$4,$5,$6 +) RETURNING id` + + var id int64 + err := r.db.QueryRowContext( + ctx, + q, + opsNullInt64(&input.RequestedByUserID), + input.SourceErrorID, + strings.TrimSpace(input.Mode), + opsNullInt64(input.PinnedAccountID), + strings.TrimSpace(input.Status), + input.StartedAt, + ).Scan(&id) + if err != nil { + return 0, err + } + return id, nil +} + +func (r *opsRepository) UpdateRetryAttempt(ctx context.Context, input *service.OpsUpdateRetryAttemptInput) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + if input.ID <= 0 { + return fmt.Errorf("invalid id") + } + + q := ` +UPDATE ops_retry_attempts +SET + status = $2, + finished_at = $3, + duration_ms = $4, + success = $5, + http_status_code = $6, + upstream_request_id = $7, + used_account_id = $8, + response_preview = $9, + response_truncated = $10, + result_request_id = $11, + result_error_id = $12, + error_message = $13 +WHERE id = $1` + + _, err := r.db.ExecContext( + ctx, + q, + input.ID, + strings.TrimSpace(input.Status), + nullTime(input.FinishedAt), + input.DurationMs, + nullBool(input.Success), + nullInt(input.HTTPStatusCode), + opsNullString(input.UpstreamRequestID), + nullInt64(input.UsedAccountID), + opsNullString(input.ResponsePreview), + nullBool(input.ResponseTruncated), + opsNullString(input.ResultRequestID), + nullInt64(input.ResultErrorID), + opsNullString(input.ErrorMessage), + ) + return err +} + +func (r *opsRepository) GetLatestRetryAttemptForError(ctx context.Context, sourceErrorID int64) (*service.OpsRetryAttempt, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if sourceErrorID <= 0 { + return nil, fmt.Errorf("invalid source_error_id") + } + + q := ` +SELECT + id, + created_at, + COALESCE(requested_by_user_id, 0), + source_error_id, + COALESCE(mode, ''), + pinned_account_id, + COALESCE(status, ''), + started_at, + finished_at, + duration_ms, + success, + http_status_code, + upstream_request_id, + used_account_id, + response_preview, + response_truncated, + result_request_id, + result_error_id, + error_message +FROM ops_retry_attempts +WHERE source_error_id = $1 +ORDER BY created_at DESC +LIMIT 1` + + var out service.OpsRetryAttempt + var pinnedAccountID sql.NullInt64 + var requestedBy sql.NullInt64 + var startedAt sql.NullTime + var finishedAt sql.NullTime + var durationMs sql.NullInt64 + var success sql.NullBool + var httpStatusCode sql.NullInt64 + var upstreamRequestID sql.NullString + var usedAccountID sql.NullInt64 + var responsePreview sql.NullString + var responseTruncated sql.NullBool + var resultRequestID sql.NullString + var resultErrorID sql.NullInt64 + var errorMessage sql.NullString + + err := r.db.QueryRowContext(ctx, q, 
sourceErrorID).Scan( + &out.ID, + &out.CreatedAt, + &requestedBy, + &out.SourceErrorID, + &out.Mode, + &pinnedAccountID, + &out.Status, + &startedAt, + &finishedAt, + &durationMs, + &success, + &httpStatusCode, + &upstreamRequestID, + &usedAccountID, + &responsePreview, + &responseTruncated, + &resultRequestID, + &resultErrorID, + &errorMessage, + ) + if err != nil { + return nil, err + } + out.RequestedByUserID = requestedBy.Int64 + if pinnedAccountID.Valid { + v := pinnedAccountID.Int64 + out.PinnedAccountID = &v + } + if startedAt.Valid { + t := startedAt.Time + out.StartedAt = &t + } + if finishedAt.Valid { + t := finishedAt.Time + out.FinishedAt = &t + } + if durationMs.Valid { + v := durationMs.Int64 + out.DurationMs = &v + } + if success.Valid { + v := success.Bool + out.Success = &v + } + if httpStatusCode.Valid { + v := int(httpStatusCode.Int64) + out.HTTPStatusCode = &v + } + if upstreamRequestID.Valid { + s := upstreamRequestID.String + out.UpstreamRequestID = &s + } + if usedAccountID.Valid { + v := usedAccountID.Int64 + out.UsedAccountID = &v + } + if responsePreview.Valid { + s := responsePreview.String + out.ResponsePreview = &s + } + if responseTruncated.Valid { + v := responseTruncated.Bool + out.ResponseTruncated = &v + } + if resultRequestID.Valid { + s := resultRequestID.String + out.ResultRequestID = &s + } + if resultErrorID.Valid { + v := resultErrorID.Int64 + out.ResultErrorID = &v + } + if errorMessage.Valid { + s := errorMessage.String + out.ErrorMessage = &s + } + + return &out, nil +} + +func nullTime(t time.Time) sql.NullTime { + if t.IsZero() { + return sql.NullTime{} + } + return sql.NullTime{Time: t, Valid: true} +} + +func nullBool(v *bool) sql.NullBool { + if v == nil { + return sql.NullBool{} + } + return sql.NullBool{Bool: *v, Valid: true} +} + +func (r *opsRepository) ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*service.OpsRetryAttempt, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if sourceErrorID <= 0 { + return nil, fmt.Errorf("invalid source_error_id") + } + if limit <= 0 { + limit = 50 + } + if limit > 200 { + limit = 200 + } + + q := ` +SELECT + r.id, + r.created_at, + COALESCE(r.requested_by_user_id, 0), + r.source_error_id, + COALESCE(r.mode, ''), + r.pinned_account_id, + COALESCE(pa.name, ''), + COALESCE(r.status, ''), + r.started_at, + r.finished_at, + r.duration_ms, + r.success, + r.http_status_code, + r.upstream_request_id, + r.used_account_id, + COALESCE(ua.name, ''), + r.response_preview, + r.response_truncated, + r.result_request_id, + r.result_error_id, + r.error_message +FROM ops_retry_attempts r +LEFT JOIN accounts pa ON r.pinned_account_id = pa.id +LEFT JOIN accounts ua ON r.used_account_id = ua.id +WHERE r.source_error_id = $1 +ORDER BY r.created_at DESC +LIMIT $2` + + rows, err := r.db.QueryContext(ctx, q, sourceErrorID, limit) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsRetryAttempt, 0, 16) + for rows.Next() { + var item service.OpsRetryAttempt + var pinnedAccountID sql.NullInt64 + var pinnedAccountName string + var requestedBy sql.NullInt64 + var startedAt sql.NullTime + var finishedAt sql.NullTime + var durationMs sql.NullInt64 + var success sql.NullBool + var httpStatusCode sql.NullInt64 + var upstreamRequestID sql.NullString + var usedAccountID sql.NullInt64 + var usedAccountName string + var responsePreview sql.NullString + var responseTruncated sql.NullBool + var resultRequestID 
sql.NullString + var resultErrorID sql.NullInt64 + var errorMessage sql.NullString + + if err := rows.Scan( + &item.ID, + &item.CreatedAt, + &requestedBy, + &item.SourceErrorID, + &item.Mode, + &pinnedAccountID, + &pinnedAccountName, + &item.Status, + &startedAt, + &finishedAt, + &durationMs, + &success, + &httpStatusCode, + &upstreamRequestID, + &usedAccountID, + &usedAccountName, + &responsePreview, + &responseTruncated, + &resultRequestID, + &resultErrorID, + &errorMessage, + ); err != nil { + return nil, err + } + + item.RequestedByUserID = requestedBy.Int64 + if pinnedAccountID.Valid { + v := pinnedAccountID.Int64 + item.PinnedAccountID = &v + } + item.PinnedAccountName = pinnedAccountName + if startedAt.Valid { + t := startedAt.Time + item.StartedAt = &t + } + if finishedAt.Valid { + t := finishedAt.Time + item.FinishedAt = &t + } + if durationMs.Valid { + v := durationMs.Int64 + item.DurationMs = &v + } + if success.Valid { + v := success.Bool + item.Success = &v + } + if httpStatusCode.Valid { + v := int(httpStatusCode.Int64) + item.HTTPStatusCode = &v + } + if upstreamRequestID.Valid { + item.UpstreamRequestID = &upstreamRequestID.String + } + if usedAccountID.Valid { + v := usedAccountID.Int64 + item.UsedAccountID = &v + } + item.UsedAccountName = usedAccountName + if responsePreview.Valid { + item.ResponsePreview = &responsePreview.String + } + if responseTruncated.Valid { + v := responseTruncated.Bool + item.ResponseTruncated = &v + } + if resultRequestID.Valid { + item.ResultRequestID = &resultRequestID.String + } + if resultErrorID.Valid { + v := resultErrorID.Int64 + item.ResultErrorID = &v + } + if errorMessage.Valid { + item.ErrorMessage = &errorMessage.String + } + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64, resolvedAt *time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if errorID <= 0 { + return fmt.Errorf("invalid error id") + } + + q := ` +UPDATE ops_error_logs +SET + resolved = $2, + resolved_at = $3, + resolved_by_user_id = $4, + resolved_retry_id = $5 +WHERE id = $1` + + at := sql.NullTime{} + if resolvedAt != nil && !resolvedAt.IsZero() { + at = sql.NullTime{Time: resolvedAt.UTC(), Valid: true} + } else if resolved { + now := time.Now().UTC() + at = sql.NullTime{Time: now, Valid: true} + } + + _, err := r.db.ExecContext( + ctx, + q, + errorID, + resolved, + at, + nullInt64(resolvedByUserID), + nullInt64(resolvedRetryID), + ) + return err +} + +func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { + clauses := make([]string, 0, 12) + args := make([]any, 0, 12) + clauses = append(clauses, "1=1") + + phaseFilter := "" + if filter != nil { + phaseFilter = strings.TrimSpace(strings.ToLower(filter.Phase)) + } + // ops_error_logs stores client-visible error requests (status>=400), + // but we also persist "recovered" upstream errors (status<400) for upstream health visibility. + // If Resolved is not specified, do not filter by resolved state (backward-compatible). + resolvedFilter := (*bool)(nil) + if filter != nil { + resolvedFilter = filter.Resolved + } + // Keep list endpoints scoped to client errors unless explicitly filtering upstream phase. 
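+	// ("Recovered" upstream errors carry status_code < 400, per the note above,
+	// so they surface only when phase=upstream is requested explicitly.)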
+ if phaseFilter != "upstream" { + clauses = append(clauses, "COALESCE(status_code, 0) >= 400") + } + + if filter.StartTime != nil && !filter.StartTime.IsZero() { + args = append(args, filter.StartTime.UTC()) + clauses = append(clauses, "e.created_at >= $"+itoa(len(args))) + } + if filter.EndTime != nil && !filter.EndTime.IsZero() { + args = append(args, filter.EndTime.UTC()) + // Keep time-window semantics consistent with other ops queries: [start, end) + clauses = append(clauses, "e.created_at < $"+itoa(len(args))) + } + if p := strings.TrimSpace(filter.Platform); p != "" { + args = append(args, p) + clauses = append(clauses, "platform = $"+itoa(len(args))) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + args = append(args, *filter.GroupID) + clauses = append(clauses, "group_id = $"+itoa(len(args))) + } + if filter.AccountID != nil && *filter.AccountID > 0 { + args = append(args, *filter.AccountID) + clauses = append(clauses, "account_id = $"+itoa(len(args))) + } + if phase := phaseFilter; phase != "" { + args = append(args, phase) + clauses = append(clauses, "error_phase = $"+itoa(len(args))) + } + if filter != nil { + if owner := strings.TrimSpace(strings.ToLower(filter.Owner)); owner != "" { + args = append(args, owner) + clauses = append(clauses, "LOWER(COALESCE(error_owner,'')) = $"+itoa(len(args))) + } + if source := strings.TrimSpace(strings.ToLower(filter.Source)); source != "" { + args = append(args, source) + clauses = append(clauses, "LOWER(COALESCE(error_source,'')) = $"+itoa(len(args))) + } + } + if resolvedFilter != nil { + args = append(args, *resolvedFilter) + clauses = append(clauses, "COALESCE(resolved,false) = $"+itoa(len(args))) + } + + // View filter: errors vs excluded vs all. + // Excluded = upstream 429/529 and business-limited (quota/concurrency/billing) errors. + view := "" + if filter != nil { + view = strings.ToLower(strings.TrimSpace(filter.View)) + } + switch view { + case "", "errors": + clauses = append(clauses, "COALESCE(is_business_limited,false) = false") + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") + case "excluded": + clauses = append(clauses, "(COALESCE(is_business_limited,false) = true OR COALESCE(upstream_status_code, status_code, 0) IN (429, 529))") + case "all": + // no-op + default: + // treat unknown as default 'errors' + clauses = append(clauses, "COALESCE(is_business_limited,false) = false") + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") + } + if len(filter.StatusCodes) > 0 { + args = append(args, pq.Array(filter.StatusCodes)) + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+")") + } else if filter.StatusCodesOther { + // "Other" means: status codes not in the common list. + known := []int{400, 401, 403, 404, 409, 422, 429, 500, 502, 503, 504, 529} + args = append(args, pq.Array(known)) + clauses = append(clauses, "NOT (COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+"))") + } + // Exact correlation keys (preferred for request↔upstream linkage). 
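+	// (These must match exactly, unlike the fuzzy ILIKE-based Query filter below.)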
+ if rid := strings.TrimSpace(filter.RequestID); rid != "" { + args = append(args, rid) + clauses = append(clauses, "COALESCE(request_id,'') = $"+itoa(len(args))) + } + if crid := strings.TrimSpace(filter.ClientRequestID); crid != "" { + args = append(args, crid) + clauses = append(clauses, "COALESCE(client_request_id,'') = $"+itoa(len(args))) + } + + if q := strings.TrimSpace(filter.Query); q != "" { + like := "%" + q + "%" + args = append(args, like) + n := itoa(len(args)) + clauses = append(clauses, "(request_id ILIKE $"+n+" OR client_request_id ILIKE $"+n+" OR error_message ILIKE $"+n+")") + } + + if userQuery := strings.TrimSpace(filter.UserQuery); userQuery != "" { + like := "%" + userQuery + "%" + args = append(args, like) + n := itoa(len(args)) + clauses = append(clauses, "u.email ILIKE $"+n) + } + + return "WHERE " + strings.Join(clauses, " AND "), args +} + +// Helpers for nullable args +func opsNullString(v any) any { + switch s := v.(type) { + case nil: + return sql.NullString{} + case *string: + if s == nil || strings.TrimSpace(*s) == "" { + return sql.NullString{} + } + return sql.NullString{String: strings.TrimSpace(*s), Valid: true} + case string: + if strings.TrimSpace(s) == "" { + return sql.NullString{} + } + return sql.NullString{String: strings.TrimSpace(s), Valid: true} + default: + return sql.NullString{} + } +} + +func opsNullInt64(v *int64) any { + if v == nil || *v == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: *v, Valid: true} +} + +func opsNullInt(v any) any { + switch n := v.(type) { + case nil: + return sql.NullInt64{} + case *int: + if n == nil || *n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: int64(*n), Valid: true} + case *int64: + if n == nil || *n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: *n, Valid: true} + case int: + if n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: int64(n), Valid: true} + default: + return sql.NullInt64{} + } +} diff --git a/backend/internal/repository/ops_repo_alerts.go b/backend/internal/repository/ops_repo_alerts.go new file mode 100644 index 00000000..bd98b7e4 --- /dev/null +++ b/backend/internal/repository/ops_repo_alerts.go @@ -0,0 +1,853 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) ListAlertRules(ctx context.Context) ([]*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + + q := ` +SELECT + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at +FROM ops_alert_rules +ORDER BY id DESC` + + rows, err := r.db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := []*service.OpsAlertRule{} + for rows.Next() { + var rule service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime + if err := rows.Scan( + &rule.ID, + &rule.Name, + &rule.Description, + &rule.Enabled, + &rule.Severity, + &rule.MetricType, + &rule.Operator, + &rule.Threshold, + &rule.WindowMinutes, + &rule.SustainedMinutes, + &rule.CooldownMinutes, + &rule.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &rule.CreatedAt, + &rule.UpdatedAt, + ); err != nil { + return nil, err + } + if 
lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + rule.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + rule.Filters = decoded + } + } + out = append(out, &rule) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) CreateAlertRule(ctx context.Context, input *service.OpsAlertRule) (*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + + filtersArg, err := opsNullJSONMap(input.Filters) + if err != nil { + return nil, err + } + + q := ` +INSERT INTO ops_alert_rules ( + name, + description, + enabled, + severity, + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + notify_email, + filters, + created_at, + updated_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,NOW(),NOW() +) +RETURNING + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at` + + var out service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime + + if err := r.db.QueryRowContext( + ctx, + q, + strings.TrimSpace(input.Name), + strings.TrimSpace(input.Description), + input.Enabled, + strings.TrimSpace(input.Severity), + strings.TrimSpace(input.MetricType), + strings.TrimSpace(input.Operator), + input.Threshold, + input.WindowMinutes, + input.SustainedMinutes, + input.CooldownMinutes, + input.NotifyEmail, + filtersArg, + ).Scan( + &out.ID, + &out.Name, + &out.Description, + &out.Enabled, + &out.Severity, + &out.MetricType, + &out.Operator, + &out.Threshold, + &out.WindowMinutes, + &out.SustainedMinutes, + &out.CooldownMinutes, + &out.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &out.CreatedAt, + &out.UpdatedAt, + ); err != nil { + return nil, err + } + if lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + out.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + out.Filters = decoded + } + } + + return &out, nil +} + +func (r *opsRepository) UpdateAlertRule(ctx context.Context, input *service.OpsAlertRule) (*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + if input.ID <= 0 { + return nil, fmt.Errorf("invalid id") + } + + filtersArg, err := opsNullJSONMap(input.Filters) + if err != nil { + return nil, err + } + + q := ` +UPDATE ops_alert_rules +SET + name = $2, + description = $3, + enabled = $4, + severity = $5, + metric_type = $6, + operator = $7, + threshold = $8, + window_minutes = $9, + sustained_minutes = $10, + cooldown_minutes = $11, + notify_email = $12, + filters = $13, + updated_at = NOW() +WHERE id = $1 +RETURNING + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at` + + var out service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime 
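+	// UPDATE ... RETURNING reads the persisted row back in the same round trip,
+	// so callers see exactly what was stored.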
+ + if err := r.db.QueryRowContext( + ctx, + q, + input.ID, + strings.TrimSpace(input.Name), + strings.TrimSpace(input.Description), + input.Enabled, + strings.TrimSpace(input.Severity), + strings.TrimSpace(input.MetricType), + strings.TrimSpace(input.Operator), + input.Threshold, + input.WindowMinutes, + input.SustainedMinutes, + input.CooldownMinutes, + input.NotifyEmail, + filtersArg, + ).Scan( + &out.ID, + &out.Name, + &out.Description, + &out.Enabled, + &out.Severity, + &out.MetricType, + &out.Operator, + &out.Threshold, + &out.WindowMinutes, + &out.SustainedMinutes, + &out.CooldownMinutes, + &out.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &out.CreatedAt, + &out.UpdatedAt, + ); err != nil { + return nil, err + } + + if lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + out.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + out.Filters = decoded + } + } + + return &out, nil +} + +func (r *opsRepository) DeleteAlertRule(ctx context.Context, id int64) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if id <= 0 { + return fmt.Errorf("invalid id") + } + + res, err := r.db.ExecContext(ctx, "DELETE FROM ops_alert_rules WHERE id = $1", id) + if err != nil { + return err + } + affected, err := res.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *opsRepository) ListAlertEvents(ctx context.Context, filter *service.OpsAlertEventFilter) ([]*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + filter = &service.OpsAlertEventFilter{} + } + + limit := filter.Limit + if limit <= 0 { + limit = 100 + } + if limit > 500 { + limit = 500 + } + + where, args := buildOpsAlertEventsWhere(filter) + args = append(args, limit) + limitArg := "$" + itoa(len(args)) + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +` + where + ` +ORDER BY fired_at DESC, id DESC +LIMIT ` + limitArg + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := []*service.OpsAlertEvent{} + for rows.Next() { + var ev service.OpsAlertEvent + var metricValue sql.NullFloat64 + var thresholdValue sql.NullFloat64 + var dimensionsRaw []byte + var resolvedAt sql.NullTime + if err := rows.Scan( + &ev.ID, + &ev.RuleID, + &ev.Severity, + &ev.Status, + &ev.Title, + &ev.Description, + &metricValue, + &thresholdValue, + &dimensionsRaw, + &ev.FiredAt, + &resolvedAt, + &ev.EmailSent, + &ev.CreatedAt, + ); err != nil { + return nil, err + } + if metricValue.Valid { + v := metricValue.Float64 + ev.MetricValue = &v + } + if thresholdValue.Valid { + v := thresholdValue.Float64 + ev.ThresholdValue = &v + } + if resolvedAt.Valid { + v := resolvedAt.Time + ev.ResolvedAt = &v + } + if len(dimensionsRaw) > 0 && string(dimensionsRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(dimensionsRaw, &decoded); err == nil { + ev.Dimensions = decoded + } + } + out = append(out, &ev) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) GetAlertEventByID(ctx context.Context, eventID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return nil, fmt.Errorf("invalid event id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE id = $1` + + row := r.db.QueryRowContext(ctx, q, eventID) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return nil, fmt.Errorf("invalid rule id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE rule_id = $1 AND status = $2 +ORDER BY fired_at DESC +LIMIT 1` + + row := r.db.QueryRowContext(ctx, q, ruleID, service.OpsAlertStatusFiring) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return nil, fmt.Errorf("invalid rule id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE rule_id = $1 +ORDER BY fired_at DESC +LIMIT 1` + + row := r.db.QueryRowContext(ctx, q, ruleID) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) CreateAlertEvent(ctx context.Context, event 
*service.OpsAlertEvent) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if event == nil { + return nil, fmt.Errorf("nil event") + } + + dimensionsArg, err := opsNullJSONMap(event.Dimensions) + if err != nil { + return nil, err + } + + q := ` +INSERT INTO ops_alert_events ( + rule_id, + severity, + status, + title, + description, + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,NOW() +) +RETURNING + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at` + + row := r.db.QueryRowContext( + ctx, + q, + opsNullInt64(&event.RuleID), + opsNullString(event.Severity), + opsNullString(event.Status), + opsNullString(event.Title), + opsNullString(event.Description), + opsNullFloat64(event.MetricValue), + opsNullFloat64(event.ThresholdValue), + dimensionsArg, + event.FiredAt, + opsNullTime(event.ResolvedAt), + event.EmailSent, + ) + return scanOpsAlertEvent(row) +} + +func (r *opsRepository) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return fmt.Errorf("invalid event id") + } + if strings.TrimSpace(status) == "" { + return fmt.Errorf("invalid status") + } + + q := ` +UPDATE ops_alert_events +SET status = $2, + resolved_at = $3 +WHERE id = $1` + + _, err := r.db.ExecContext(ctx, q, eventID, strings.TrimSpace(status), opsNullTime(resolvedAt)) + return err +} + +func (r *opsRepository) UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return fmt.Errorf("invalid event id") + } + + _, err := r.db.ExecContext(ctx, "UPDATE ops_alert_events SET email_sent = $2 WHERE id = $1", eventID, emailSent) + return err +} + +type opsAlertEventRow interface { + Scan(dest ...any) error +} + +func (r *opsRepository) CreateAlertSilence(ctx context.Context, input *service.OpsAlertSilence) (*service.OpsAlertSilence, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + if input.RuleID <= 0 { + return nil, fmt.Errorf("invalid rule_id") + } + platform := strings.TrimSpace(input.Platform) + if platform == "" { + return nil, fmt.Errorf("invalid platform") + } + if input.Until.IsZero() { + return nil, fmt.Errorf("invalid until") + } + + q := ` +INSERT INTO ops_alert_silences ( + rule_id, + platform, + group_id, + region, + until, + reason, + created_by, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,NOW() +) +RETURNING id, rule_id, platform, group_id, region, until, COALESCE(reason,''), created_by, created_at` + + row := r.db.QueryRowContext( + ctx, + q, + input.RuleID, + platform, + opsNullInt64(input.GroupID), + opsNullString(input.Region), + input.Until, + opsNullString(input.Reason), + opsNullInt64(input.CreatedBy), + ) + + var out service.OpsAlertSilence + var groupID sql.NullInt64 + var region sql.NullString + var createdBy sql.NullInt64 + if err := row.Scan( + &out.ID, + &out.RuleID, + &out.Platform, + &groupID, + ®ion, + &out.Until, + &out.Reason, + &createdBy, + &out.CreatedAt, + ); err 
!= nil { + return nil, err + } + if groupID.Valid { + v := groupID.Int64 + out.GroupID = &v + } + if region.Valid { + v := strings.TrimSpace(region.String) + if v != "" { + out.Region = &v + } + } + if createdBy.Valid { + v := createdBy.Int64 + out.CreatedBy = &v + } + return &out, nil +} + +func (r *opsRepository) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) { + if r == nil || r.db == nil { + return false, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return false, fmt.Errorf("invalid rule id") + } + platform = strings.TrimSpace(platform) + if platform == "" { + return false, nil + } + if now.IsZero() { + now = time.Now().UTC() + } + + q := ` +SELECT 1 +FROM ops_alert_silences +WHERE rule_id = $1 + AND platform = $2 + AND (group_id IS NOT DISTINCT FROM $3) + AND (region IS NOT DISTINCT FROM $4) + AND until > $5 +LIMIT 1` + + var dummy int + err := r.db.QueryRowContext(ctx, q, ruleID, platform, opsNullInt64(groupID), opsNullString(region), now).Scan(&dummy) + if err != nil { + if err == sql.ErrNoRows { + return false, nil + } + return false, err + } + return true, nil +} + +func scanOpsAlertEvent(row opsAlertEventRow) (*service.OpsAlertEvent, error) { + var ev service.OpsAlertEvent + var metricValue sql.NullFloat64 + var thresholdValue sql.NullFloat64 + var dimensionsRaw []byte + var resolvedAt sql.NullTime + + if err := row.Scan( + &ev.ID, + &ev.RuleID, + &ev.Severity, + &ev.Status, + &ev.Title, + &ev.Description, + &metricValue, + &thresholdValue, + &dimensionsRaw, + &ev.FiredAt, + &resolvedAt, + &ev.EmailSent, + &ev.CreatedAt, + ); err != nil { + return nil, err + } + if metricValue.Valid { + v := metricValue.Float64 + ev.MetricValue = &v + } + if thresholdValue.Valid { + v := thresholdValue.Float64 + ev.ThresholdValue = &v + } + if resolvedAt.Valid { + v := resolvedAt.Time + ev.ResolvedAt = &v + } + if len(dimensionsRaw) > 0 && string(dimensionsRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(dimensionsRaw, &decoded); err == nil { + ev.Dimensions = decoded + } + } + return &ev, nil +} + +func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []any) { + clauses := []string{"1=1"} + args := []any{} + + if filter == nil { + return "WHERE " + strings.Join(clauses, " AND "), args + } + + if status := strings.TrimSpace(filter.Status); status != "" { + args = append(args, status) + clauses = append(clauses, "status = $"+itoa(len(args))) + } + if severity := strings.TrimSpace(filter.Severity); severity != "" { + args = append(args, severity) + clauses = append(clauses, "severity = $"+itoa(len(args))) + } + if filter.EmailSent != nil { + args = append(args, *filter.EmailSent) + clauses = append(clauses, "email_sent = $"+itoa(len(args))) + } + if filter.StartTime != nil && !filter.StartTime.IsZero() { + args = append(args, *filter.StartTime) + clauses = append(clauses, "fired_at >= $"+itoa(len(args))) + } + if filter.EndTime != nil && !filter.EndTime.IsZero() { + args = append(args, *filter.EndTime) + clauses = append(clauses, "fired_at < $"+itoa(len(args))) + } + + // Cursor pagination (descending by fired_at, then id) + if filter.BeforeFiredAt != nil && !filter.BeforeFiredAt.IsZero() && filter.BeforeID != nil && *filter.BeforeID > 0 { + args = append(args, *filter.BeforeFiredAt) + tsArg := "$" + itoa(len(args)) + args = append(args, *filter.BeforeID) + idArg := "$" + itoa(len(args)) + clauses = append(clauses, fmt.Sprintf("(fired_at < %s OR (fired_at = %s 
AND id < %s))", tsArg, tsArg, idArg)) + } + // Dimensions are stored in JSONB. We filter best-effort without requiring GIN indexes. + if platform := strings.TrimSpace(filter.Platform); platform != "" { + args = append(args, platform) + clauses = append(clauses, "(dimensions->>'platform') = $"+itoa(len(args))) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + args = append(args, fmt.Sprintf("%d", *filter.GroupID)) + clauses = append(clauses, "(dimensions->>'group_id') = $"+itoa(len(args))) + } + + return "WHERE " + strings.Join(clauses, " AND "), args +} + +func opsNullJSONMap(v map[string]any) (any, error) { + if v == nil { + return sql.NullString{}, nil + } + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + if len(b) == 0 { + return sql.NullString{}, nil + } + return sql.NullString{String: string(b), Valid: true}, nil +} diff --git a/backend/internal/repository/ops_repo_dashboard.go b/backend/internal/repository/ops_repo_dashboard.go new file mode 100644 index 00000000..85791a9a --- /dev/null +++ b/backend/internal/repository/ops_repo_dashboard.go @@ -0,0 +1,1015 @@ +package repository + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetDashboardOverview(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + mode := filter.QueryMode + if !mode.IsValid() { + mode = service.OpsQueryModeRaw + } + + switch mode { + case service.OpsQueryModePreagg: + return r.getDashboardOverviewPreaggregated(ctx, filter) + case service.OpsQueryModeAuto: + out, err := r.getDashboardOverviewPreaggregated(ctx, filter) + if err != nil && errors.Is(err, service.ErrOpsPreaggregatedNotPopulated) { + return r.getDashboardOverviewRaw(ctx, filter) + } + return out, err + default: + return r.getDashboardOverviewRaw(ctx, filter) + } +} + +func (r *opsRepository) getDashboardOverviewRaw(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + if err != nil { + return nil, err + } + + errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + windowSeconds := end.Sub(start).Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + requestCountSLA := successCount + errorCountSLA + + sla := safeDivideFloat64(float64(successCount), float64(requestCountSLA)) + errorRate := safeDivideFloat64(float64(errorCountSLA), float64(requestCountSLA)) + upstreamErrorRate := safeDivideFloat64(float64(upstreamExcl), float64(requestCountSLA)) + + qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end) + if err != nil { + return nil, err + } + tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end) + if err 
!= nil { + return nil, err + } + + qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) + tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + + return &service.OpsDashboardOverview{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: errorCountSLA, + RequestCountTotal: requestCountTotal, + RequestCountSLA: requestCountSLA, + TokenConsumed: tokenConsumed, + + SLA: roundTo4DP(sla), + ErrorRate: roundTo4DP(errorRate), + UpstreamErrorRate: roundTo4DP(upstreamErrorRate), + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + + QPS: service.OpsRateSummary{ + Current: qpsCurrent, + Peak: qpsPeak, + Avg: qpsAvg, + }, + TPS: service.OpsRateSummary{ + Current: tpsCurrent, + Peak: tpsPeak, + Avg: tpsAvg, + }, + + Duration: duration, + TTFT: ttft, + }, nil +} + +type opsDashboardPartial struct { + successCount int64 + errorCountTotal int64 + businessLimitedCount int64 + errorCountSLA int64 + + upstreamErrorCountExcl429529 int64 + upstream429Count int64 + upstream529Count int64 + + tokenConsumed int64 + + duration service.OpsPercentiles + ttft service.OpsPercentiles +} + +func (r *opsRepository) getDashboardOverviewPreaggregated(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + // Stable full-hour range covered by pre-aggregation. + aggSafeEnd := preaggSafeEnd(end) + aggFullStart := utcCeilToHour(start) + aggFullEnd := utcFloorToHour(aggSafeEnd) + + // If there are no stable full-hour buckets, use raw directly (short windows). + if !aggFullStart.Before(aggFullEnd) { + return r.getDashboardOverviewRaw(ctx, filter) + } + + // 1) Pre-aggregated stable segment. + preaggRows, err := r.listHourlyMetricsRows(ctx, filter, aggFullStart, aggFullEnd) + if err != nil { + return nil, err + } + if len(preaggRows) == 0 { + // Distinguish "no data" vs "preagg not populated yet". + if exists, err := r.rawOpsDataExists(ctx, filter, aggFullStart, aggFullEnd); err == nil && exists { + return nil, service.ErrOpsPreaggregatedNotPopulated + } + } + preagg := aggregateHourlyRows(preaggRows) + + // 2) Raw head/tail fragments (at most ~1 hour each). + head := opsDashboardPartial{} + tail := opsDashboardPartial{} + + if start.Before(aggFullStart) { + part, err := r.queryRawPartial(ctx, filter, start, minTime(end, aggFullStart)) + if err != nil { + return nil, err + } + head = *part + } + if aggFullEnd.Before(end) { + part, err := r.queryRawPartial(ctx, filter, maxTime(start, aggFullEnd), end) + if err != nil { + return nil, err + } + tail = *part + } + + // Merge counts. 
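+	// (head and tail remain zero-valued partials when the window is hour-aligned,
+	// so the plain sums below are safe.)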
+ successCount := preagg.successCount + head.successCount + tail.successCount + errorTotal := preagg.errorCountTotal + head.errorCountTotal + tail.errorCountTotal + businessLimited := preagg.businessLimitedCount + head.businessLimitedCount + tail.businessLimitedCount + errorCountSLA := preagg.errorCountSLA + head.errorCountSLA + tail.errorCountSLA + + upstreamExcl := preagg.upstreamErrorCountExcl429529 + head.upstreamErrorCountExcl429529 + tail.upstreamErrorCountExcl429529 + upstream429 := preagg.upstream429Count + head.upstream429Count + tail.upstream429Count + upstream529 := preagg.upstream529Count + head.upstream529Count + tail.upstream529Count + + tokenConsumed := preagg.tokenConsumed + head.tokenConsumed + tail.tokenConsumed + + // Approximate percentiles across segments: + // - p50/p90/avg: weighted average by success_count + // - p95/p99/max: max (conservative tail) + duration := combineApproxPercentiles([]opsPercentileSegment{ + {weight: preagg.successCount, p: preagg.duration}, + {weight: head.successCount, p: head.duration}, + {weight: tail.successCount, p: tail.duration}, + }) + ttft := combineApproxPercentiles([]opsPercentileSegment{ + {weight: preagg.successCount, p: preagg.ttft}, + {weight: head.successCount, p: head.ttft}, + {weight: tail.successCount, p: tail.ttft}, + }) + + windowSeconds := end.Sub(start).Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + requestCountSLA := successCount + errorCountSLA + + sla := safeDivideFloat64(float64(successCount), float64(requestCountSLA)) + errorRate := safeDivideFloat64(float64(errorCountSLA), float64(requestCountSLA)) + upstreamErrorRate := safeDivideFloat64(float64(upstreamExcl), float64(requestCountSLA)) + + // Keep "current" rates as raw, to preserve realtime semantics. + qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + // NOTE: peak still uses raw logs (minute granularity). This is typically cheaper than percentile_cont + // and keeps semantics consistent across modes. 
+	qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end)
+	if err != nil {
+		return nil, err
+	}
+	tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end)
+	if err != nil {
+		return nil, err
+	}
+
+	qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds)
+	tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds)
+
+	return &service.OpsDashboardOverview{
+		StartTime: start,
+		EndTime:   end,
+		Platform:  strings.TrimSpace(filter.Platform),
+		GroupID:   filter.GroupID,
+
+		SuccessCount:         successCount,
+		ErrorCountTotal:      errorTotal,
+		BusinessLimitedCount: businessLimited,
+		ErrorCountSLA:        errorCountSLA,
+		RequestCountTotal:    requestCountTotal,
+		RequestCountSLA:      requestCountSLA,
+		TokenConsumed:        tokenConsumed,
+
+		SLA:                          roundTo4DP(sla),
+		ErrorRate:                    roundTo4DP(errorRate),
+		UpstreamErrorRate:            roundTo4DP(upstreamErrorRate),
+		UpstreamErrorCountExcl429529: upstreamExcl,
+		Upstream429Count:             upstream429,
+		Upstream529Count:             upstream529,
+
+		QPS: service.OpsRateSummary{
+			Current: qpsCurrent,
+			Peak:    qpsPeak,
+			Avg:     qpsAvg,
+		},
+		TPS: service.OpsRateSummary{
+			Current: tpsCurrent,
+			Peak:    tpsPeak,
+			Avg:     tpsAvg,
+		},
+
+		Duration: duration,
+		TTFT:     ttft,
+	}, nil
+}
+
+type opsHourlyMetricsRow struct {
+	bucketStart time.Time
+
+	successCount         int64
+	errorCountTotal      int64
+	businessLimitedCount int64
+	errorCountSLA        int64
+
+	upstreamErrorCountExcl429529 int64
+	upstream429Count             int64
+	upstream529Count             int64
+
+	tokenConsumed int64
+
+	durationP50 sql.NullInt64
+	durationP90 sql.NullInt64
+	durationP95 sql.NullInt64
+	durationP99 sql.NullInt64
+	durationAvg sql.NullFloat64
+	durationMax sql.NullInt64
+
+	ttftP50 sql.NullInt64
+	ttftP90 sql.NullInt64
+	ttftP95 sql.NullInt64
+	ttftP99 sql.NullInt64
+	ttftAvg sql.NullFloat64
+	ttftMax sql.NullInt64
+}
+
+func (r *opsRepository) listHourlyMetricsRows(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) ([]opsHourlyMetricsRow, error) {
+	if r == nil || r.db == nil {
+		return nil, fmt.Errorf("nil ops repository")
+	}
+	if start.IsZero() || end.IsZero() || !start.Before(end) {
+		return []opsHourlyMetricsRow{}, nil
+	}
+
+	where := "bucket_start >= $1 AND bucket_start < $2"
+	args := []any{start.UTC(), end.UTC()}
+	idx := 3
+
+	platform := ""
+	groupID := (*int64)(nil)
+	if filter != nil {
+		platform = strings.TrimSpace(strings.ToLower(filter.Platform))
+		groupID = filter.GroupID
+	}
+
+	switch {
+	case groupID != nil && *groupID > 0:
+		where += fmt.Sprintf(" AND group_id = $%d", idx)
+		args = append(args, *groupID)
+		idx++
+		if platform != "" {
+			where += fmt.Sprintf(" AND platform = $%d", idx)
+			args = append(args, platform)
+			// idx is intentionally not advanced: this is the last placeholder in this branch.
+		}
+	case platform != "":
+		where += fmt.Sprintf(" AND platform = $%d AND group_id IS NULL", idx)
+		args = append(args, platform)
+		// idx is intentionally not advanced: this is the last placeholder in this branch.
+	default:
+		where += " AND platform IS NULL AND group_id IS NULL"
+	}
+
+	q := `
+SELECT
+	bucket_start,
+	success_count,
+	error_count_total,
+	business_limited_count,
+	error_count_sla,
+	upstream_error_count_excl_429_529,
+	upstream_429_count,
+	upstream_529_count,
+	token_consumed,
+	duration_p50_ms,
+	duration_p90_ms,
+	duration_p95_ms,
+	duration_p99_ms,
+	duration_avg_ms,
+	duration_max_ms,
+	ttft_p50_ms,
+	ttft_p90_ms,
+	ttft_p95_ms,
+	ttft_p99_ms,
+	ttft_avg_ms,
+	ttft_max_ms
+FROM ops_metrics_hourly
+WHERE ` + where + `
+ORDER BY bucket_start ASC`
+
+	rows, err := r.db.QueryContext(ctx, q, args...)
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]opsHourlyMetricsRow, 0, 64) + for rows.Next() { + var row opsHourlyMetricsRow + if err := rows.Scan( + &row.bucketStart, + &row.successCount, + &row.errorCountTotal, + &row.businessLimitedCount, + &row.errorCountSLA, + &row.upstreamErrorCountExcl429529, + &row.upstream429Count, + &row.upstream529Count, + &row.tokenConsumed, + &row.durationP50, + &row.durationP90, + &row.durationP95, + &row.durationP99, + &row.durationAvg, + &row.durationMax, + &row.ttftP50, + &row.ttftP90, + &row.ttftP95, + &row.ttftP99, + &row.ttftAvg, + &row.ttftMax, + ); err != nil { + return nil, err + } + out = append(out, row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func aggregateHourlyRows(rows []opsHourlyMetricsRow) opsDashboardPartial { + out := opsDashboardPartial{} + if len(rows) == 0 { + return out + } + + var ( + p50Sum float64 + p50W int64 + p90Sum float64 + p90W int64 + avgSum float64 + avgW int64 + ) + var ( + ttftP50Sum float64 + ttftP50W int64 + ttftP90Sum float64 + ttftP90W int64 + ttftAvgSum float64 + ttftAvgW int64 + ) + + var ( + p95Max *int + p99Max *int + maxMax *int + + ttftP95Max *int + ttftP99Max *int + ttftMaxMax *int + ) + + for _, row := range rows { + out.successCount += row.successCount + out.errorCountTotal += row.errorCountTotal + out.businessLimitedCount += row.businessLimitedCount + out.errorCountSLA += row.errorCountSLA + + out.upstreamErrorCountExcl429529 += row.upstreamErrorCountExcl429529 + out.upstream429Count += row.upstream429Count + out.upstream529Count += row.upstream529Count + + out.tokenConsumed += row.tokenConsumed + + if row.successCount > 0 { + if row.durationP50.Valid { + p50Sum += float64(row.durationP50.Int64) * float64(row.successCount) + p50W += row.successCount + } + if row.durationP90.Valid { + p90Sum += float64(row.durationP90.Int64) * float64(row.successCount) + p90W += row.successCount + } + if row.durationAvg.Valid { + avgSum += row.durationAvg.Float64 * float64(row.successCount) + avgW += row.successCount + } + if row.ttftP50.Valid { + ttftP50Sum += float64(row.ttftP50.Int64) * float64(row.successCount) + ttftP50W += row.successCount + } + if row.ttftP90.Valid { + ttftP90Sum += float64(row.ttftP90.Int64) * float64(row.successCount) + ttftP90W += row.successCount + } + if row.ttftAvg.Valid { + ttftAvgSum += row.ttftAvg.Float64 * float64(row.successCount) + ttftAvgW += row.successCount + } + } + + if row.durationP95.Valid { + v := int(row.durationP95.Int64) + if p95Max == nil || v > *p95Max { + p95Max = &v + } + } + if row.durationP99.Valid { + v := int(row.durationP99.Int64) + if p99Max == nil || v > *p99Max { + p99Max = &v + } + } + if row.durationMax.Valid { + v := int(row.durationMax.Int64) + if maxMax == nil || v > *maxMax { + maxMax = &v + } + } + + if row.ttftP95.Valid { + v := int(row.ttftP95.Int64) + if ttftP95Max == nil || v > *ttftP95Max { + ttftP95Max = &v + } + } + if row.ttftP99.Valid { + v := int(row.ttftP99.Int64) + if ttftP99Max == nil || v > *ttftP99Max { + ttftP99Max = &v + } + } + if row.ttftMax.Valid { + v := int(row.ttftMax.Int64) + if ttftMaxMax == nil || v > *ttftMaxMax { + ttftMaxMax = &v + } + } + } + + // duration + if p50W > 0 { + v := int(math.Round(p50Sum / float64(p50W))) + out.duration.P50 = &v + } + if p90W > 0 { + v := int(math.Round(p90Sum / float64(p90W))) + out.duration.P90 = &v + } + out.duration.P95 = p95Max + out.duration.P99 = p99Max + if avgW > 0 { + v := int(math.Round(avgSum / 
float64(avgW))) + out.duration.Avg = &v + } + out.duration.Max = maxMax + + // ttft + if ttftP50W > 0 { + v := int(math.Round(ttftP50Sum / float64(ttftP50W))) + out.ttft.P50 = &v + } + if ttftP90W > 0 { + v := int(math.Round(ttftP90Sum / float64(ttftP90W))) + out.ttft.P90 = &v + } + out.ttft.P95 = ttftP95Max + out.ttft.P99 = ttftP99Max + if ttftAvgW > 0 { + v := int(math.Round(ttftAvgSum / float64(ttftAvgW))) + out.ttft.Avg = &v + } + out.ttft.Max = ttftMaxMax + + return out +} + +func (r *opsRepository) queryRawPartial(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (*opsDashboardPartial, error) { + successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + if err != nil { + return nil, err + } + + errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + return &opsDashboardPartial{ + successCount: successCount, + errorCountTotal: errorTotal, + businessLimitedCount: businessLimited, + errorCountSLA: errorCountSLA, + upstreamErrorCountExcl429529: upstreamExcl, + upstream429Count: upstream429, + upstream529Count: upstream529, + tokenConsumed: tokenConsumed, + duration: duration, + ttft: ttft, + }, nil +} + +func (r *opsRepository) rawOpsDataExists(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (bool, error) { + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := `SELECT EXISTS(SELECT 1 FROM usage_logs ul ` + join + ` ` + where + ` LIMIT 1)` + var exists bool + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&exists); err != nil { + return false, err + } + if exists { + return true, nil + } + } + + { + where, args, _ := buildErrorWhere(filter, start, end, 1) + q := `SELECT EXISTS(SELECT 1 FROM ops_error_logs ` + where + ` LIMIT 1)` + var exists bool + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&exists); err != nil { + return false, err + } + return exists, nil + } +} + +type opsPercentileSegment struct { + weight int64 + p service.OpsPercentiles +} + +func combineApproxPercentiles(segments []opsPercentileSegment) service.OpsPercentiles { + weightedInt := func(get func(service.OpsPercentiles) *int) *int { + var sum float64 + var w int64 + for _, seg := range segments { + if seg.weight <= 0 { + continue + } + v := get(seg.p) + if v == nil { + continue + } + sum += float64(*v) * float64(seg.weight) + w += seg.weight + } + if w <= 0 { + return nil + } + out := int(math.Round(sum / float64(w))) + return &out + } + + maxInt := func(get func(service.OpsPercentiles) *int) *int { + var max *int + for _, seg := range segments { + v := get(seg.p) + if v == nil { + continue + } + if max == nil || *v > *max { + c := *v + max = &c + } + } + return max + } + + return service.OpsPercentiles{ + P50: weightedInt(func(p service.OpsPercentiles) *int { return p.P50 }), + P90: weightedInt(func(p service.OpsPercentiles) *int { return p.P90 }), + P95: maxInt(func(p service.OpsPercentiles) *int { return p.P95 }), + P99: maxInt(func(p service.OpsPercentiles) *int { return p.P99 }), + Avg: weightedInt(func(p service.OpsPercentiles) *int { return p.Avg }), + Max: maxInt(func(p service.OpsPercentiles) *int { return p.Max }), + } +} + +func preaggSafeEnd(endTime time.Time) time.Time { + now := time.Now().UTC() + cutoff := now.Add(-5 * time.Minute) + if 
endTime.After(cutoff) { + return cutoff + } + return endTime +} + +func utcCeilToHour(t time.Time) time.Time { + u := t.UTC() + f := u.Truncate(time.Hour) + if f.Equal(u) { + return f + } + return f.Add(time.Hour) +} + +func utcFloorToHour(t time.Time) time.Time { + return t.UTC().Truncate(time.Hour) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} + +func (r *opsRepository) queryUsageCounts(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (successCount int64, tokenConsumed int64, err error) { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed +FROM usage_logs ul +` + join + ` +` + where + + var tokens sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&successCount, &tokens); err != nil { + return 0, 0, err + } + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + return successCount, tokenConsumed, nil +} + +func (r *opsRepository) queryUsageLatency(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (duration service.OpsPercentiles, ttft service.OpsPercentiles, err error) { + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) AS p99, + AVG(duration_ms) AS avg_ms, + MAX(duration_ms) AS max_ms +FROM usage_logs ul +` + join + ` +` + where + ` +AND duration_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return service.OpsPercentiles{}, service.OpsPercentiles{}, err + } + duration.P50 = floatToIntPtr(p50) + duration.P90 = floatToIntPtr(p90) + duration.P95 = floatToIntPtr(p95) + duration.P99 = floatToIntPtr(p99) + duration.Avg = floatToIntPtr(avg) + if max.Valid { + v := int(max.Int64) + duration.Max = &v + } + } + + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) AS p99, + AVG(first_token_ms) AS avg_ms, + MAX(first_token_ms) AS max_ms +FROM usage_logs ul +` + join + ` +` + where + ` +AND first_token_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return service.OpsPercentiles{}, service.OpsPercentiles{}, err + } + ttft.P50 = floatToIntPtr(p50) + ttft.P90 = floatToIntPtr(p90) + ttft.P95 = floatToIntPtr(p95) + ttft.P99 = floatToIntPtr(p99) + ttft.Avg = floatToIntPtr(avg) + if max.Valid { + v := int(max.Int64) + ttft.Max = &v + } + } + + return duration, ttft, nil +} + +func (r *opsRepository) queryErrorCounts(ctx context.Context, filter *service.OpsDashboardFilter, start, 
end time.Time) ( + errorTotal int64, + businessLimited int64, + errorCountSLA int64, + upstreamExcl429529 int64, + upstream429 int64, + upstream529 int64, + err error, +) { + where, args, _ := buildErrorWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400), 0) AS error_total, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited), 0) AS business_limited, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited), 0) AS error_sla, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)), 0) AS upstream_excl, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429), 0) AS upstream_429, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529), 0) AS upstream_529 +FROM ops_error_logs +` + where + + if err := r.db.QueryRowContext(ctx, q, args...).Scan( + &errorTotal, + &businessLimited, + &errorCountSLA, + &upstreamExcl429529, + &upstream429, + &upstream529, + ); err != nil { + return 0, 0, 0, 0, 0, 0, err + } + return errorTotal, businessLimited, errorCountSLA, upstreamExcl429529, upstream429, upstream529, nil +} + +func (r *opsRepository) queryCurrentRates(ctx context.Context, filter *service.OpsDashboardFilter, end time.Time) (qpsCurrent float64, tpsCurrent float64, err error) { + windowStart := end.Add(-1 * time.Minute) + + successCount1m, token1m, err := r.queryUsageCounts(ctx, filter, windowStart, end) + if err != nil { + return 0, 0, err + } + errorCount1m, _, _, _, _, _, err := r.queryErrorCounts(ctx, filter, windowStart, end) + if err != nil { + return 0, 0, err + } + + qpsCurrent = roundTo1DP(float64(successCount1m+errorCount1m) / 60.0) + tpsCurrent = roundTo1DP(float64(token1m) / 60.0) + return qpsCurrent, tpsCurrent, nil +} + +func (r *opsRepository) queryPeakQPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + q := ` +WITH usage_buckets AS ( + SELECT date_trunc('minute', ul.created_at) AS bucket, COUNT(*) AS cnt + FROM usage_logs ul + ` + usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT date_trunc('minute', created_at) AS bucket, COUNT(*) AS cnt + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.cnt, 0) + COALESCE(e.cnt, 0) AS total + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT COALESCE(MAX(total), 0) FROM combined` + + args := append(usageArgs, errorArgs...) 
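+ // usageArgs fill placeholders $1..$(next-1) and errorArgs continue at $next, so the
+ // concatenated slice lines up with both WHERE fragments in the single statement above.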
+ + var maxPerMinute sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { + return 0, err + } + if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { + return 0, nil + } + return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil +} + +func (r *opsRepository) queryPeakTPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + + q := ` +SELECT COALESCE(MAX(tokens_per_min), 0) +FROM ( + SELECT + date_trunc('minute', ul.created_at) AS bucket, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS tokens_per_min + FROM usage_logs ul + ` + join + ` + ` + where + ` + GROUP BY 1 +) t` + + var maxPerMinute sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { + return 0, err + } + if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { + return 0, nil + } + return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil +} + +func buildUsageWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (join string, where string, args []any, nextIndex int) { + platform := "" + groupID := (*int64)(nil) + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + groupID = filter.GroupID + } + + idx := startIndex + clauses := make([]string, 0, 4) + args = make([]any, 0, 4) + + args = append(args, start) + clauses = append(clauses, fmt.Sprintf("ul.created_at >= $%d", idx)) + idx++ + args = append(args, end) + clauses = append(clauses, fmt.Sprintf("ul.created_at < $%d", idx)) + idx++ + + if groupID != nil && *groupID > 0 { + args = append(args, *groupID) + clauses = append(clauses, fmt.Sprintf("ul.group_id = $%d", idx)) + idx++ + } + if platform != "" { + // Prefer group.platform when available; fall back to account.platform so we don't + // drop rows where group_id is NULL. 
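+ // E.g. a usage row with a NULL group_id still matches the platform filter
+ // through accounts.platform in the COALESCE below.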
+ join = "LEFT JOIN groups g ON g.id = ul.group_id LEFT JOIN accounts a ON a.id = ul.account_id" + args = append(args, platform) + clauses = append(clauses, fmt.Sprintf("COALESCE(NULLIF(g.platform,''), a.platform) = $%d", idx)) + idx++ + } + + where = "WHERE " + strings.Join(clauses, " AND ") + return join, where, args, idx +} + +func buildErrorWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (where string, args []any, nextIndex int) { + platform := "" + groupID := (*int64)(nil) + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + groupID = filter.GroupID + } + + idx := startIndex + clauses := make([]string, 0, 5) + args = make([]any, 0, 5) + + args = append(args, start) + clauses = append(clauses, fmt.Sprintf("created_at >= $%d", idx)) + idx++ + args = append(args, end) + clauses = append(clauses, fmt.Sprintf("created_at < $%d", idx)) + idx++ + + clauses = append(clauses, "is_count_tokens = FALSE") + + if groupID != nil && *groupID > 0 { + args = append(args, *groupID) + clauses = append(clauses, fmt.Sprintf("group_id = $%d", idx)) + idx++ + } + if platform != "" { + args = append(args, platform) + clauses = append(clauses, fmt.Sprintf("platform = $%d", idx)) + idx++ + } + + where = "WHERE " + strings.Join(clauses, " AND ") + return where, args, idx +} + +func floatToIntPtr(v sql.NullFloat64) *int { + if !v.Valid { + return nil + } + n := int(math.Round(v.Float64)) + return &n +} + +func safeDivideFloat64(numerator float64, denominator float64) float64 { + if denominator == 0 { + return 0 + } + return numerator / denominator +} + +func roundTo1DP(v float64) float64 { + return math.Round(v*10) / 10 +} + +func roundTo4DP(v float64) float64 { + return math.Round(v*10000) / 10000 +} diff --git a/backend/internal/repository/ops_repo_histograms.go b/backend/internal/repository/ops_repo_histograms.go new file mode 100644 index 00000000..c2978798 --- /dev/null +++ b/backend/internal/repository/ops_repo_histograms.go @@ -0,0 +1,79 @@ +package repository + +import ( + "context" + "fmt" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetLatencyHistogram(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsLatencyHistogramResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + rangeExpr := latencyHistogramRangeCaseExpr("ul.duration_ms") + orderExpr := latencyHistogramRangeOrderCaseExpr("ul.duration_ms") + + q := ` +SELECT + ` + rangeExpr + ` AS range, + COALESCE(COUNT(*), 0) AS count, + ` + orderExpr + ` AS ord +FROM usage_logs ul +` + join + ` +` + where + ` +AND ul.duration_ms IS NOT NULL +GROUP BY 1, 3 +ORDER BY 3 ASC` + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + counts := make(map[string]int64, len(latencyHistogramOrderedRanges)) + var total int64 + for rows.Next() { + var label string + var count int64 + var _ord int + if err := rows.Scan(&label, &count, &_ord); err != nil { + return nil, err + } + counts[label] = count + total += count + } + if err := rows.Err(); err != nil { + return nil, err + } + + buckets := make([]*service.OpsLatencyHistogramBucket, 0, len(latencyHistogramOrderedRanges)) + for _, label := range latencyHistogramOrderedRanges { + buckets = append(buckets, &service.OpsLatencyHistogramBucket{ + Range: label, + Count: counts[label], + }) + } + + return &service.OpsLatencyHistogramResponse{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + TotalRequests: total, + Buckets: buckets, + }, nil +} diff --git a/backend/internal/repository/ops_repo_latency_histogram_buckets.go b/backend/internal/repository/ops_repo_latency_histogram_buckets.go new file mode 100644 index 00000000..cd5bed37 --- /dev/null +++ b/backend/internal/repository/ops_repo_latency_histogram_buckets.go @@ -0,0 +1,64 @@ +package repository + +import ( + "fmt" + "strings" +) + +type latencyHistogramBucket struct { + upperMs int + label string +} + +var latencyHistogramBuckets = []latencyHistogramBucket{ + {upperMs: 100, label: "0-100ms"}, + {upperMs: 200, label: "100-200ms"}, + {upperMs: 500, label: "200-500ms"}, + {upperMs: 1000, label: "500-1000ms"}, + {upperMs: 2000, label: "1000-2000ms"}, + {upperMs: 0, label: "2000ms+"}, // default bucket +} + +var latencyHistogramOrderedRanges = func() []string { + out := make([]string, 0, len(latencyHistogramBuckets)) + for _, b := range latencyHistogramBuckets { + out = append(out, b.label) + } + return out +}() + +func latencyHistogramRangeCaseExpr(column string) string { + var sb strings.Builder + _, _ = sb.WriteString("CASE\n") + + for _, b := range latencyHistogramBuckets { + if b.upperMs <= 0 { + continue + } + _, _ = sb.WriteString(fmt.Sprintf("\tWHEN %s < %d THEN '%s'\n", column, b.upperMs, b.label)) + } + + // Default bucket. 
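+ // With the default buckets this renders roughly as:
+ //   CASE WHEN <col> < 100 THEN '0-100ms' ... WHEN <col> < 2000 THEN '1000-2000ms' ELSE '2000ms+' END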
+ last := latencyHistogramBuckets[len(latencyHistogramBuckets)-1] + _, _ = sb.WriteString(fmt.Sprintf("\tELSE '%s'\n", last.label)) + _, _ = sb.WriteString("END") + return sb.String() +} + +func latencyHistogramRangeOrderCaseExpr(column string) string { + var sb strings.Builder + _, _ = sb.WriteString("CASE\n") + + order := 1 + for _, b := range latencyHistogramBuckets { + if b.upperMs <= 0 { + continue + } + _, _ = sb.WriteString(fmt.Sprintf("\tWHEN %s < %d THEN %d\n", column, b.upperMs, order)) + order++ + } + + _, _ = sb.WriteString(fmt.Sprintf("\tELSE %d\n", order)) + _, _ = sb.WriteString("END") + return sb.String() +} diff --git a/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go b/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go new file mode 100644 index 00000000..dc79f6cc --- /dev/null +++ b/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go @@ -0,0 +1,14 @@ +package repository + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLatencyHistogramBuckets_AreConsistent(t *testing.T) { + require.Equal(t, len(latencyHistogramBuckets), len(latencyHistogramOrderedRanges)) + for i, b := range latencyHistogramBuckets { + require.Equal(t, b.label, latencyHistogramOrderedRanges[i]) + } +} diff --git a/backend/internal/repository/ops_repo_metrics.go b/backend/internal/repository/ops_repo_metrics.go new file mode 100644 index 00000000..bc80ed6e --- /dev/null +++ b/backend/internal/repository/ops_repo_metrics.go @@ -0,0 +1,422 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) InsertSystemMetrics(ctx context.Context, input *service.OpsInsertSystemMetricsInput) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + + window := input.WindowMinutes + if window <= 0 { + window = 1 + } + createdAt := input.CreatedAt + if createdAt.IsZero() { + createdAt = time.Now().UTC() + } + + q := ` +INSERT INTO ops_system_metrics ( + created_at, + window_minutes, + platform, + group_id, + + success_count, + error_count_total, + business_limited_count, + error_count_sla, + + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + + token_consumed, + qps, + tps, + + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + + cpu_usage_percent, + memory_used_mb, + memory_total_mb, + memory_usage_percent, + + db_ok, + redis_ok, + + redis_conn_total, + redis_conn_idle, + + db_conn_active, + db_conn_idle, + db_conn_waiting, + + goroutine_count, + concurrency_queue_depth +) VALUES ( + $1,$2,$3,$4, + $5,$6,$7,$8, + $9,$10,$11, + $12,$13,$14, + $15,$16,$17,$18,$19,$20, + $21,$22,$23,$24,$25,$26, + $27,$28,$29,$30, + $31,$32, + $33,$34, + $35,$36,$37, + $38,$39 +)` + + _, err := r.db.ExecContext( + ctx, + q, + createdAt, + window, + opsNullString(input.Platform), + opsNullInt64(input.GroupID), + + input.SuccessCount, + input.ErrorCountTotal, + input.BusinessLimitedCount, + input.ErrorCountSLA, + + input.UpstreamErrorCountExcl429529, + input.Upstream429Count, + input.Upstream529Count, + + input.TokenConsumed, + opsNullFloat64(input.QPS), + opsNullFloat64(input.TPS), + + opsNullInt(input.DurationP50Ms), + opsNullInt(input.DurationP90Ms), + 
opsNullInt(input.DurationP95Ms), + opsNullInt(input.DurationP99Ms), + opsNullFloat64(input.DurationAvgMs), + opsNullInt(input.DurationMaxMs), + + opsNullInt(input.TTFTP50Ms), + opsNullInt(input.TTFTP90Ms), + opsNullInt(input.TTFTP95Ms), + opsNullInt(input.TTFTP99Ms), + opsNullFloat64(input.TTFTAvgMs), + opsNullInt(input.TTFTMaxMs), + + opsNullFloat64(input.CPUUsagePercent), + opsNullInt(input.MemoryUsedMB), + opsNullInt(input.MemoryTotalMB), + opsNullFloat64(input.MemoryUsagePercent), + + opsNullBool(input.DBOK), + opsNullBool(input.RedisOK), + + opsNullInt(input.RedisConnTotal), + opsNullInt(input.RedisConnIdle), + + opsNullInt(input.DBConnActive), + opsNullInt(input.DBConnIdle), + opsNullInt(input.DBConnWaiting), + + opsNullInt(input.GoroutineCount), + opsNullInt(input.ConcurrencyQueueDepth), + ) + return err +} + +func (r *opsRepository) GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*service.OpsSystemMetricsSnapshot, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if windowMinutes <= 0 { + windowMinutes = 1 + } + + q := ` +SELECT + id, + created_at, + window_minutes, + + cpu_usage_percent, + memory_used_mb, + memory_total_mb, + memory_usage_percent, + + db_ok, + redis_ok, + + redis_conn_total, + redis_conn_idle, + + db_conn_active, + db_conn_idle, + db_conn_waiting, + + goroutine_count, + concurrency_queue_depth +FROM ops_system_metrics +WHERE window_minutes = $1 + AND platform IS NULL + AND group_id IS NULL +ORDER BY created_at DESC +LIMIT 1` + + var out service.OpsSystemMetricsSnapshot + var cpu sql.NullFloat64 + var memUsed sql.NullInt64 + var memTotal sql.NullInt64 + var memPct sql.NullFloat64 + var dbOK sql.NullBool + var redisOK sql.NullBool + var redisTotal sql.NullInt64 + var redisIdle sql.NullInt64 + var dbActive sql.NullInt64 + var dbIdle sql.NullInt64 + var dbWaiting sql.NullInt64 + var goroutines sql.NullInt64 + var queueDepth sql.NullInt64 + + if err := r.db.QueryRowContext(ctx, q, windowMinutes).Scan( + &out.ID, + &out.CreatedAt, + &out.WindowMinutes, + &cpu, + &memUsed, + &memTotal, + &memPct, + &dbOK, + &redisOK, + &redisTotal, + &redisIdle, + &dbActive, + &dbIdle, + &dbWaiting, + &goroutines, + &queueDepth, + ); err != nil { + return nil, err + } + + if cpu.Valid { + v := cpu.Float64 + out.CPUUsagePercent = &v + } + if memUsed.Valid { + v := memUsed.Int64 + out.MemoryUsedMB = &v + } + if memTotal.Valid { + v := memTotal.Int64 + out.MemoryTotalMB = &v + } + if memPct.Valid { + v := memPct.Float64 + out.MemoryUsagePercent = &v + } + if dbOK.Valid { + v := dbOK.Bool + out.DBOK = &v + } + if redisOK.Valid { + v := redisOK.Bool + out.RedisOK = &v + } + if redisTotal.Valid { + v := int(redisTotal.Int64) + out.RedisConnTotal = &v + } + if redisIdle.Valid { + v := int(redisIdle.Int64) + out.RedisConnIdle = &v + } + if dbActive.Valid { + v := int(dbActive.Int64) + out.DBConnActive = &v + } + if dbIdle.Valid { + v := int(dbIdle.Int64) + out.DBConnIdle = &v + } + if dbWaiting.Valid { + v := int(dbWaiting.Int64) + out.DBConnWaiting = &v + } + if goroutines.Valid { + v := int(goroutines.Int64) + out.GoroutineCount = &v + } + if queueDepth.Valid { + v := int(queueDepth.Int64) + out.ConcurrencyQueueDepth = &v + } + + return &out, nil +} + +func (r *opsRepository) UpsertJobHeartbeat(ctx context.Context, input *service.OpsUpsertJobHeartbeatInput) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + if input.JobName == "" { + return 
fmt.Errorf("job_name required") + } + + q := ` +INSERT INTO ops_job_heartbeats ( + job_name, + last_run_at, + last_success_at, + last_error_at, + last_error, + last_duration_ms, + updated_at +) VALUES ( + $1,$2,$3,$4,$5,$6,NOW() +) +ON CONFLICT (job_name) DO UPDATE SET + last_run_at = COALESCE(EXCLUDED.last_run_at, ops_job_heartbeats.last_run_at), + last_success_at = COALESCE(EXCLUDED.last_success_at, ops_job_heartbeats.last_success_at), + last_error_at = CASE + WHEN EXCLUDED.last_success_at IS NOT NULL THEN NULL + ELSE COALESCE(EXCLUDED.last_error_at, ops_job_heartbeats.last_error_at) + END, + last_error = CASE + WHEN EXCLUDED.last_success_at IS NOT NULL THEN NULL + ELSE COALESCE(EXCLUDED.last_error, ops_job_heartbeats.last_error) + END, + last_duration_ms = COALESCE(EXCLUDED.last_duration_ms, ops_job_heartbeats.last_duration_ms), + updated_at = NOW()` + + _, err := r.db.ExecContext( + ctx, + q, + input.JobName, + opsNullTime(input.LastRunAt), + opsNullTime(input.LastSuccessAt), + opsNullTime(input.LastErrorAt), + opsNullString(input.LastError), + opsNullInt(input.LastDurationMs), + ) + return err +} + +func (r *opsRepository) ListJobHeartbeats(ctx context.Context) ([]*service.OpsJobHeartbeat, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + + q := ` +SELECT + job_name, + last_run_at, + last_success_at, + last_error_at, + last_error, + last_duration_ms, + updated_at +FROM ops_job_heartbeats +ORDER BY job_name ASC` + + rows, err := r.db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsJobHeartbeat, 0, 8) + for rows.Next() { + var item service.OpsJobHeartbeat + var lastRun sql.NullTime + var lastSuccess sql.NullTime + var lastErrorAt sql.NullTime + var lastError sql.NullString + var lastDuration sql.NullInt64 + + if err := rows.Scan( + &item.JobName, + &lastRun, + &lastSuccess, + &lastErrorAt, + &lastError, + &lastDuration, + &item.UpdatedAt, + ); err != nil { + return nil, err + } + + if lastRun.Valid { + v := lastRun.Time + item.LastRunAt = &v + } + if lastSuccess.Valid { + v := lastSuccess.Time + item.LastSuccessAt = &v + } + if lastErrorAt.Valid { + v := lastErrorAt.Time + item.LastErrorAt = &v + } + if lastError.Valid { + v := lastError.String + item.LastError = &v + } + if lastDuration.Valid { + v := lastDuration.Int64 + item.LastDurationMs = &v + } + + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func opsNullBool(v *bool) any { + if v == nil { + return sql.NullBool{} + } + return sql.NullBool{Bool: *v, Valid: true} +} + +func opsNullFloat64(v *float64) any { + if v == nil { + return sql.NullFloat64{} + } + return sql.NullFloat64{Float64: *v, Valid: true} +} + +func opsNullTime(v *time.Time) any { + if v == nil || v.IsZero() { + return sql.NullTime{} + } + return sql.NullTime{Time: *v, Valid: true} +} diff --git a/backend/internal/repository/ops_repo_preagg.go b/backend/internal/repository/ops_repo_preagg.go new file mode 100644 index 00000000..ad94e13f --- /dev/null +++ b/backend/internal/repository/ops_repo_preagg.go @@ -0,0 +1,363 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "time" +) + +func (r *opsRepository) UpsertHourlyMetrics(ctx context.Context, startTime, endTime time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if startTime.IsZero() || endTime.IsZero() || !endTime.After(startTime) { + return nil + } + + start 
:= startTime.UTC() + end := endTime.UTC() + + // NOTE: + // - We aggregate usage_logs + ops_error_logs into ops_metrics_hourly. + // - We emit three dimension granularities via GROUPING SETS: + // 1) overall: (bucket_start) + // 2) platform: (bucket_start, platform) + // 3) group: (bucket_start, platform, group_id) + // + // IMPORTANT: Postgres UNIQUE treats NULLs as distinct, so the table uses a COALESCE-based + // unique index; our ON CONFLICT target must match that expression set. + q := ` +WITH usage_base AS ( + SELECT + date_trunc('hour', ul.created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start, + g.platform AS platform, + ul.group_id AS group_id, + ul.duration_ms AS duration_ms, + ul.first_token_ms AS first_token_ms, + (ul.input_tokens + ul.output_tokens + ul.cache_creation_tokens + ul.cache_read_tokens) AS tokens + FROM usage_logs ul + JOIN groups g ON g.id = ul.group_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 +), +usage_agg AS ( + SELECT + bucket_start, + CASE WHEN GROUPING(platform) = 1 THEN NULL ELSE platform END AS platform, + CASE WHEN GROUPING(group_id) = 1 THEN NULL ELSE group_id END AS group_id, + COUNT(*) AS success_count, + COALESCE(SUM(tokens), 0) AS token_consumed, + + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p50_ms, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p90_ms, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p95_ms, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p99_ms, + AVG(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_avg_ms, + MAX(duration_ms) AS duration_max_ms, + + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p50_ms, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p90_ms, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p95_ms, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p99_ms, + AVG(first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_avg_ms, + MAX(first_token_ms) AS ttft_max_ms + FROM usage_base + GROUP BY GROUPING SETS ( + (bucket_start), + (bucket_start, platform), + (bucket_start, platform, group_id) + ) +), +error_base AS ( + SELECT + date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start, + -- platform is NULL for some early-phase errors (e.g. before routing); map to a sentinel + -- value so platform-level GROUPING SETS don't collide with the overall (platform=NULL) row. 
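+ -- E.g. an error recorded before routing (platform = NULL) is counted under
+ -- platform = 'unknown' rather than inflating the overall rollup row.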
+ COALESCE(platform, 'unknown') AS platform, + group_id AS group_id, + is_business_limited AS is_business_limited, + error_owner AS error_owner, + status_code AS client_status_code, + COALESCE(upstream_status_code, status_code, 0) AS effective_status_code + FROM ops_error_logs + -- Exclude count_tokens requests from error metrics as they are informational probes + WHERE created_at >= $1 AND created_at < $2 + AND is_count_tokens = FALSE +), +error_agg AS ( + SELECT + bucket_start, + CASE WHEN GROUPING(platform) = 1 THEN NULL ELSE platform END AS platform, + CASE WHEN GROUPING(group_id) = 1 THEN NULL ELSE group_id END AS group_id, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400) AS error_count_total, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400 AND is_business_limited) AS business_limited_count, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400 AND NOT is_business_limited) AS error_count_sla, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) NOT IN (429, 529)) AS upstream_error_count_excl_429_529, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) = 429) AS upstream_429_count, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) = 529) AS upstream_529_count + FROM error_base + GROUP BY GROUPING SETS ( + (bucket_start), + (bucket_start, platform), + (bucket_start, platform, group_id) + ) + HAVING GROUPING(group_id) = 1 OR group_id IS NOT NULL +), +combined AS ( + SELECT + COALESCE(u.bucket_start, e.bucket_start) AS bucket_start, + COALESCE(u.platform, e.platform) AS platform, + COALESCE(u.group_id, e.group_id) AS group_id, + + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count_total, 0) AS error_count_total, + COALESCE(e.business_limited_count, 0) AS business_limited_count, + COALESCE(e.error_count_sla, 0) AS error_count_sla, + COALESCE(e.upstream_error_count_excl_429_529, 0) AS upstream_error_count_excl_429_529, + COALESCE(e.upstream_429_count, 0) AS upstream_429_count, + COALESCE(e.upstream_529_count, 0) AS upstream_529_count, + + COALESCE(u.token_consumed, 0) AS token_consumed, + + u.duration_p50_ms, + u.duration_p90_ms, + u.duration_p95_ms, + u.duration_p99_ms, + u.duration_avg_ms, + u.duration_max_ms, + + u.ttft_p50_ms, + u.ttft_p90_ms, + u.ttft_p95_ms, + u.ttft_p99_ms, + u.ttft_avg_ms, + u.ttft_max_ms + FROM usage_agg u + FULL OUTER JOIN error_agg e + ON u.bucket_start = e.bucket_start + AND COALESCE(u.platform, '') = COALESCE(e.platform, '') + AND COALESCE(u.group_id, 0) = COALESCE(e.group_id, 0) +) +INSERT INTO ops_metrics_hourly ( + bucket_start, + platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + computed_at +) +SELECT + bucket_start, + NULLIF(platform, '') AS platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms::int, + duration_p90_ms::int, + duration_p95_ms::int, + duration_p99_ms::int, + 
duration_avg_ms, + duration_max_ms::int, + ttft_p50_ms::int, + ttft_p90_ms::int, + ttft_p95_ms::int, + ttft_p99_ms::int, + ttft_avg_ms, + ttft_max_ms::int, + NOW() +FROM combined +WHERE bucket_start IS NOT NULL + AND (platform IS NULL OR platform <> '') +ON CONFLICT (bucket_start, COALESCE(platform, ''), COALESCE(group_id, 0)) DO UPDATE SET + success_count = EXCLUDED.success_count, + error_count_total = EXCLUDED.error_count_total, + business_limited_count = EXCLUDED.business_limited_count, + error_count_sla = EXCLUDED.error_count_sla, + upstream_error_count_excl_429_529 = EXCLUDED.upstream_error_count_excl_429_529, + upstream_429_count = EXCLUDED.upstream_429_count, + upstream_529_count = EXCLUDED.upstream_529_count, + token_consumed = EXCLUDED.token_consumed, + + duration_p50_ms = EXCLUDED.duration_p50_ms, + duration_p90_ms = EXCLUDED.duration_p90_ms, + duration_p95_ms = EXCLUDED.duration_p95_ms, + duration_p99_ms = EXCLUDED.duration_p99_ms, + duration_avg_ms = EXCLUDED.duration_avg_ms, + duration_max_ms = EXCLUDED.duration_max_ms, + + ttft_p50_ms = EXCLUDED.ttft_p50_ms, + ttft_p90_ms = EXCLUDED.ttft_p90_ms, + ttft_p95_ms = EXCLUDED.ttft_p95_ms, + ttft_p99_ms = EXCLUDED.ttft_p99_ms, + ttft_avg_ms = EXCLUDED.ttft_avg_ms, + ttft_max_ms = EXCLUDED.ttft_max_ms, + + computed_at = NOW() +` + + _, err := r.db.ExecContext(ctx, q, start, end) + return err +} + +func (r *opsRepository) UpsertDailyMetrics(ctx context.Context, startTime, endTime time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if startTime.IsZero() || endTime.IsZero() || !endTime.After(startTime) { + return nil + } + + start := startTime.UTC() + end := endTime.UTC() + + q := ` +INSERT INTO ops_metrics_daily ( + bucket_date, + platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + computed_at +) +SELECT + (bucket_start AT TIME ZONE 'UTC')::date AS bucket_date, + platform, + group_id, + + COALESCE(SUM(success_count), 0) AS success_count, + COALESCE(SUM(error_count_total), 0) AS error_count_total, + COALESCE(SUM(business_limited_count), 0) AS business_limited_count, + COALESCE(SUM(error_count_sla), 0) AS error_count_sla, + COALESCE(SUM(upstream_error_count_excl_429_529), 0) AS upstream_error_count_excl_429_529, + COALESCE(SUM(upstream_429_count), 0) AS upstream_429_count, + COALESCE(SUM(upstream_529_count), 0) AS upstream_529_count, + COALESCE(SUM(token_consumed), 0) AS token_consumed, + + -- Approximation: weighted average for p50/p90, max for p95/p99 (conservative tail). 
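+ -- E.g. hourly p50s of 100ms (900 requests) and 300ms (100 requests) combine to
+ -- (100*900 + 300*100) / 1000 = 120ms for the day.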
+ ROUND(SUM(duration_p50_ms::double precision * success_count) FILTER (WHERE duration_p50_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_p50_ms IS NOT NULL), 0))::int AS duration_p50_ms, + ROUND(SUM(duration_p90_ms::double precision * success_count) FILTER (WHERE duration_p90_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_p90_ms IS NOT NULL), 0))::int AS duration_p90_ms, + MAX(duration_p95_ms) AS duration_p95_ms, + MAX(duration_p99_ms) AS duration_p99_ms, + SUM(duration_avg_ms * success_count) FILTER (WHERE duration_avg_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_avg_ms IS NOT NULL), 0) AS duration_avg_ms, + MAX(duration_max_ms) AS duration_max_ms, + + ROUND(SUM(ttft_p50_ms::double precision * success_count) FILTER (WHERE ttft_p50_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_p50_ms IS NOT NULL), 0))::int AS ttft_p50_ms, + ROUND(SUM(ttft_p90_ms::double precision * success_count) FILTER (WHERE ttft_p90_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_p90_ms IS NOT NULL), 0))::int AS ttft_p90_ms, + MAX(ttft_p95_ms) AS ttft_p95_ms, + MAX(ttft_p99_ms) AS ttft_p99_ms, + SUM(ttft_avg_ms * success_count) FILTER (WHERE ttft_avg_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_avg_ms IS NOT NULL), 0) AS ttft_avg_ms, + MAX(ttft_max_ms) AS ttft_max_ms, + + NOW() +FROM ops_metrics_hourly +WHERE bucket_start >= $1 AND bucket_start < $2 +GROUP BY 1, 2, 3 +ON CONFLICT (bucket_date, COALESCE(platform, ''), COALESCE(group_id, 0)) DO UPDATE SET + success_count = EXCLUDED.success_count, + error_count_total = EXCLUDED.error_count_total, + business_limited_count = EXCLUDED.business_limited_count, + error_count_sla = EXCLUDED.error_count_sla, + upstream_error_count_excl_429_529 = EXCLUDED.upstream_error_count_excl_429_529, + upstream_429_count = EXCLUDED.upstream_429_count, + upstream_529_count = EXCLUDED.upstream_529_count, + token_consumed = EXCLUDED.token_consumed, + + duration_p50_ms = EXCLUDED.duration_p50_ms, + duration_p90_ms = EXCLUDED.duration_p90_ms, + duration_p95_ms = EXCLUDED.duration_p95_ms, + duration_p99_ms = EXCLUDED.duration_p99_ms, + duration_avg_ms = EXCLUDED.duration_avg_ms, + duration_max_ms = EXCLUDED.duration_max_ms, + + ttft_p50_ms = EXCLUDED.ttft_p50_ms, + ttft_p90_ms = EXCLUDED.ttft_p90_ms, + ttft_p95_ms = EXCLUDED.ttft_p95_ms, + ttft_p99_ms = EXCLUDED.ttft_p99_ms, + ttft_avg_ms = EXCLUDED.ttft_avg_ms, + ttft_max_ms = EXCLUDED.ttft_max_ms, + + computed_at = NOW() +` + + _, err := r.db.ExecContext(ctx, q, start, end) + return err +} + +func (r *opsRepository) GetLatestHourlyBucketStart(ctx context.Context) (time.Time, bool, error) { + if r == nil || r.db == nil { + return time.Time{}, false, fmt.Errorf("nil ops repository") + } + + var value sql.NullTime + if err := r.db.QueryRowContext(ctx, `SELECT MAX(bucket_start) FROM ops_metrics_hourly`).Scan(&value); err != nil { + return time.Time{}, false, err + } + if !value.Valid { + return time.Time{}, false, nil + } + return value.Time.UTC(), true, nil +} + +func (r *opsRepository) GetLatestDailyBucketDate(ctx context.Context) (time.Time, bool, error) { + if r == nil || r.db == nil { + return time.Time{}, false, fmt.Errorf("nil ops repository") + } + + var value sql.NullTime + if err := r.db.QueryRowContext(ctx, `SELECT MAX(bucket_date) FROM ops_metrics_daily`).Scan(&value); err != nil { + return time.Time{}, false, err + } + if !value.Valid { + return time.Time{}, false, nil + } + t := value.Time.UTC() + return 
time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC), true, nil +} diff --git a/backend/internal/repository/ops_repo_realtime_traffic.go b/backend/internal/repository/ops_repo_realtime_traffic.go new file mode 100644 index 00000000..a9b0b929 --- /dev/null +++ b/backend/internal/repository/ops_repo_realtime_traffic.go @@ -0,0 +1,129 @@ +package repository + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetRealtimeTrafficSummary(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsRealtimeTrafficSummary, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + if start.After(end) { + return nil, fmt.Errorf("start_time must be <= end_time") + } + + window := end.Sub(start) + if window <= 0 { + return nil, fmt.Errorf("invalid time window") + } + if window > time.Hour { + return nil, fmt.Errorf("window too large") + } + + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + q := ` +WITH usage_buckets AS ( + SELECT + date_trunc('minute', ul.created_at) AS bucket, + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_sum + FROM usage_logs ul + ` + usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT + date_trunc('minute', created_at) AS bucket, + COALESCE(COUNT(*), 0) AS error_count + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT + COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(u.token_sum, 0) AS token_sum, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.success_count, 0) + COALESCE(e.error_count, 0) AS request_total + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT + COALESCE(SUM(success_count), 0) AS success_total, + COALESCE(SUM(error_count), 0) AS error_total, + COALESCE(SUM(token_sum), 0) AS token_total, + COALESCE(MAX(request_total), 0) AS peak_requests_per_min, + COALESCE(MAX(token_sum), 0) AS peak_tokens_per_min +FROM combined` + + args := append(usageArgs, errorArgs...) + var successCount int64 + var errorTotal int64 + var tokenConsumed int64 + var peakRequestsPerMin int64 + var peakTokensPerMin int64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan( + &successCount, + &errorTotal, + &tokenConsumed, + &peakRequestsPerMin, + &peakTokensPerMin, + ); err != nil { + return nil, err + } + + windowSeconds := window.Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) + tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + + // Keep "current" consistent with the dashboard overview semantics: last 1 minute. + // This remains "within the selected window" since end=start+window. 
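+ // E.g. 90 requests in [end-1m, end) yields a Current QPS of 90/60 = 1.5.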
+ qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + qpsPeak := roundTo1DP(float64(peakRequestsPerMin) / 60.0) + tpsPeak := roundTo1DP(float64(peakTokensPerMin) / 60.0) + + return &service.OpsRealtimeTrafficSummary{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + QPS: service.OpsRateSummary{ + Current: qpsCurrent, + Peak: qpsPeak, + Avg: qpsAvg, + }, + TPS: service.OpsRateSummary{ + Current: tpsCurrent, + Peak: tpsPeak, + Avg: tpsAvg, + }, + }, nil +} diff --git a/backend/internal/repository/ops_repo_request_details.go b/backend/internal/repository/ops_repo_request_details.go new file mode 100644 index 00000000..d8d5d111 --- /dev/null +++ b/backend/internal/repository/ops_repo_request_details.go @@ -0,0 +1,286 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) ListRequestDetails(ctx context.Context, filter *service.OpsRequestDetailFilter) ([]*service.OpsRequestDetail, int64, error) { + if r == nil || r.db == nil { + return nil, 0, fmt.Errorf("nil ops repository") + } + + page, pageSize, startTime, endTime := filter.Normalize() + offset := (page - 1) * pageSize + + conditions := make([]string, 0, 16) + args := make([]any, 0, 24) + + // Placeholders $1/$2 reserved for time window inside the CTE. + args = append(args, startTime.UTC(), endTime.UTC()) + + addCondition := func(condition string, values ...any) { + conditions = append(conditions, condition) + args = append(args, values...) + } + + if filter != nil { + if kind := strings.TrimSpace(strings.ToLower(filter.Kind)); kind != "" && kind != "all" { + if kind != string(service.OpsRequestKindSuccess) && kind != string(service.OpsRequestKindError) { + return nil, 0, fmt.Errorf("invalid kind") + } + addCondition(fmt.Sprintf("kind = $%d", len(args)+1), kind) + } + + if platform := strings.TrimSpace(strings.ToLower(filter.Platform)); platform != "" { + addCondition(fmt.Sprintf("platform = $%d", len(args)+1), platform) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + addCondition(fmt.Sprintf("group_id = $%d", len(args)+1), *filter.GroupID) + } + + if filter.UserID != nil && *filter.UserID > 0 { + addCondition(fmt.Sprintf("user_id = $%d", len(args)+1), *filter.UserID) + } + if filter.APIKeyID != nil && *filter.APIKeyID > 0 { + addCondition(fmt.Sprintf("api_key_id = $%d", len(args)+1), *filter.APIKeyID) + } + if filter.AccountID != nil && *filter.AccountID > 0 { + addCondition(fmt.Sprintf("account_id = $%d", len(args)+1), *filter.AccountID) + } + + if model := strings.TrimSpace(filter.Model); model != "" { + addCondition(fmt.Sprintf("model = $%d", len(args)+1), model) + } + if requestID := strings.TrimSpace(filter.RequestID); requestID != "" { + addCondition(fmt.Sprintf("request_id = $%d", len(args)+1), requestID) + } + if q := strings.TrimSpace(filter.Query); q != "" { + like := "%" + strings.ToLower(q) + "%" + startIdx := len(args) + 1 + addCondition( + fmt.Sprintf("(LOWER(COALESCE(request_id,'')) LIKE $%d OR LOWER(COALESCE(model,'')) LIKE $%d OR LOWER(COALESCE(message,'')) LIKE $%d)", + startIdx, startIdx+1, startIdx+2, + ), + like, like, like, + ) + } + + if filter.MinDurationMs != nil { + addCondition(fmt.Sprintf("duration_ms >= $%d", len(args)+1), *filter.MinDurationMs) + } + if filter.MaxDurationMs != nil { + addCondition(fmt.Sprintf("duration_ms <= $%d", len(args)+1), 
*filter.MaxDurationMs) + } + } + + where := "" + if len(conditions) > 0 { + where = "WHERE " + strings.Join(conditions, " AND ") + } + + cte := ` +WITH combined AS ( + SELECT + 'success'::TEXT AS kind, + ul.created_at AS created_at, + ul.request_id AS request_id, + COALESCE(NULLIF(g.platform, ''), NULLIF(a.platform, ''), '') AS platform, + ul.model AS model, + ul.duration_ms AS duration_ms, + NULL::INT AS status_code, + NULL::BIGINT AS error_id, + NULL::TEXT AS phase, + NULL::TEXT AS severity, + NULL::TEXT AS message, + ul.user_id AS user_id, + ul.api_key_id AS api_key_id, + ul.account_id AS account_id, + ul.group_id AS group_id, + ul.stream AS stream + FROM usage_logs ul + LEFT JOIN groups g ON g.id = ul.group_id + LEFT JOIN accounts a ON a.id = ul.account_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 + + UNION ALL + + SELECT + 'error'::TEXT AS kind, + o.created_at AS created_at, + COALESCE(NULLIF(o.request_id,''), NULLIF(o.client_request_id,''), '') AS request_id, + COALESCE(NULLIF(o.platform, ''), NULLIF(g.platform, ''), NULLIF(a.platform, ''), '') AS platform, + o.model AS model, + o.duration_ms AS duration_ms, + o.status_code AS status_code, + o.id AS error_id, + o.error_phase AS phase, + o.severity AS severity, + o.error_message AS message, + o.user_id AS user_id, + o.api_key_id AS api_key_id, + o.account_id AS account_id, + o.group_id AS group_id, + o.stream AS stream + FROM ops_error_logs o + LEFT JOIN groups g ON g.id = o.group_id + LEFT JOIN accounts a ON a.id = o.account_id + WHERE o.created_at >= $1 AND o.created_at < $2 + AND COALESCE(o.status_code, 0) >= 400 +) +` + + countQuery := fmt.Sprintf(`%s SELECT COUNT(1) FROM combined %s`, cte, where) + var total int64 + if err := r.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil { + if err == sql.ErrNoRows { + total = 0 + } else { + return nil, 0, err + } + } + + sort := "ORDER BY created_at DESC" + if filter != nil { + switch strings.TrimSpace(strings.ToLower(filter.Sort)) { + case "", "created_at_desc": + // default + case "duration_desc": + sort = "ORDER BY duration_ms DESC NULLS LAST, created_at DESC" + default: + return nil, 0, fmt.Errorf("invalid sort") + } + } + + listQuery := fmt.Sprintf(` +%s +SELECT + kind, + created_at, + request_id, + platform, + model, + duration_ms, + status_code, + error_id, + phase, + severity, + message, + user_id, + api_key_id, + account_id, + group_id, + stream +FROM combined +%s +%s +LIMIT $%d OFFSET $%d +`, cte, where, sort, len(args)+1, len(args)+2) + + listArgs := append(append([]any{}, args...), pageSize, offset) + rows, err := r.db.QueryContext(ctx, listQuery, listArgs...) 
+ if err != nil { + return nil, 0, err + } + defer func() { _ = rows.Close() }() + + toIntPtr := func(v sql.NullInt64) *int { + if !v.Valid { + return nil + } + i := int(v.Int64) + return &i + } + toInt64Ptr := func(v sql.NullInt64) *int64 { + if !v.Valid { + return nil + } + i := v.Int64 + return &i + } + + out := make([]*service.OpsRequestDetail, 0, pageSize) + for rows.Next() { + var ( + kind string + createdAt time.Time + requestID sql.NullString + platform sql.NullString + model sql.NullString + + durationMs sql.NullInt64 + statusCode sql.NullInt64 + errorID sql.NullInt64 + + phase sql.NullString + severity sql.NullString + message sql.NullString + + userID sql.NullInt64 + apiKeyID sql.NullInt64 + accountID sql.NullInt64 + groupID sql.NullInt64 + + stream bool + ) + + if err := rows.Scan( + &kind, + &createdAt, + &requestID, + &platform, + &model, + &durationMs, + &statusCode, + &errorID, + &phase, + &severity, + &message, + &userID, + &apiKeyID, + &accountID, + &groupID, + &stream, + ); err != nil { + return nil, 0, err + } + + item := &service.OpsRequestDetail{ + Kind: service.OpsRequestKind(kind), + CreatedAt: createdAt, + RequestID: strings.TrimSpace(requestID.String), + Platform: strings.TrimSpace(platform.String), + Model: strings.TrimSpace(model.String), + + DurationMs: toIntPtr(durationMs), + StatusCode: toIntPtr(statusCode), + ErrorID: toInt64Ptr(errorID), + Phase: phase.String, + Severity: severity.String, + Message: message.String, + + UserID: toInt64Ptr(userID), + APIKeyID: toInt64Ptr(apiKeyID), + AccountID: toInt64Ptr(accountID), + GroupID: toInt64Ptr(groupID), + + Stream: stream, + } + + if item.Platform == "" { + item.Platform = "unknown" + } + + out = append(out, item) + } + if err := rows.Err(); err != nil { + return nil, 0, err + } + + return out, total, nil +} diff --git a/backend/internal/repository/ops_repo_trends.go b/backend/internal/repository/ops_repo_trends.go new file mode 100644 index 00000000..022d1187 --- /dev/null +++ b/backend/internal/repository/ops_repo_trends.go @@ -0,0 +1,573 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetThroughputTrend(ctx context.Context, filter *service.OpsDashboardFilter, bucketSeconds int) (*service.OpsThroughputTrendResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if bucketSeconds != 60 && bucketSeconds != 300 && bucketSeconds != 3600 { + // Keep a small, predictable set of supported buckets for now. 
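+ // Supported values: 60s (minute), 300s (5 minutes), 3600s (hour); anything else
+ // falls back to the minute bucket below.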
+ bucketSeconds = 60 + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + usageBucketExpr := opsBucketExprForUsage(bucketSeconds) + errorBucketExpr := opsBucketExprForError(bucketSeconds) + + q := ` +WITH usage_buckets AS ( + SELECT ` + usageBucketExpr + ` AS bucket, + COUNT(*) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed + FROM usage_logs ul + ` + usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT ` + errorBucketExpr + ` AS bucket, + COUNT(*) AS error_count + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.token_consumed, 0) AS token_consumed + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT + bucket, + (success_count + error_count) AS request_count, + token_consumed +FROM combined +ORDER BY bucket ASC` + + args := append(usageArgs, errorArgs...) + + rows, err := r.db.QueryContext(ctx, q, args...) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + points := make([]*service.OpsThroughputTrendPoint, 0, 256) + for rows.Next() { + var bucket time.Time + var requests int64 + var tokens sql.NullInt64 + if err := rows.Scan(&bucket, &requests, &tokens); err != nil { + return nil, err + } + tokenConsumed := int64(0) + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + + denom := float64(bucketSeconds) + if denom <= 0 { + denom = 60 + } + qps := roundTo1DP(float64(requests) / denom) + tps := roundTo1DP(float64(tokenConsumed) / denom) + + points = append(points, &service.OpsThroughputTrendPoint{ + BucketStart: bucket.UTC(), + RequestCount: requests, + TokenConsumed: tokenConsumed, + QPS: qps, + TPS: tps, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + // Fill missing buckets with zeros so charts render continuous timelines. 
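+ // E.g. a 10:00-10:05 window at 60s buckets should yield five points
+ // (10:00 through 10:04), even when some minutes saw no traffic.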
+	points = fillOpsThroughputBuckets(start, end, bucketSeconds, points)
+
+	var byPlatform []*service.OpsThroughputPlatformBreakdownItem
+	var topGroups []*service.OpsThroughputGroupBreakdownItem
+
+	// filter is guaranteed non-nil by the guard at the top of this function.
+	platform := strings.TrimSpace(strings.ToLower(filter.Platform))
+	groupID := filter.GroupID
+
+	// Drilldown helpers:
+	// - No platform/group: totals by platform
+	// - Platform selected but no group: top groups in that platform
+	if platform == "" && (groupID == nil || *groupID <= 0) {
+		items, err := r.getThroughputBreakdownByPlatform(ctx, start, end)
+		if err != nil {
+			return nil, err
+		}
+		byPlatform = items
+	} else if platform != "" && (groupID == nil || *groupID <= 0) {
+		items, err := r.getThroughputTopGroupsByPlatform(ctx, start, end, platform, 10)
+		if err != nil {
+			return nil, err
+		}
+		topGroups = items
+	}
+
+	return &service.OpsThroughputTrendResponse{
+		Bucket: opsBucketLabel(bucketSeconds),
+		Points: points,
+
+		ByPlatform: byPlatform,
+		TopGroups:  topGroups,
+	}, nil
+}
+
+func (r *opsRepository) getThroughputBreakdownByPlatform(ctx context.Context, start, end time.Time) ([]*service.OpsThroughputPlatformBreakdownItem, error) {
+	q := `
+WITH usage_totals AS (
+    SELECT COALESCE(NULLIF(g.platform,''), a.platform) AS platform,
+           COUNT(*) AS success_count,
+           COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed
+    FROM usage_logs ul
+    LEFT JOIN groups g ON g.id = ul.group_id
+    LEFT JOIN accounts a ON a.id = ul.account_id
+    WHERE ul.created_at >= $1 AND ul.created_at < $2
+    GROUP BY 1
+),
+error_totals AS (
+    SELECT platform,
+           COUNT(*) AS error_count
+    FROM ops_error_logs
+    WHERE created_at >= $1 AND created_at < $2
+      AND COALESCE(status_code, 0) >= 400
+      AND is_count_tokens = FALSE -- exclude errors from count_tokens requests
+    GROUP BY 1
+),
+combined AS (
+    SELECT COALESCE(u.platform, e.platform) AS platform,
+           COALESCE(u.success_count, 0) AS success_count,
+           COALESCE(e.error_count, 0) AS error_count,
+           COALESCE(u.token_consumed, 0) AS token_consumed
+    FROM usage_totals u
+    FULL OUTER JOIN error_totals e ON u.platform = e.platform
+)
+SELECT platform, (success_count + error_count) AS request_count, token_consumed
+FROM combined
+WHERE platform IS NOT NULL AND platform <> ''
+ORDER BY request_count DESC`
+
+	rows, err := r.db.QueryContext(ctx, q, start, end)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rows.Close() }()
+
+	items := make([]*service.OpsThroughputPlatformBreakdownItem, 0, 8)
+	for rows.Next() {
+		var platform string
+		var requests int64
+		var tokens sql.NullInt64
+		if err := rows.Scan(&platform, &requests, &tokens); err != nil {
+			return nil, err
+		}
+		tokenConsumed := int64(0)
+		if tokens.Valid {
+			tokenConsumed = tokens.Int64
+		}
+		items = append(items, &service.OpsThroughputPlatformBreakdownItem{
+			Platform:      platform,
+			RequestCount:  requests,
+			TokenConsumed: tokenConsumed,
+		})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}
+
+func (r *opsRepository) getThroughputTopGroupsByPlatform(ctx context.Context, start, end time.Time, platform string, limit int) ([]*service.OpsThroughputGroupBreakdownItem, error) {
+	if strings.TrimSpace(platform) == "" {
+		return nil, nil
+	}
+	if limit <= 0 || limit > 100 {
+		limit = 10
+	}
+
+	q := `
+WITH usage_totals AS (
+    SELECT ul.group_id AS group_id,
+           g.name AS group_name,
+           COUNT(*) AS success_count,
+           COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed
+    FROM usage_logs ul
+    JOIN groups g ON g.id = ul.group_id
+    WHERE ul.created_at >= $1 AND ul.created_at < $2
+      AND g.platform = $3
+    GROUP BY 1, 2
+),
+error_totals AS (
+    SELECT group_id,
+           COUNT(*) AS error_count
+    FROM ops_error_logs
+    WHERE created_at >= $1 AND created_at < $2
+      AND platform = $3
+      AND group_id IS NOT NULL
+      AND COALESCE(status_code, 0) >= 400
+      AND is_count_tokens = FALSE -- exclude errors from count_tokens requests
+    GROUP BY 1
+),
+combined AS (
+    SELECT COALESCE(u.group_id, e.group_id) AS group_id,
+           COALESCE(u.group_name, g2.name, '') AS group_name,
+           COALESCE(u.success_count, 0) AS success_count,
+           COALESCE(e.error_count, 0) AS error_count,
+           COALESCE(u.token_consumed, 0) AS token_consumed
+    FROM usage_totals u
+    FULL OUTER JOIN error_totals e ON u.group_id = e.group_id
+    LEFT JOIN groups g2 ON g2.id = COALESCE(u.group_id, e.group_id)
+)
+SELECT group_id, group_name, (success_count + error_count) AS request_count, token_consumed
+FROM combined
+WHERE group_id IS NOT NULL
+ORDER BY request_count DESC
+LIMIT $4`
+
+	rows, err := r.db.QueryContext(ctx, q, start, end, platform, limit)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rows.Close() }()
+
+	items := make([]*service.OpsThroughputGroupBreakdownItem, 0, limit)
+	for rows.Next() {
+		var groupID int64
+		var groupName sql.NullString
+		var requests int64
+		var tokens sql.NullInt64
+		if err := rows.Scan(&groupID, &groupName, &requests, &tokens); err != nil {
+			return nil, err
+		}
+		tokenConsumed := int64(0)
+		if tokens.Valid {
+			tokenConsumed = tokens.Int64
+		}
+		name := ""
+		if groupName.Valid {
+			name = groupName.String
+		}
+		items = append(items, &service.OpsThroughputGroupBreakdownItem{
+			GroupID:       groupID,
+			GroupName:     name,
+			RequestCount:  requests,
+			TokenConsumed: tokenConsumed,
+		})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return items, nil
+}
+
+func opsBucketExprForUsage(bucketSeconds int) string {
+	switch bucketSeconds {
+	case 3600:
+		return "date_trunc('hour', ul.created_at)"
+	case 300:
+		// 5-minute buckets in UTC.
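+		// date_trunc has no 5-minute field, so floor the epoch to a
+		// 300-second boundary instead.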
+ return "to_timestamp(floor(extract(epoch from ul.created_at) / 300) * 300)" + default: + return "date_trunc('minute', ul.created_at)" + } +} + +func opsBucketExprForError(bucketSeconds int) string { + switch bucketSeconds { + case 3600: + return "date_trunc('hour', created_at)" + case 300: + return "to_timestamp(floor(extract(epoch from created_at) / 300) * 300)" + default: + return "date_trunc('minute', created_at)" + } +} + +func opsBucketLabel(bucketSeconds int) string { + if bucketSeconds <= 0 { + return "1m" + } + if bucketSeconds%3600 == 0 { + h := bucketSeconds / 3600 + if h <= 0 { + h = 1 + } + return fmt.Sprintf("%dh", h) + } + m := bucketSeconds / 60 + if m <= 0 { + m = 1 + } + return fmt.Sprintf("%dm", m) +} + +func opsFloorToBucketStart(t time.Time, bucketSeconds int) time.Time { + t = t.UTC() + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + secs := t.Unix() + floored := secs - (secs % int64(bucketSeconds)) + return time.Unix(floored, 0).UTC() +} + +func fillOpsThroughputBuckets(start, end time.Time, bucketSeconds int, points []*service.OpsThroughputTrendPoint) []*service.OpsThroughputTrendPoint { + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if !start.Before(end) { + return points + } + + endMinus := end.Add(-time.Nanosecond) + if endMinus.Before(start) { + return points + } + + first := opsFloorToBucketStart(start, bucketSeconds) + last := opsFloorToBucketStart(endMinus, bucketSeconds) + step := time.Duration(bucketSeconds) * time.Second + + existing := make(map[int64]*service.OpsThroughputTrendPoint, len(points)) + for _, p := range points { + if p == nil { + continue + } + existing[p.BucketStart.UTC().Unix()] = p + } + + out := make([]*service.OpsThroughputTrendPoint, 0, int(last.Sub(first)/step)+1) + for cursor := first; !cursor.After(last); cursor = cursor.Add(step) { + if p, ok := existing[cursor.Unix()]; ok && p != nil { + out = append(out, p) + continue + } + out = append(out, &service.OpsThroughputTrendPoint{ + BucketStart: cursor, + RequestCount: 0, + TokenConsumed: 0, + QPS: 0, + TPS: 0, + }) + } + return out +} + +func (r *opsRepository) GetErrorTrend(ctx context.Context, filter *service.OpsDashboardFilter, bucketSeconds int) (*service.OpsErrorTrendResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if bucketSeconds != 60 && bucketSeconds != 300 && bucketSeconds != 3600 { + bucketSeconds = 60 + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + where, args, _ := buildErrorWhere(filter, start, end, 1) + bucketExpr := opsBucketExprForError(bucketSeconds) + + q := ` +SELECT + ` + bucketExpr + ` AS bucket, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400) AS error_total, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited) AS business_limited, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited) AS error_sla, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)) AS upstream_excl, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429) AS upstream_429, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT 
is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529) AS upstream_529 +FROM ops_error_logs +` + where + ` +GROUP BY 1 +ORDER BY 1 ASC` + + rows, err := r.db.QueryContext(ctx, q, args...) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + points := make([]*service.OpsErrorTrendPoint, 0, 256) + for rows.Next() { + var bucket time.Time + var total, businessLimited, sla, upstreamExcl, upstream429, upstream529 int64 + if err := rows.Scan(&bucket, &total, &businessLimited, &sla, &upstreamExcl, &upstream429, &upstream529); err != nil { + return nil, err + } + points = append(points, &service.OpsErrorTrendPoint{ + BucketStart: bucket.UTC(), + + ErrorCountTotal: total, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: sla, + + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + points = fillOpsErrorTrendBuckets(start, end, bucketSeconds, points) + + return &service.OpsErrorTrendResponse{ + Bucket: opsBucketLabel(bucketSeconds), + Points: points, + }, nil +} + +func fillOpsErrorTrendBuckets(start, end time.Time, bucketSeconds int, points []*service.OpsErrorTrendPoint) []*service.OpsErrorTrendPoint { + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if !start.Before(end) { + return points + } + + endMinus := end.Add(-time.Nanosecond) + if endMinus.Before(start) { + return points + } + + first := opsFloorToBucketStart(start, bucketSeconds) + last := opsFloorToBucketStart(endMinus, bucketSeconds) + step := time.Duration(bucketSeconds) * time.Second + + existing := make(map[int64]*service.OpsErrorTrendPoint, len(points)) + for _, p := range points { + if p == nil { + continue + } + existing[p.BucketStart.UTC().Unix()] = p + } + + out := make([]*service.OpsErrorTrendPoint, 0, int(last.Sub(first)/step)+1) + for cursor := first; !cursor.After(last); cursor = cursor.Add(step) { + if p, ok := existing[cursor.Unix()]; ok && p != nil { + out = append(out, p) + continue + } + out = append(out, &service.OpsErrorTrendPoint{ + BucketStart: cursor, + + ErrorCountTotal: 0, + BusinessLimitedCount: 0, + ErrorCountSLA: 0, + + UpstreamErrorCountExcl429529: 0, + Upstream429Count: 0, + Upstream529Count: 0, + }) + } + return out +} + +func (r *opsRepository) GetErrorDistribution(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsErrorDistributionResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + where, args, _ := buildErrorWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(upstream_status_code, status_code, 0) AS status_code, + COUNT(*) AS total, + COUNT(*) FILTER (WHERE NOT is_business_limited) AS sla, + COUNT(*) FILTER (WHERE is_business_limited) AS business_limited +FROM ops_error_logs +` + where + ` + AND COALESCE(status_code, 0) >= 400 +GROUP BY 1 +ORDER BY total DESC +LIMIT 20` + + rows, err := r.db.QueryContext(ctx, q, args...) 
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rows.Close() }()
+
+	items := make([]*service.OpsErrorDistributionItem, 0, 16)
+	var total int64
+	for rows.Next() {
+		var statusCode int
+		var cntTotal, cntSLA, cntBiz int64
+		if err := rows.Scan(&statusCode, &cntTotal, &cntSLA, &cntBiz); err != nil {
+			return nil, err
+		}
+		total += cntTotal
+		items = append(items, &service.OpsErrorDistributionItem{
+			StatusCode:      statusCode,
+			Total:           cntTotal,
+			SLA:             cntSLA,
+			BusinessLimited: cntBiz,
+		})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return &service.OpsErrorDistributionResponse{
+		Total: total,
+		Items: items,
+	}, nil
+}
diff --git a/backend/internal/repository/ops_repo_window_stats.go b/backend/internal/repository/ops_repo_window_stats.go
new file mode 100644
index 00000000..8221c473
--- /dev/null
+++ b/backend/internal/repository/ops_repo_window_stats.go
@@ -0,0 +1,50 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+func (r *opsRepository) GetWindowStats(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsWindowStats, error) {
+	if r == nil || r.db == nil {
+		return nil, fmt.Errorf("nil ops repository")
+	}
+	if filter == nil {
+		return nil, fmt.Errorf("nil filter")
+	}
+	if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
+		return nil, fmt.Errorf("start_time/end_time required")
+	}
+
+	start := filter.StartTime.UTC()
+	end := filter.EndTime.UTC()
+	if start.After(end) {
+		return nil, fmt.Errorf("start_time must be <= end_time")
+	}
+	// Bound excessively large windows to prevent accidental heavy queries.
+	if end.Sub(start) > 24*time.Hour {
+		return nil, fmt.Errorf("window too large")
+	}
+
+	successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end)
+	if err != nil {
+		return nil, err
+	}
+
+	errorTotal, _, _, _, _, _, err := r.queryErrorCounts(ctx, filter, start, end)
+	if err != nil {
+		return nil, err
+	}
+
+	return &service.OpsWindowStats{
+		StartTime: start,
+		EndTime:   end,
+
+		SuccessCount:    successCount,
+		ErrorCountTotal: errorTotal,
+		TokenConsumed:   tokenConsumed,
+	}, nil
+}
diff --git a/backend/internal/repository/pagination.go b/backend/internal/repository/pagination.go
new file mode 100644
index 00000000..ff08c34b
--- /dev/null
+++ b/backend/internal/repository/pagination.go
@@ -0,0 +1,16 @@
+package repository
+
+import "github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+
+func paginationResultFromTotal(total int64, params pagination.PaginationParams) *pagination.PaginationResult {
+	pages := int(total) / params.Limit()
+	if int(total)%params.Limit() > 0 {
+		pages++
+	}
+	return &pagination.PaginationResult{
+		Total:    total,
+		Page:     params.Page,
+		PageSize: params.Limit(),
+		Pages:    pages,
+	}
+}
diff --git a/backend/internal/repository/pricing_service.go b/backend/internal/repository/pricing_service.go
new file mode 100644
index 00000000..07d796b8
--- /dev/null
+++ b/backend/internal/repository/pricing_service.go
@@ -0,0 +1,81 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/httpclient"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+type pricingRemoteClient struct {
+	httpClient *http.Client
+}
+
+// NewPricingRemoteClient creates the remote client for fetching pricing data.
+// An empty proxyURL means a direct connection; http/https/socks5/socks5h
+// proxy schemes are supported.
+func NewPricingRemoteClient(proxyURL string) service.PricingRemoteClient {
+	sharedClient, err := httpclient.GetClient(httpclient.Options{
+		Timeout:  30 * time.Second,
+		ProxyURL: proxyURL,
+	})
+	if err != nil {
+		// Fall back to a plain direct-connect client if proxy setup fails.
+		sharedClient = &http.Client{Timeout: 30 * time.Second}
+	}
+	return &pricingRemoteClient{
+		httpClient: sharedClient,
+	}
+}
+
+func (c *pricingRemoteClient) FetchPricingJSON(ctx context.Context, url string) ([]byte, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("HTTP %d", resp.StatusCode)
+	}
+
+	return io.ReadAll(resp.Body)
+}
+
+func (c *pricingRemoteClient) FetchHashText(ctx context.Context, url string) (string, error) {
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return "", err
+	}
+
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		return "", err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	if resp.StatusCode != http.StatusOK {
+		return "", fmt.Errorf("HTTP %d", resp.StatusCode)
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	// Hash file format: "hash filename", or a bare hash.
+	hash := strings.TrimSpace(string(body))
+	parts := strings.Fields(hash)
+	if len(parts) > 0 {
+		return parts[0], nil
+	}
+	return hash, nil
+}
diff --git a/backend/internal/repository/pricing_service_test.go b/backend/internal/repository/pricing_service_test.go
new file mode 100644
index 00000000..6ea11211
--- /dev/null
+++ b/backend/internal/repository/pricing_service_test.go
@@ -0,0 +1,145 @@
+package repository
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+type PricingServiceSuite struct {
+	suite.Suite
+	ctx    context.Context
+	srv    *httptest.Server
+	client *pricingRemoteClient
+}
+
+func (s *PricingServiceSuite) SetupTest() {
+	s.ctx = context.Background()
+	client, ok := NewPricingRemoteClient("").(*pricingRemoteClient)
+	require.True(s.T(), ok, "type assertion failed")
+	s.client = client
+}
+
+func (s *PricingServiceSuite) TearDownTest() {
+	if s.srv != nil {
+		s.srv.Close()
+		s.srv = nil
+	}
+}
+
+func (s *PricingServiceSuite) setupServer(handler http.HandlerFunc) {
+	s.srv = newLocalTestServer(s.T(), handler)
+}
+
+func (s *PricingServiceSuite) TestFetchPricingJSON_Success() {
+	s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/ok" {
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write([]byte(`{"ok":true}`))
+			return
+		}
+		w.WriteHeader(http.StatusInternalServerError)
+	}))
+
+	body, err := s.client.FetchPricingJSON(s.ctx, s.srv.URL+"/ok")
+	require.NoError(s.T(), err, "FetchPricingJSON")
+	require.Equal(s.T(), `{"ok":true}`, string(body), "body mismatch")
+}
+
+func (s *PricingServiceSuite) TestFetchPricingJSON_NonOKStatus() {
+	s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(http.StatusInternalServerError)
+	}))
+
+	_, err := s.client.FetchPricingJSON(s.ctx, s.srv.URL+"/err")
+	require.Error(s.T(), err, "expected error for non-200 status")
+}
+
+func (s *PricingServiceSuite) TestFetchHashText_ParsesFields() {
+	s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		switch r.URL.Path {
+		case "/hashfile":
+			w.WriteHeader(http.StatusOK)
+			_, _ = w.Write([]byte("abc123 model_prices.json\n"))
+		case "/hashonly":
+			w.WriteHeader(http.StatusOK)
+			_, _ =
w.Write([]byte("def456\n")) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + + hash, err := s.client.FetchHashText(s.ctx, s.srv.URL+"/hashfile") + require.NoError(s.T(), err, "FetchHashText") + require.Equal(s.T(), "abc123", hash, "hash mismatch") + + hash2, err := s.client.FetchHashText(s.ctx, s.srv.URL+"/hashonly") + require.NoError(s.T(), err, "FetchHashText") + require.Equal(s.T(), "def456", hash2, "hash mismatch") +} + +func (s *PricingServiceSuite) TestFetchHashText_NonOKStatus() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + + _, err := s.client.FetchHashText(s.ctx, s.srv.URL+"/nope") + require.Error(s.T(), err, "expected error for non-200 status") +} + +func (s *PricingServiceSuite) TestFetchPricingJSON_InvalidURL() { + _, err := s.client.FetchPricingJSON(s.ctx, "://invalid-url") + require.Error(s.T(), err, "expected error for invalid URL") +} + +func (s *PricingServiceSuite) TestFetchHashText_EmptyBody() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + // empty body + })) + + hash, err := s.client.FetchHashText(s.ctx, s.srv.URL+"/empty") + require.NoError(s.T(), err, "FetchHashText empty body should not error") + require.Equal(s.T(), "", hash, "expected empty hash") +} + +func (s *PricingServiceSuite) TestFetchHashText_WhitespaceOnly() { + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(" \n")) + })) + + hash, err := s.client.FetchHashText(s.ctx, s.srv.URL+"/ws") + require.NoError(s.T(), err, "FetchHashText whitespace body should not error") + require.Equal(s.T(), "", hash, "expected empty hash after trimming") +} + +func (s *PricingServiceSuite) TestFetchPricingJSON_ContextCancel() { + started := make(chan struct{}) + s.setupServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + close(started) + <-r.Context().Done() + })) + + ctx, cancel := context.WithCancel(s.ctx) + + done := make(chan error, 1) + go func() { + _, err := s.client.FetchPricingJSON(ctx, s.srv.URL+"/block") + done <- err + }() + + <-started + cancel() + + err := <-done + require.Error(s.T(), err) +} + +func TestPricingServiceSuite(t *testing.T) { + suite.Run(t, new(PricingServiceSuite)) +} diff --git a/backend/internal/repository/promo_code_repo.go b/backend/internal/repository/promo_code_repo.go new file mode 100644 index 00000000..98b422e0 --- /dev/null +++ b/backend/internal/repository/promo_code_repo.go @@ -0,0 +1,273 @@ +package repository + +import ( + "context" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type promoCodeRepository struct { + client *dbent.Client +} + +func NewPromoCodeRepository(client *dbent.Client) service.PromoCodeRepository { + return &promoCodeRepository{client: client} +} + +func (r *promoCodeRepository) Create(ctx context.Context, code *service.PromoCode) error { + client := clientFromContext(ctx, r.client) + builder := client.PromoCode.Create(). + SetCode(code.Code). + SetBonusAmount(code.BonusAmount). + SetMaxUses(code.MaxUses). + SetUsedCount(code.UsedCount). + SetStatus(code.Status). 
+ SetNotes(code.Notes) + + if code.ExpiresAt != nil { + builder.SetExpiresAt(*code.ExpiresAt) + } + + created, err := builder.Save(ctx) + if err != nil { + return err + } + + code.ID = created.ID + code.CreatedAt = created.CreatedAt + code.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *promoCodeRepository) GetByID(ctx context.Context, id int64) (*service.PromoCode, error) { + m, err := r.client.PromoCode.Query(). + Where(promocode.IDEQ(id)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) GetByCode(ctx context.Context, code string) (*service.PromoCode, error) { + m, err := r.client.PromoCode.Query(). + Where(promocode.CodeEqualFold(code)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) GetByCodeForUpdate(ctx context.Context, code string) (*service.PromoCode, error) { + client := clientFromContext(ctx, r.client) + m, err := client.PromoCode.Query(). + Where(promocode.CodeEqualFold(code)). + ForUpdate(). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) Update(ctx context.Context, code *service.PromoCode) error { + client := clientFromContext(ctx, r.client) + builder := client.PromoCode.UpdateOneID(code.ID). + SetCode(code.Code). + SetBonusAmount(code.BonusAmount). + SetMaxUses(code.MaxUses). + SetUsedCount(code.UsedCount). + SetStatus(code.Status). + SetNotes(code.Notes) + + if code.ExpiresAt != nil { + builder.SetExpiresAt(*code.ExpiresAt) + } else { + builder.ClearExpiresAt() + } + + updated, err := builder.Save(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return service.ErrPromoCodeNotFound + } + return err + } + + code.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *promoCodeRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.PromoCode.Delete().Where(promocode.IDEQ(id)).Exec(ctx) + return err +} + +func (r *promoCodeRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.PromoCode, *pagination.PaginationResult, error) { + return r.ListWithFilters(ctx, params, "", "") +} + +func (r *promoCodeRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, status, search string) ([]service.PromoCode, *pagination.PaginationResult, error) { + q := r.client.PromoCode.Query() + + if status != "" { + q = q.Where(promocode.StatusEQ(status)) + } + if search != "" { + q = q.Where(promocode.CodeContainsFold(search)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + codes, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(promocode.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outCodes := promoCodeEntitiesToService(codes) + + return outCodes, paginationResultFromTotal(int64(total), params), nil +} + +func (r *promoCodeRepository) CreateUsage(ctx context.Context, usage *service.PromoCodeUsage) error { + client := clientFromContext(ctx, r.client) + created, err := client.PromoCodeUsage.Create(). + SetPromoCodeID(usage.PromoCodeID). + SetUserID(usage.UserID). + SetBonusAmount(usage.BonusAmount). 
+ SetUsedAt(usage.UsedAt). + Save(ctx) + if err != nil { + return err + } + + usage.ID = created.ID + return nil +} + +func (r *promoCodeRepository) GetUsageByPromoCodeAndUser(ctx context.Context, promoCodeID, userID int64) (*service.PromoCodeUsage, error) { + m, err := r.client.PromoCodeUsage.Query(). + Where( + promocodeusage.PromoCodeIDEQ(promoCodeID), + promocodeusage.UserIDEQ(userID), + ). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, nil + } + return nil, err + } + return promoCodeUsageEntityToService(m), nil +} + +func (r *promoCodeRepository) ListUsagesByPromoCode(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]service.PromoCodeUsage, *pagination.PaginationResult, error) { + q := r.client.PromoCodeUsage.Query(). + Where(promocodeusage.PromoCodeIDEQ(promoCodeID)) + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + usages, err := q. + WithUser(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(promocodeusage.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outUsages := promoCodeUsageEntitiesToService(usages) + + return outUsages, paginationResultFromTotal(int64(total), params), nil +} + +func (r *promoCodeRepository) IncrementUsedCount(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.PromoCode.UpdateOneID(id). + AddUsedCount(1). + Save(ctx) + return err +} + +// Entity to Service conversions + +func promoCodeEntityToService(m *dbent.PromoCode) *service.PromoCode { + if m == nil { + return nil + } + return &service.PromoCode{ + ID: m.ID, + Code: m.Code, + BonusAmount: m.BonusAmount, + MaxUses: m.MaxUses, + UsedCount: m.UsedCount, + Status: m.Status, + ExpiresAt: m.ExpiresAt, + Notes: derefString(m.Notes), + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func promoCodeEntitiesToService(models []*dbent.PromoCode) []service.PromoCode { + out := make([]service.PromoCode, 0, len(models)) + for i := range models { + if s := promoCodeEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} + +func promoCodeUsageEntityToService(m *dbent.PromoCodeUsage) *service.PromoCodeUsage { + if m == nil { + return nil + } + out := &service.PromoCodeUsage{ + ID: m.ID, + PromoCodeID: m.PromoCodeID, + UserID: m.UserID, + BonusAmount: m.BonusAmount, + UsedAt: m.UsedAt, + } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + return out +} + +func promoCodeUsageEntitiesToService(models []*dbent.PromoCodeUsage) []service.PromoCodeUsage { + out := make([]service.PromoCodeUsage, 0, len(models)) + for i := range models { + if s := promoCodeUsageEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} diff --git a/backend/internal/repository/proxy_latency_cache.go b/backend/internal/repository/proxy_latency_cache.go new file mode 100644 index 00000000..4458b5e1 --- /dev/null +++ b/backend/internal/repository/proxy_latency_cache.go @@ -0,0 +1,74 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const proxyLatencyKeyPrefix = "proxy:latency:" + +func proxyLatencyKey(proxyID int64) string { + return fmt.Sprintf("%s%d", proxyLatencyKeyPrefix, proxyID) +} + +type proxyLatencyCache struct { + rdb *redis.Client +} + +func NewProxyLatencyCache(rdb *redis.Client) service.ProxyLatencyCache { + return &proxyLatencyCache{rdb: rdb} +} + 
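+// GetProxyLatencies batch-loads cached latency info with a single MGET.
+// A minimal usage sketch (assuming ctx and an already configured
+// *redis.Client named rdb; the IDs are illustrative):
+//
+//	cache := NewProxyLatencyCache(rdb)
+//	infos, err := cache.GetProxyLatencies(ctx, []int64{1, 2, 3})
+//	// proxies with no cached probe are simply absent from infos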
+func (c *proxyLatencyCache) GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*service.ProxyLatencyInfo, error) { + results := make(map[int64]*service.ProxyLatencyInfo) + if len(proxyIDs) == 0 { + return results, nil + } + + keys := make([]string, 0, len(proxyIDs)) + for _, id := range proxyIDs { + keys = append(keys, proxyLatencyKey(id)) + } + + values, err := c.rdb.MGet(ctx, keys...).Result() + if err != nil { + return results, err + } + + for i, raw := range values { + if raw == nil { + continue + } + var payload []byte + switch v := raw.(type) { + case string: + payload = []byte(v) + case []byte: + payload = v + default: + continue + } + var info service.ProxyLatencyInfo + if err := json.Unmarshal(payload, &info); err != nil { + continue + } + results[proxyIDs[i]] = &info + } + + return results, nil +} + +func (c *proxyLatencyCache) SetProxyLatency(ctx context.Context, proxyID int64, info *service.ProxyLatencyInfo) error { + if info == nil { + return nil + } + payload, err := json.Marshal(info) + if err != nil { + return err + } + return c.rdb.Set(ctx, proxyLatencyKey(proxyID), payload, 0).Err() +} diff --git a/backend/internal/repository/proxy_probe_service.go b/backend/internal/repository/proxy_probe_service.go new file mode 100644 index 00000000..fb6f405e --- /dev/null +++ b/backend/internal/repository/proxy_probe_service.go @@ -0,0 +1,118 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func NewProxyExitInfoProber(cfg *config.Config) service.ProxyExitInfoProber { + insecure := false + allowPrivate := false + validateResolvedIP := true + if cfg != nil { + insecure = cfg.Security.ProxyProbe.InsecureSkipVerify + allowPrivate = cfg.Security.URLAllowlist.AllowPrivateHosts + validateResolvedIP = cfg.Security.URLAllowlist.Enabled + } + if insecure { + log.Printf("[ProxyProbe] Warning: insecure_skip_verify is not allowed and will cause probe failure.") + } + return &proxyProbeService{ + ipInfoURL: defaultIPInfoURL, + insecureSkipVerify: insecure, + allowPrivateHosts: allowPrivate, + validateResolvedIP: validateResolvedIP, + } +} + +const ( + defaultIPInfoURL = "http://ip-api.com/json/?lang=zh-CN" + defaultProxyProbeTimeout = 30 * time.Second +) + +type proxyProbeService struct { + ipInfoURL string + insecureSkipVerify bool + allowPrivateHosts bool + validateResolvedIP bool +} + +func (s *proxyProbeService) ProbeProxy(ctx context.Context, proxyURL string) (*service.ProxyExitInfo, int64, error) { + client, err := httpclient.GetClient(httpclient.Options{ + ProxyURL: proxyURL, + Timeout: defaultProxyProbeTimeout, + InsecureSkipVerify: s.insecureSkipVerify, + ProxyStrict: true, + ValidateResolvedIP: s.validateResolvedIP, + AllowPrivateHosts: s.allowPrivateHosts, + }) + if err != nil { + return nil, 0, fmt.Errorf("failed to create proxy client: %w", err) + } + + startTime := time.Now() + req, err := http.NewRequestWithContext(ctx, "GET", s.ipInfoURL, nil) + if err != nil { + return nil, 0, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := client.Do(req) + if err != nil { + return nil, 0, fmt.Errorf("proxy connection failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + latencyMs := time.Since(startTime).Milliseconds() + + if resp.StatusCode != http.StatusOK { + return nil, latencyMs, fmt.Errorf("request failed 
with status: %d", resp.StatusCode) + } + + var ipInfo struct { + Status string `json:"status"` + Message string `json:"message"` + Query string `json:"query"` + City string `json:"city"` + Region string `json:"region"` + RegionName string `json:"regionName"` + Country string `json:"country"` + CountryCode string `json:"countryCode"` + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, latencyMs, fmt.Errorf("failed to read response: %w", err) + } + + if err := json.Unmarshal(body, &ipInfo); err != nil { + return nil, latencyMs, fmt.Errorf("failed to parse response: %w", err) + } + if strings.ToLower(ipInfo.Status) != "success" { + if ipInfo.Message == "" { + ipInfo.Message = "ip-api request failed" + } + return nil, latencyMs, fmt.Errorf("ip-api request failed: %s", ipInfo.Message) + } + + region := ipInfo.RegionName + if region == "" { + region = ipInfo.Region + } + return &service.ProxyExitInfo{ + IP: ipInfo.Query, + City: ipInfo.City, + Region: region, + Country: ipInfo.Country, + CountryCode: ipInfo.CountryCode, + }, latencyMs, nil +} diff --git a/backend/internal/repository/proxy_probe_service_test.go b/backend/internal/repository/proxy_probe_service_test.go new file mode 100644 index 00000000..f1cd5721 --- /dev/null +++ b/backend/internal/repository/proxy_probe_service_test.go @@ -0,0 +1,119 @@ +package repository + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type ProxyProbeServiceSuite struct { + suite.Suite + ctx context.Context + proxySrv *httptest.Server + prober *proxyProbeService +} + +func (s *ProxyProbeServiceSuite) SetupTest() { + s.ctx = context.Background() + s.prober = &proxyProbeService{ + ipInfoURL: "http://ip-api.test/json/?lang=zh-CN", + allowPrivateHosts: true, + } +} + +func (s *ProxyProbeServiceSuite) TearDownTest() { + if s.proxySrv != nil { + s.proxySrv.Close() + s.proxySrv = nil + } +} + +func (s *ProxyProbeServiceSuite) setupProxyServer(handler http.HandlerFunc) { + s.proxySrv = newLocalTestServer(s.T(), handler) +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_InvalidProxyURL() { + _, _, err := s.prober.ProbeProxy(s.ctx, "://bad") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "failed to create proxy client") +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_UnsupportedProxyScheme() { + _, _, err := s.prober.ProbeProxy(s.ctx, "ftp://127.0.0.1:1") + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "failed to create proxy client") +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_Success() { + seen := make(chan string, 1) + s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + seen <- r.RequestURI + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, `{"status":"success","query":"1.2.3.4","city":"c","regionName":"r","country":"cc","countryCode":"CC"}`) + })) + + info, latencyMs, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) + require.NoError(s.T(), err, "ProbeProxy") + require.GreaterOrEqual(s.T(), latencyMs, int64(0), "unexpected latency") + require.Equal(s.T(), "1.2.3.4", info.IP) + require.Equal(s.T(), "c", info.City) + require.Equal(s.T(), "r", info.Region) + require.Equal(s.T(), "cc", info.Country) + require.Equal(s.T(), "CC", info.CountryCode) + + // Verify proxy received the request + select { + case uri := <-seen: + require.Contains(s.T(), uri, "ip-api.test", "expected request to go through proxy") + default: + 
require.Fail(s.T(), "expected proxy to receive request") + } +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_NonOKStatus() { + s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusServiceUnavailable) + })) + + _, _, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "status: 503") +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_InvalidJSON() { + s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, "not-json") + })) + + _, _, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) + require.Error(s.T(), err) + require.ErrorContains(s.T(), err, "failed to parse response") +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_InvalidIPInfoURL() { + s.prober.ipInfoURL = "://invalid-url" + s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + _, _, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) + require.Error(s.T(), err, "expected error for invalid ipInfoURL") +} + +func (s *ProxyProbeServiceSuite) TestProbeProxy_ProxyServerClosed() { + s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + s.proxySrv.Close() + + _, _, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) + require.Error(s.T(), err, "expected error when proxy server is closed") +} + +func TestProxyProbeServiceSuite(t *testing.T) { + suite.Run(t, new(ProxyProbeServiceSuite)) +} diff --git a/backend/internal/repository/proxy_repo.go b/backend/internal/repository/proxy_repo.go new file mode 100644 index 00000000..36965c05 --- /dev/null +++ b/backend/internal/repository/proxy_repo.go @@ -0,0 +1,359 @@ +package repository + +import ( + "context" + "database/sql" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/proxy" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +type sqlQuerier interface { + QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) +} + +type proxyRepository struct { + client *dbent.Client + sql sqlQuerier +} + +func NewProxyRepository(client *dbent.Client, sqlDB *sql.DB) service.ProxyRepository { + return newProxyRepositoryWithSQL(client, sqlDB) +} + +func newProxyRepositoryWithSQL(client *dbent.Client, sqlq sqlQuerier) *proxyRepository { + return &proxyRepository{client: client, sql: sqlq} +} + +func (r *proxyRepository) Create(ctx context.Context, proxyIn *service.Proxy) error { + builder := r.client.Proxy.Create(). + SetName(proxyIn.Name). + SetProtocol(proxyIn.Protocol). + SetHost(proxyIn.Host). + SetPort(proxyIn.Port). + SetStatus(proxyIn.Status) + if proxyIn.Username != "" { + builder.SetUsername(proxyIn.Username) + } + if proxyIn.Password != "" { + builder.SetPassword(proxyIn.Password) + } + + created, err := builder.Save(ctx) + if err == nil { + applyProxyEntityToService(proxyIn, created) + } + return err +} + +func (r *proxyRepository) GetByID(ctx context.Context, id int64) (*service.Proxy, error) { + m, err := r.client.Proxy.Get(ctx, id) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrProxyNotFound + } + return nil, err + } + return proxyEntityToService(m), nil +} + +func (r *proxyRepository) Update(ctx context.Context, proxyIn *service.Proxy) error { + builder := r.client.Proxy.UpdateOneID(proxyIn.ID). + SetName(proxyIn.Name). 
+ SetProtocol(proxyIn.Protocol). + SetHost(proxyIn.Host). + SetPort(proxyIn.Port). + SetStatus(proxyIn.Status) + if proxyIn.Username != "" { + builder.SetUsername(proxyIn.Username) + } else { + builder.ClearUsername() + } + if proxyIn.Password != "" { + builder.SetPassword(proxyIn.Password) + } else { + builder.ClearPassword() + } + + updated, err := builder.Save(ctx) + if err == nil { + applyProxyEntityToService(proxyIn, updated) + return nil + } + if dbent.IsNotFound(err) { + return service.ErrProxyNotFound + } + return err +} + +func (r *proxyRepository) Delete(ctx context.Context, id int64) error { + _, err := r.client.Proxy.Delete().Where(proxy.IDEQ(id)).Exec(ctx) + return err +} + +func (r *proxyRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Proxy, *pagination.PaginationResult, error) { + return r.ListWithFilters(ctx, params, "", "", "") +} + +// ListWithFilters lists proxies with optional filtering by protocol, status, and search query +func (r *proxyRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.Proxy, *pagination.PaginationResult, error) { + q := r.client.Proxy.Query() + if protocol != "" { + q = q.Where(proxy.ProtocolEQ(protocol)) + } + if status != "" { + q = q.Where(proxy.StatusEQ(status)) + } + if search != "" { + q = q.Where(proxy.NameContainsFold(search)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + proxies, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(proxy.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outProxies := make([]service.Proxy, 0, len(proxies)) + for i := range proxies { + outProxies = append(outProxies, *proxyEntityToService(proxies[i])) + } + + return outProxies, paginationResultFromTotal(int64(total), params), nil +} + +// ListWithFiltersAndAccountCount lists proxies with filters and includes account count per proxy +func (r *proxyRepository) ListWithFiltersAndAccountCount(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.ProxyWithAccountCount, *pagination.PaginationResult, error) { + q := r.client.Proxy.Query() + if protocol != "" { + q = q.Where(proxy.ProtocolEQ(protocol)) + } + if status != "" { + q = q.Where(proxy.StatusEQ(status)) + } + if search != "" { + q = q.Where(proxy.NameContainsFold(search)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + proxies, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(proxy.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + // Get account counts + counts, err := r.GetAccountCountsForProxies(ctx) + if err != nil { + return nil, nil, err + } + + // Build result with account counts + result := make([]service.ProxyWithAccountCount, 0, len(proxies)) + for i := range proxies { + proxyOut := proxyEntityToService(proxies[i]) + if proxyOut == nil { + continue + } + result = append(result, service.ProxyWithAccountCount{ + Proxy: *proxyOut, + AccountCount: counts[proxyOut.ID], + }) + } + + return result, paginationResultFromTotal(int64(total), params), nil +} + +func (r *proxyRepository) ListActive(ctx context.Context) ([]service.Proxy, error) { + proxies, err := r.client.Proxy.Query(). + Where(proxy.StatusEQ(service.StatusActive)). 
+ All(ctx) + if err != nil { + return nil, err + } + outProxies := make([]service.Proxy, 0, len(proxies)) + for i := range proxies { + outProxies = append(outProxies, *proxyEntityToService(proxies[i])) + } + return outProxies, nil +} + +// ExistsByHostPortAuth checks if a proxy with the same host, port, username, and password exists +func (r *proxyRepository) ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) { + q := r.client.Proxy.Query(). + Where(proxy.HostEQ(host), proxy.PortEQ(port)) + + if username == "" { + q = q.Where(proxy.Or(proxy.UsernameIsNil(), proxy.UsernameEQ(""))) + } else { + q = q.Where(proxy.UsernameEQ(username)) + } + if password == "" { + q = q.Where(proxy.Or(proxy.PasswordIsNil(), proxy.PasswordEQ(""))) + } else { + q = q.Where(proxy.PasswordEQ(password)) + } + + count, err := q.Count(ctx) + return count > 0, err +} + +// CountAccountsByProxyID returns the number of accounts using a specific proxy +func (r *proxyRepository) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { + var count int64 + if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1 AND deleted_at IS NULL", []any{proxyID}, &count); err != nil { + return 0, err + } + return count, nil +} + +func (r *proxyRepository) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) { + rows, err := r.sql.QueryContext(ctx, ` + SELECT id, name, platform, type, notes + FROM accounts + WHERE proxy_id = $1 AND deleted_at IS NULL + ORDER BY id DESC + `, proxyID) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]service.ProxyAccountSummary, 0) + for rows.Next() { + var ( + id int64 + name string + platform string + accType string + notes sql.NullString + ) + if err := rows.Scan(&id, &name, &platform, &accType, ¬es); err != nil { + return nil, err + } + var notesPtr *string + if notes.Valid { + notesPtr = ¬es.String + } + out = append(out, service.ProxyAccountSummary{ + ID: id, + Name: name, + Platform: platform, + Type: accType, + Notes: notesPtr, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +// GetAccountCountsForProxies returns a map of proxy ID to account count for all proxies +func (r *proxyRepository) GetAccountCountsForProxies(ctx context.Context) (counts map[int64]int64, err error) { + rows, err := r.sql.QueryContext(ctx, "SELECT proxy_id, COUNT(*) AS count FROM accounts WHERE proxy_id IS NOT NULL AND deleted_at IS NULL GROUP BY proxy_id") + if err != nil { + return nil, err + } + defer func() { + if closeErr := rows.Close(); closeErr != nil && err == nil { + err = closeErr + counts = nil + } + }() + + counts = make(map[int64]int64) + for rows.Next() { + var proxyID, count int64 + if err = rows.Scan(&proxyID, &count); err != nil { + return nil, err + } + counts[proxyID] = count + } + if err = rows.Err(); err != nil { + return nil, err + } + return counts, nil +} + +// ListActiveWithAccountCount returns all active proxies with account count, sorted by creation time descending +func (r *proxyRepository) ListActiveWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { + proxies, err := r.client.Proxy.Query(). + Where(proxy.StatusEQ(service.StatusActive)). + Order(dbent.Desc(proxy.FieldCreatedAt)). 
+ All(ctx) + if err != nil { + return nil, err + } + + // Get account counts + counts, err := r.GetAccountCountsForProxies(ctx) + if err != nil { + return nil, err + } + + // Build result with account counts + result := make([]service.ProxyWithAccountCount, 0, len(proxies)) + for i := range proxies { + proxyOut := proxyEntityToService(proxies[i]) + if proxyOut == nil { + continue + } + result = append(result, service.ProxyWithAccountCount{ + Proxy: *proxyOut, + AccountCount: counts[proxyOut.ID], + }) + } + + return result, nil +} + +func proxyEntityToService(m *dbent.Proxy) *service.Proxy { + if m == nil { + return nil + } + out := &service.Proxy{ + ID: m.ID, + Name: m.Name, + Protocol: m.Protocol, + Host: m.Host, + Port: m.Port, + Status: m.Status, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } + if m.Username != nil { + out.Username = *m.Username + } + if m.Password != nil { + out.Password = *m.Password + } + return out +} + +func applyProxyEntityToService(dst *service.Proxy, src *dbent.Proxy) { + if dst == nil || src == nil { + return + } + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt +} diff --git a/backend/internal/repository/proxy_repo_integration_test.go b/backend/internal/repository/proxy_repo_integration_test.go new file mode 100644 index 00000000..8f5ef01e --- /dev/null +++ b/backend/internal/repository/proxy_repo_integration_test.go @@ -0,0 +1,329 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type ProxyRepoSuite struct { + suite.Suite + ctx context.Context + tx *dbent.Tx + repo *proxyRepository +} + +func (s *ProxyRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.tx = tx + s.repo = newProxyRepositoryWithSQL(tx.Client(), tx) +} + +func TestProxyRepoSuite(t *testing.T) { + suite.Run(t, new(ProxyRepoSuite)) +} + +// --- Create / GetByID / Update / Delete --- + +func (s *ProxyRepoSuite) TestCreate() { + proxy := &service.Proxy{ + Name: "test-create", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + } + + err := s.repo.Create(s.ctx, proxy) + s.Require().NoError(err, "Create") + s.Require().NotZero(proxy.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, proxy.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("test-create", got.Name) +} + +func (s *ProxyRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") +} + +func (s *ProxyRepoSuite) TestUpdate() { + proxy := &service.Proxy{ + Name: "original", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, proxy)) + + proxy.Name = "updated" + err := s.repo.Update(s.ctx, proxy) + s.Require().NoError(err, "Update") + + got, err := s.repo.GetByID(s.ctx, proxy.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("updated", got.Name) +} + +func (s *ProxyRepoSuite) TestDelete() { + proxy := &service.Proxy{ + Name: "to-delete", + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: service.StatusActive, + } + s.Require().NoError(s.repo.Create(s.ctx, proxy)) + + err := s.repo.Delete(s.ctx, proxy.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, 
proxy.ID) + s.Require().Error(err, "expected error after delete") +} + +// --- List / ListWithFilters --- + +func (s *ProxyRepoSuite) TestList() { + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) + + proxies, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "List") + s.Require().Len(proxies, 2) + s.Require().Equal(int64(2), page.Total) +} + +func (s *ProxyRepoSuite) TestListWithFilters_Protocol() { + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "socks5", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) + + proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "socks5", "", "") + s.Require().NoError(err) + s.Require().Len(proxies, 1) + s.Require().Equal("socks5", proxies[0].Protocol) +} + +func (s *ProxyRepoSuite) TestListWithFilters_Status() { + s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusDisabled}) + + proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusDisabled, "") + s.Require().NoError(err) + s.Require().Len(proxies, 1) + s.Require().Equal(service.StatusDisabled, proxies[0].Status) +} + +func (s *ProxyRepoSuite) TestListWithFilters_Search() { + s.mustCreateProxy(&service.Proxy{Name: "production-proxy", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "dev-proxy", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) + + proxies, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "prod") + s.Require().NoError(err) + s.Require().Len(proxies, 1) + s.Require().Contains(proxies[0].Name, "production") +} + +// --- ListActive --- + +func (s *ProxyRepoSuite) TestListActive() { + s.mustCreateProxy(&service.Proxy{Name: "active1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustCreateProxy(&service.Proxy{Name: "inactive1", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusDisabled}) + + proxies, err := s.repo.ListActive(s.ctx) + s.Require().NoError(err, "ListActive") + s.Require().Len(proxies, 1) + s.Require().Equal("active1", proxies[0].Name) +} + +// --- ExistsByHostPortAuth --- + +func (s *ProxyRepoSuite) TestExistsByHostPortAuth() { + s.mustCreateProxy(&service.Proxy{ + Name: "p1", + Protocol: "http", + Host: "1.2.3.4", + Port: 8080, + Username: "user", + Password: "pass", + Status: service.StatusActive, + }) + + exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "1.2.3.4", 8080, "user", "pass") + s.Require().NoError(err, "ExistsByHostPortAuth") + s.Require().True(exists) + + notExists, err := s.repo.ExistsByHostPortAuth(s.ctx, "1.2.3.4", 8080, "wrong", "creds") + s.Require().NoError(err) + s.Require().False(notExists) +} + +func (s *ProxyRepoSuite) TestExistsByHostPortAuth_NoAuth() { + s.mustCreateProxy(&service.Proxy{ + Name: "p-noauth", + Protocol: "http", + Host: 
"5.6.7.8", + Port: 8081, + Username: "", + Password: "", + Status: service.StatusActive, + }) + + exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "5.6.7.8", 8081, "", "") + s.Require().NoError(err) + s.Require().True(exists) +} + +// --- CountAccountsByProxyID --- + +func (s *ProxyRepoSuite) TestCountAccountsByProxyID() { + proxy := s.mustCreateProxy(&service.Proxy{Name: "p-count", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + s.mustInsertAccount("a1", &proxy.ID) + s.mustInsertAccount("a2", &proxy.ID) + s.mustInsertAccount("a3", nil) // no proxy + + count, err := s.repo.CountAccountsByProxyID(s.ctx, proxy.ID) + s.Require().NoError(err, "CountAccountsByProxyID") + s.Require().Equal(int64(2), count) +} + +func (s *ProxyRepoSuite) TestCountAccountsByProxyID_Zero() { + proxy := s.mustCreateProxy(&service.Proxy{Name: "p-zero", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + + count, err := s.repo.CountAccountsByProxyID(s.ctx, proxy.ID) + s.Require().NoError(err) + s.Require().Zero(count) +} + +// --- GetAccountCountsForProxies --- + +func (s *ProxyRepoSuite) TestGetAccountCountsForProxies() { + p1 := s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "127.0.0.1", Port: 8080, Status: service.StatusActive}) + p2 := s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "127.0.0.1", Port: 8081, Status: service.StatusActive}) + + s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + s.mustInsertAccount("a3", &p2.ID) + + counts, err := s.repo.GetAccountCountsForProxies(s.ctx) + s.Require().NoError(err, "GetAccountCountsForProxies") + s.Require().Equal(int64(2), counts[p1.ID]) + s.Require().Equal(int64(1), counts[p2.ID]) +} + +func (s *ProxyRepoSuite) TestGetAccountCountsForProxies_Empty() { + counts, err := s.repo.GetAccountCountsForProxies(s.ctx) + s.Require().NoError(err) + s.Require().Empty(counts) +} + +// --- ListActiveWithAccountCount --- + +func (s *ProxyRepoSuite) TestListActiveWithAccountCount() { + base := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + p1 := s.mustCreateProxyWithTimes("p1", service.StatusActive, base.Add(-1*time.Hour)) + p2 := s.mustCreateProxyWithTimes("p2", service.StatusActive, base) + s.mustCreateProxyWithTimes("p3-inactive", service.StatusDisabled, base.Add(1*time.Hour)) + + s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + s.mustInsertAccount("a3", &p2.ID) + + withCounts, err := s.repo.ListActiveWithAccountCount(s.ctx) + s.Require().NoError(err, "ListActiveWithAccountCount") + s.Require().Len(withCounts, 2, "expected 2 active proxies") + + // Sorted by created_at DESC, so p2 first + s.Require().Equal(p2.ID, withCounts[0].ID) + s.Require().Equal(int64(1), withCounts[0].AccountCount) + s.Require().Equal(p1.ID, withCounts[1].ID) + s.Require().Equal(int64(2), withCounts[1].AccountCount) +} + +// --- Combined original test --- + +func (s *ProxyRepoSuite) TestExistsByHostPortAuth_And_AccountCountAggregates() { + p1 := s.mustCreateProxy(&service.Proxy{Name: "p1", Protocol: "http", Host: "1.2.3.4", Port: 8080, Username: "u", Password: "p", Status: service.StatusActive}) + p2 := s.mustCreateProxy(&service.Proxy{Name: "p2", Protocol: "http", Host: "5.6.7.8", Port: 8081, Username: "", Password: "", Status: service.StatusActive}) + + exists, err := s.repo.ExistsByHostPortAuth(s.ctx, "1.2.3.4", 8080, "u", "p") + s.Require().NoError(err, "ExistsByHostPortAuth") + s.Require().True(exists, "expected proxy to exist") + + 
s.mustInsertAccount("a1", &p1.ID) + s.mustInsertAccount("a2", &p1.ID) + s.mustInsertAccount("a3", &p2.ID) + + count1, err := s.repo.CountAccountsByProxyID(s.ctx, p1.ID) + s.Require().NoError(err, "CountAccountsByProxyID") + s.Require().Equal(int64(2), count1, "expected 2 accounts for p1") + + counts, err := s.repo.GetAccountCountsForProxies(s.ctx) + s.Require().NoError(err, "GetAccountCountsForProxies") + s.Require().Equal(int64(2), counts[p1.ID]) + s.Require().Equal(int64(1), counts[p2.ID]) + + withCounts, err := s.repo.ListActiveWithAccountCount(s.ctx) + s.Require().NoError(err, "ListActiveWithAccountCount") + s.Require().Len(withCounts, 2, "expected 2 proxies") + for _, pc := range withCounts { + switch pc.ID { + case p1.ID: + s.Require().Equal(int64(2), pc.AccountCount, "p1 count mismatch") + case p2.ID: + s.Require().Equal(int64(1), pc.AccountCount, "p2 count mismatch") + default: + s.Require().Fail("unexpected proxy id", pc.ID) + } + } +} + +func (s *ProxyRepoSuite) mustCreateProxy(p *service.Proxy) *service.Proxy { + s.T().Helper() + s.Require().NoError(s.repo.Create(s.ctx, p), "create proxy") + return p +} + +func (s *ProxyRepoSuite) mustCreateProxyWithTimes(name, status string, createdAt time.Time) *service.Proxy { + s.T().Helper() + + // Use the repository create for standard fields, then update timestamps via raw SQL to keep deterministic ordering. + p := s.mustCreateProxy(&service.Proxy{ + Name: name, + Protocol: "http", + Host: "127.0.0.1", + Port: 8080, + Status: status, + }) + _, err := s.tx.ExecContext(s.ctx, "UPDATE proxies SET created_at = $1, updated_at = $1 WHERE id = $2", createdAt, p.ID) + s.Require().NoError(err, "update proxy timestamps") + return p +} + +func (s *ProxyRepoSuite) mustInsertAccount(name string, proxyID *int64) { + s.T().Helper() + var pid any + if proxyID != nil { + pid = *proxyID + } + _, err := s.tx.ExecContext( + s.ctx, + "INSERT INTO accounts (name, platform, type, proxy_id) VALUES ($1, $2, $3, $4)", + name, + service.PlatformAnthropic, + service.AccountTypeOAuth, + pid, + ) + s.Require().NoError(err, "insert account") +} diff --git a/backend/internal/repository/redeem_cache.go b/backend/internal/repository/redeem_cache.go new file mode 100644 index 00000000..831aaf57 --- /dev/null +++ b/backend/internal/repository/redeem_cache.go @@ -0,0 +1,62 @@ +package repository + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const ( + redeemRateLimitKeyPrefix = "redeem:ratelimit:" + redeemLockKeyPrefix = "redeem:lock:" + redeemRateLimitDuration = 24 * time.Hour +) + +// redeemRateLimitKey generates the Redis key for redeem attempt rate limiting. +func redeemRateLimitKey(userID int64) string { + return fmt.Sprintf("%s%d", redeemRateLimitKeyPrefix, userID) +} + +// redeemLockKey generates the Redis key for redeem code locking. 
+func redeemLockKey(code string) string { + return redeemLockKeyPrefix + code +} + +type redeemCache struct { + rdb *redis.Client +} + +func NewRedeemCache(rdb *redis.Client) service.RedeemCache { + return &redeemCache{rdb: rdb} +} + +func (c *redeemCache) GetRedeemAttemptCount(ctx context.Context, userID int64) (int, error) { + key := redeemRateLimitKey(userID) + count, err := c.rdb.Get(ctx, key).Int() + if err == redis.Nil { + return 0, nil + } + return count, err +} + +func (c *redeemCache) IncrementRedeemAttemptCount(ctx context.Context, userID int64) error { + key := redeemRateLimitKey(userID) + pipe := c.rdb.Pipeline() + pipe.Incr(ctx, key) + pipe.Expire(ctx, key, redeemRateLimitDuration) + _, err := pipe.Exec(ctx) + return err +} + +func (c *redeemCache) AcquireRedeemLock(ctx context.Context, code string, ttl time.Duration) (bool, error) { + key := redeemLockKey(code) + return c.rdb.SetNX(ctx, key, 1, ttl).Result() +} + +func (c *redeemCache) ReleaseRedeemLock(ctx context.Context, code string) error { + key := redeemLockKey(code) + return c.rdb.Del(ctx, key).Err() +} diff --git a/backend/internal/repository/redeem_cache_integration_test.go b/backend/internal/repository/redeem_cache_integration_test.go new file mode 100644 index 00000000..6398a801 --- /dev/null +++ b/backend/internal/repository/redeem_cache_integration_test.go @@ -0,0 +1,103 @@ +//go:build integration + +package repository + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RedeemCacheSuite struct { + IntegrationRedisSuite + cache *redeemCache +} + +func (s *RedeemCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewRedeemCache(s.rdb).(*redeemCache) +} + +func (s *RedeemCacheSuite) TestGetRedeemAttemptCount_Missing() { + missingUserID := int64(99999) + count, err := s.cache.GetRedeemAttemptCount(s.ctx, missingUserID) + require.NoError(s.T(), err, "expected nil error for missing rate-limit key") + require.Equal(s.T(), 0, count, "expected zero count for missing key") +} + +func (s *RedeemCacheSuite) TestIncrementAndGetRedeemAttemptCount() { + userID := int64(1) + key := fmt.Sprintf("%s%d", redeemRateLimitKeyPrefix, userID) + + require.NoError(s.T(), s.cache.IncrementRedeemAttemptCount(s.ctx, userID), "IncrementRedeemAttemptCount") + count, err := s.cache.GetRedeemAttemptCount(s.ctx, userID) + require.NoError(s.T(), err, "GetRedeemAttemptCount") + require.Equal(s.T(), 1, count, "count mismatch") + + ttl, err := s.rdb.TTL(s.ctx, key).Result() + require.NoError(s.T(), err, "TTL") + s.AssertTTLWithin(ttl, 1*time.Second, redeemRateLimitDuration) +} + +func (s *RedeemCacheSuite) TestMultipleIncrements() { + userID := int64(2) + + require.NoError(s.T(), s.cache.IncrementRedeemAttemptCount(s.ctx, userID)) + require.NoError(s.T(), s.cache.IncrementRedeemAttemptCount(s.ctx, userID)) + require.NoError(s.T(), s.cache.IncrementRedeemAttemptCount(s.ctx, userID)) + + count, err := s.cache.GetRedeemAttemptCount(s.ctx, userID) + require.NoError(s.T(), err) + require.Equal(s.T(), 3, count, "count after 3 increments") +} + +func (s *RedeemCacheSuite) TestAcquireAndReleaseRedeemLock() { + ok, err := s.cache.AcquireRedeemLock(s.ctx, "CODE", 10*time.Second) + require.NoError(s.T(), err, "AcquireRedeemLock") + require.True(s.T(), ok) + + // Second acquire should fail + ok, err = s.cache.AcquireRedeemLock(s.ctx, "CODE", 10*time.Second) + require.NoError(s.T(), err, "AcquireRedeemLock 2") + require.False(s.T(), ok, "expected lock to be 
held") + + // Release + require.NoError(s.T(), s.cache.ReleaseRedeemLock(s.ctx, "CODE"), "ReleaseRedeemLock") + + // Now acquire should succeed + ok, err = s.cache.AcquireRedeemLock(s.ctx, "CODE", 10*time.Second) + require.NoError(s.T(), err, "AcquireRedeemLock after release") + require.True(s.T(), ok) +} + +func (s *RedeemCacheSuite) TestAcquireRedeemLock_TTL() { + lockKey := redeemLockKeyPrefix + "CODE2" + lockTTL := 15 * time.Second + + ok, err := s.cache.AcquireRedeemLock(s.ctx, "CODE2", lockTTL) + require.NoError(s.T(), err, "AcquireRedeemLock CODE2") + require.True(s.T(), ok) + + ttl, err := s.rdb.TTL(s.ctx, lockKey).Result() + require.NoError(s.T(), err, "TTL lock key") + s.AssertTTLWithin(ttl, 1*time.Second, lockTTL) +} + +func (s *RedeemCacheSuite) TestReleaseRedeemLock_Idempotent() { + // Release a lock that doesn't exist should not error + require.NoError(s.T(), s.cache.ReleaseRedeemLock(s.ctx, "NONEXISTENT")) + + // Acquire, release, release again + ok, err := s.cache.AcquireRedeemLock(s.ctx, "IDEMPOTENT", 10*time.Second) + require.NoError(s.T(), err) + require.True(s.T(), ok) + require.NoError(s.T(), s.cache.ReleaseRedeemLock(s.ctx, "IDEMPOTENT")) + require.NoError(s.T(), s.cache.ReleaseRedeemLock(s.ctx, "IDEMPOTENT"), "second release should be idempotent") +} + +func TestRedeemCacheSuite(t *testing.T) { + suite.Run(t, new(RedeemCacheSuite)) +} diff --git a/backend/internal/repository/redeem_cache_test.go b/backend/internal/repository/redeem_cache_test.go new file mode 100644 index 00000000..9b547b74 --- /dev/null +++ b/backend/internal/repository/redeem_cache_test.go @@ -0,0 +1,77 @@ +//go:build unit + +package repository + +import ( + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRedeemRateLimitKey(t *testing.T) { + tests := []struct { + name string + userID int64 + expected string + }{ + { + name: "normal_user_id", + userID: 123, + expected: "redeem:ratelimit:123", + }, + { + name: "zero_user_id", + userID: 0, + expected: "redeem:ratelimit:0", + }, + { + name: "negative_user_id", + userID: -1, + expected: "redeem:ratelimit:-1", + }, + { + name: "max_int64", + userID: math.MaxInt64, + expected: "redeem:ratelimit:9223372036854775807", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := redeemRateLimitKey(tc.userID) + require.Equal(t, tc.expected, got) + }) + } +} + +func TestRedeemLockKey(t *testing.T) { + tests := []struct { + name string + code string + expected string + }{ + { + name: "normal_code", + code: "ABC123", + expected: "redeem:lock:ABC123", + }, + { + name: "empty_code", + code: "", + expected: "redeem:lock:", + }, + { + name: "code_with_special_chars", + code: "CODE-2024:test", + expected: "redeem:lock:CODE-2024:test", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := redeemLockKey(tc.code) + require.Equal(t, tc.expected, got) + }) + } +} diff --git a/backend/internal/repository/redeem_code_repo.go b/backend/internal/repository/redeem_code_repo.go new file mode 100644 index 00000000..ee8a01b5 --- /dev/null +++ b/backend/internal/repository/redeem_code_repo.go @@ -0,0 +1,239 @@ +package repository + +import ( + "context" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/redeemcode" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type redeemCodeRepository struct { + client *dbent.Client +} + +func NewRedeemCodeRepository(client *dbent.Client) 
service.RedeemCodeRepository { + return &redeemCodeRepository{client: client} +} + +func (r *redeemCodeRepository) Create(ctx context.Context, code *service.RedeemCode) error { + created, err := r.client.RedeemCode.Create(). + SetCode(code.Code). + SetType(code.Type). + SetValue(code.Value). + SetStatus(code.Status). + SetNotes(code.Notes). + SetValidityDays(code.ValidityDays). + SetNillableUsedBy(code.UsedBy). + SetNillableUsedAt(code.UsedAt). + SetNillableGroupID(code.GroupID). + Save(ctx) + if err == nil { + code.ID = created.ID + code.CreatedAt = created.CreatedAt + } + return err +} + +func (r *redeemCodeRepository) CreateBatch(ctx context.Context, codes []service.RedeemCode) error { + if len(codes) == 0 { + return nil + } + + builders := make([]*dbent.RedeemCodeCreate, 0, len(codes)) + for i := range codes { + c := &codes[i] + b := r.client.RedeemCode.Create(). + SetCode(c.Code). + SetType(c.Type). + SetValue(c.Value). + SetStatus(c.Status). + SetNotes(c.Notes). + SetValidityDays(c.ValidityDays). + SetNillableUsedBy(c.UsedBy). + SetNillableUsedAt(c.UsedAt). + SetNillableGroupID(c.GroupID) + builders = append(builders, b) + } + + return r.client.RedeemCode.CreateBulk(builders...).Exec(ctx) +} + +func (r *redeemCodeRepository) GetByID(ctx context.Context, id int64) (*service.RedeemCode, error) { + m, err := r.client.RedeemCode.Query(). + Where(redeemcode.IDEQ(id)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrRedeemCodeNotFound + } + return nil, err + } + return redeemCodeEntityToService(m), nil +} + +func (r *redeemCodeRepository) GetByCode(ctx context.Context, code string) (*service.RedeemCode, error) { + m, err := r.client.RedeemCode.Query(). + Where(redeemcode.CodeEQ(code)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrRedeemCodeNotFound + } + return nil, err + } + return redeemCodeEntityToService(m), nil +} + +func (r *redeemCodeRepository) Delete(ctx context.Context, id int64) error { + _, err := r.client.RedeemCode.Delete().Where(redeemcode.IDEQ(id)).Exec(ctx) + return err +} + +func (r *redeemCodeRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.RedeemCode, *pagination.PaginationResult, error) { + return r.ListWithFilters(ctx, params, "", "", "") +} + +func (r *redeemCodeRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]service.RedeemCode, *pagination.PaginationResult, error) { + q := r.client.RedeemCode.Query() + + if codeType != "" { + q = q.Where(redeemcode.TypeEQ(codeType)) + } + if status != "" { + q = q.Where(redeemcode.StatusEQ(status)) + } + if search != "" { + q = q.Where(redeemcode.CodeContainsFold(search)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + codes, err := q. + WithUser(). + WithGroup(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(redeemcode.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outCodes := redeemCodeEntitiesToService(codes) + + return outCodes, paginationResultFromTotal(int64(total), params), nil +} + +func (r *redeemCodeRepository) Update(ctx context.Context, code *service.RedeemCode) error { + up := r.client.RedeemCode.UpdateOneID(code.ID). + SetCode(code.Code). + SetType(code.Type). + SetValue(code.Value). + SetStatus(code.Status). + SetNotes(code.Notes). 
+ SetValidityDays(code.ValidityDays) + + if code.UsedBy != nil { + up.SetUsedBy(*code.UsedBy) + } else { + up.ClearUsedBy() + } + if code.UsedAt != nil { + up.SetUsedAt(*code.UsedAt) + } else { + up.ClearUsedAt() + } + if code.GroupID != nil { + up.SetGroupID(*code.GroupID) + } else { + up.ClearGroupID() + } + + updated, err := up.Save(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return service.ErrRedeemCodeNotFound + } + return err + } + code.CreatedAt = updated.CreatedAt + return nil +} + +func (r *redeemCodeRepository) Use(ctx context.Context, id, userID int64) error { + now := time.Now() + client := clientFromContext(ctx, r.client) + affected, err := client.RedeemCode.Update(). + Where(redeemcode.IDEQ(id), redeemcode.StatusEQ(service.StatusUnused)). + SetStatus(service.StatusUsed). + SetUsedBy(userID). + SetUsedAt(now). + Save(ctx) + if err != nil { + return err + } + if affected == 0 { + return service.ErrRedeemCodeUsed + } + return nil +} + +func (r *redeemCodeRepository) ListByUser(ctx context.Context, userID int64, limit int) ([]service.RedeemCode, error) { + if limit <= 0 { + limit = 10 + } + + codes, err := r.client.RedeemCode.Query(). + Where(redeemcode.UsedByEQ(userID)). + WithGroup(). + Order(dbent.Desc(redeemcode.FieldUsedAt)). + Limit(limit). + All(ctx) + if err != nil { + return nil, err + } + + return redeemCodeEntitiesToService(codes), nil +} + +func redeemCodeEntityToService(m *dbent.RedeemCode) *service.RedeemCode { + if m == nil { + return nil + } + out := &service.RedeemCode{ + ID: m.ID, + Code: m.Code, + Type: m.Type, + Value: m.Value, + Status: m.Status, + UsedBy: m.UsedBy, + UsedAt: m.UsedAt, + Notes: derefString(m.Notes), + CreatedAt: m.CreatedAt, + GroupID: m.GroupID, + ValidityDays: m.ValidityDays, + } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + return out +} + +func redeemCodeEntitiesToService(models []*dbent.RedeemCode) []service.RedeemCode { + out := make([]service.RedeemCode, 0, len(models)) + for i := range models { + if s := redeemCodeEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} diff --git a/backend/internal/repository/redeem_code_repo_integration_test.go b/backend/internal/repository/redeem_code_repo_integration_test.go new file mode 100644 index 00000000..39674b52 --- /dev/null +++ b/backend/internal/repository/redeem_code_repo_integration_test.go @@ -0,0 +1,390 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type RedeemCodeRepoSuite struct { + suite.Suite + ctx context.Context + client *dbent.Client + repo *redeemCodeRepository +} + +func (s *RedeemCodeRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.client = tx.Client() + s.repo = NewRedeemCodeRepository(s.client).(*redeemCodeRepository) +} + +func TestRedeemCodeRepoSuite(t *testing.T) { + suite.Run(t, new(RedeemCodeRepoSuite)) +} + +func (s *RedeemCodeRepoSuite) createUser(email string) *dbent.User { + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). 
+ Save(s.ctx) + s.Require().NoError(err, "create user") + return u +} + +func (s *RedeemCodeRepoSuite) createGroup(name string) *dbent.Group { + g, err := s.client.Group.Create(). + SetName(name). + Save(s.ctx) + s.Require().NoError(err, "create group") + return g +} + +// --- Create / CreateBatch / GetByID / GetByCode --- + +func (s *RedeemCodeRepoSuite) TestCreate() { + code := &service.RedeemCode{ + Code: "TEST-CREATE", + Type: service.RedeemTypeBalance, + Value: 100, + Status: service.StatusUnused, + } + + err := s.repo.Create(s.ctx, code) + s.Require().NoError(err, "Create") + s.Require().NotZero(code.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, code.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("TEST-CREATE", got.Code) +} + +func (s *RedeemCodeRepoSuite) TestCreateBatch() { + codes := []service.RedeemCode{ + {Code: "BATCH-1", Type: service.RedeemTypeBalance, Value: 10, Status: service.StatusUnused}, + {Code: "BATCH-2", Type: service.RedeemTypeBalance, Value: 20, Status: service.StatusUnused}, + } + + err := s.repo.CreateBatch(s.ctx, codes) + s.Require().NoError(err, "CreateBatch") + + got1, err := s.repo.GetByCode(s.ctx, "BATCH-1") + s.Require().NoError(err) + s.Require().Equal(float64(10), got1.Value) + + got2, err := s.repo.GetByCode(s.ctx, "BATCH-2") + s.Require().NoError(err) + s.Require().Equal(float64(20), got2.Value) +} + +func (s *RedeemCodeRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) +} + +func (s *RedeemCodeRepoSuite) TestGetByCode() { + _, err := s.client.RedeemCode.Create(). + SetCode("GET-BY-CODE"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + Save(s.ctx) + s.Require().NoError(err, "seed redeem code") + + got, err := s.repo.GetByCode(s.ctx, "GET-BY-CODE") + s.Require().NoError(err, "GetByCode") + s.Require().Equal("GET-BY-CODE", got.Code) +} + +func (s *RedeemCodeRepoSuite) TestGetByCode_NotFound() { + _, err := s.repo.GetByCode(s.ctx, "NON-EXISTENT") + s.Require().Error(err, "expected error for non-existent code") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) +} + +// --- Delete --- + +func (s *RedeemCodeRepoSuite) TestDelete() { + created, err := s.client.RedeemCode.Create(). + SetCode("TO-DELETE"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). 
+ Save(s.ctx) + s.Require().NoError(err) + + err = s.repo.Delete(s.ctx, created.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, created.ID) + s.Require().Error(err, "expected error after delete") + s.Require().ErrorIs(err, service.ErrRedeemCodeNotFound) +} + +// --- List / ListWithFilters --- + +func (s *RedeemCodeRepoSuite) TestList() { + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "LIST-1", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "LIST-2", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + + codes, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "List") + s.Require().Len(codes, 2) + s.Require().Equal(int64(2), page.Total) +} + +func (s *RedeemCodeRepoSuite) TestListWithFilters_Type() { + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "TYPE-BAL", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "TYPE-SUB", Type: service.RedeemTypeSubscription, Value: 0, Status: service.StatusUnused})) + + codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.RedeemTypeSubscription, "", "") + s.Require().NoError(err) + s.Require().Len(codes, 1) + s.Require().Equal(service.RedeemTypeSubscription, codes[0].Type) +} + +func (s *RedeemCodeRepoSuite) TestListWithFilters_Status() { + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "STAT-UNUSED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "STAT-USED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUsed})) + + codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", service.StatusUsed, "") + s.Require().NoError(err) + s.Require().Len(codes, 1) + s.Require().Equal(service.StatusUsed, codes[0].Status) +} + +func (s *RedeemCodeRepoSuite) TestListWithFilters_Search() { + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "ALPHA-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + s.Require().NoError(s.repo.Create(s.ctx, &service.RedeemCode{Code: "BETA-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused})) + + codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "alpha") + s.Require().NoError(err) + s.Require().Len(codes, 1) + s.Require().Contains(codes[0].Code, "ALPHA") +} + +func (s *RedeemCodeRepoSuite) TestListWithFilters_GroupPreload() { + group := s.createGroup(uniqueTestValue(s.T(), "g-preload")) + _, err := s.client.RedeemCode.Create(). + SetCode("WITH-GROUP"). + SetType(service.RedeemTypeSubscription). + SetStatus(service.StatusUnused). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetGroupID(group.ID). 
+ Save(s.ctx) + s.Require().NoError(err) + + codes, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, "", "", "") + s.Require().NoError(err) + s.Require().Len(codes, 1) + s.Require().NotNil(codes[0].Group, "expected Group preload") + s.Require().Equal(group.ID, codes[0].Group.ID) +} + +// --- Update --- + +func (s *RedeemCodeRepoSuite) TestUpdate() { + code := &service.RedeemCode{ + Code: "UPDATE-ME", + Type: service.RedeemTypeBalance, + Value: 10, + Status: service.StatusUnused, + } + s.Require().NoError(s.repo.Create(s.ctx, code)) + + code.Value = 50 + err := s.repo.Update(s.ctx, code) + s.Require().NoError(err, "Update") + + got, err := s.repo.GetByID(s.ctx, code.ID) + s.Require().NoError(err) + s.Require().Equal(float64(50), got.Value) +} + +// --- Use --- + +func (s *RedeemCodeRepoSuite) TestUse() { + user := s.createUser(uniqueTestValue(s.T(), "use") + "@example.com") + code := &service.RedeemCode{Code: "USE-ME", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused} + s.Require().NoError(s.repo.Create(s.ctx, code)) + + err := s.repo.Use(s.ctx, code.ID, user.ID) + s.Require().NoError(err, "Use") + + got, err := s.repo.GetByID(s.ctx, code.ID) + s.Require().NoError(err) + s.Require().Equal(service.StatusUsed, got.Status) + s.Require().NotNil(got.UsedBy) + s.Require().Equal(user.ID, *got.UsedBy) + s.Require().NotNil(got.UsedAt) +} + +func (s *RedeemCodeRepoSuite) TestUse_Idempotency() { + user := s.createUser(uniqueTestValue(s.T(), "idem") + "@example.com") + code := &service.RedeemCode{Code: "IDEM-CODE", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUnused} + s.Require().NoError(s.repo.Create(s.ctx, code)) + + err := s.repo.Use(s.ctx, code.ID, user.ID) + s.Require().NoError(err, "Use first time") + + // Second use should fail + err = s.repo.Use(s.ctx, code.ID, user.ID) + s.Require().Error(err, "Use expected error on second call") + s.Require().ErrorIs(err, service.ErrRedeemCodeUsed) +} + +func (s *RedeemCodeRepoSuite) TestUse_AlreadyUsed() { + user := s.createUser(uniqueTestValue(s.T(), "already") + "@example.com") + code := &service.RedeemCode{Code: "ALREADY-USED", Type: service.RedeemTypeBalance, Value: 0, Status: service.StatusUsed} + s.Require().NoError(s.repo.Create(s.ctx, code)) + + err := s.repo.Use(s.ctx, code.ID, user.ID) + s.Require().Error(err, "expected error for already used code") + s.Require().ErrorIs(err, service.ErrRedeemCodeUsed) +} + +// --- ListByUser --- + +func (s *RedeemCodeRepoSuite) TestListByUser() { + user := s.createUser(uniqueTestValue(s.T(), "listby") + "@example.com") + base := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + usedAt1 := base + _, err := s.client.RedeemCode.Create(). + SetCode("USER-1"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(usedAt1). + Save(s.ctx) + s.Require().NoError(err) + + usedAt2 := base.Add(1 * time.Hour) + _, err = s.client.RedeemCode.Create(). + SetCode("USER-2"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(usedAt2). 
+ Save(s.ctx) + s.Require().NoError(err) + + codes, err := s.repo.ListByUser(s.ctx, user.ID, 10) + s.Require().NoError(err, "ListByUser") + s.Require().Len(codes, 2) + // Ordered by used_at DESC, so USER-2 first + s.Require().Equal("USER-2", codes[0].Code) + s.Require().Equal("USER-1", codes[1].Code) +} + +func (s *RedeemCodeRepoSuite) TestListByUser_WithGroupPreload() { + user := s.createUser(uniqueTestValue(s.T(), "grp") + "@example.com") + group := s.createGroup(uniqueTestValue(s.T(), "g-listby")) + + _, err := s.client.RedeemCode.Create(). + SetCode("WITH-GRP"). + SetType(service.RedeemTypeSubscription). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(time.Now()). + SetGroupID(group.ID). + Save(s.ctx) + s.Require().NoError(err) + + codes, err := s.repo.ListByUser(s.ctx, user.ID, 10) + s.Require().NoError(err) + s.Require().Len(codes, 1) + s.Require().NotNil(codes[0].Group) + s.Require().Equal(group.ID, codes[0].Group.ID) +} + +func (s *RedeemCodeRepoSuite) TestListByUser_DefaultLimit() { + user := s.createUser(uniqueTestValue(s.T(), "deflimit") + "@example.com") + _, err := s.client.RedeemCode.Create(). + SetCode("DEF-LIM"). + SetType(service.RedeemTypeBalance). + SetStatus(service.StatusUsed). + SetValue(0). + SetNotes(""). + SetValidityDays(30). + SetUsedBy(user.ID). + SetUsedAt(time.Now()). + Save(s.ctx) + s.Require().NoError(err) + + // limit <= 0 should default to 10 + codes, err := s.repo.ListByUser(s.ctx, user.ID, 0) + s.Require().NoError(err) + s.Require().Len(codes, 1) +} + +// --- Combined original test --- + +func (s *RedeemCodeRepoSuite) TestCreateBatch_Filters_Use_Idempotency_ListByUser() { + user := s.createUser(uniqueTestValue(s.T(), "rc") + "@example.com") + group := s.createGroup(uniqueTestValue(s.T(), "g-rc")) + groupID := group.ID + + codes := []service.RedeemCode{ + {Code: "CODEA", Type: service.RedeemTypeBalance, Value: 1, Status: service.StatusUnused, Notes: ""}, + {Code: "CODEB", Type: service.RedeemTypeSubscription, Value: 0, Status: service.StatusUnused, Notes: "", GroupID: &groupID, ValidityDays: 7}, + } + s.Require().NoError(s.repo.CreateBatch(s.ctx, codes), "CreateBatch") + + list, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.RedeemTypeSubscription, service.StatusUnused, "code") + s.Require().NoError(err, "ListWithFilters") + s.Require().Equal(int64(1), page.Total) + s.Require().Len(list, 1) + s.Require().NotNil(list[0].Group, "expected Group preload") + s.Require().Equal(group.ID, list[0].Group.ID) + + codeB, err := s.repo.GetByCode(s.ctx, "CODEB") + s.Require().NoError(err, "GetByCode") + s.Require().NoError(s.repo.Use(s.ctx, codeB.ID, user.ID), "Use") + err = s.repo.Use(s.ctx, codeB.ID, user.ID) + s.Require().Error(err, "Use expected error on second call") + s.Require().ErrorIs(err, service.ErrRedeemCodeUsed) + + codeA, err := s.repo.GetByCode(s.ctx, "CODEA") + s.Require().NoError(err, "GetByCode") + + // Use fixed time instead of time.Sleep for deterministic ordering. + _, err = s.client.RedeemCode.UpdateOneID(codeB.ID). + SetUsedAt(time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC)). + Save(s.ctx) + s.Require().NoError(err) + s.Require().NoError(s.repo.Use(s.ctx, codeA.ID, user.ID), "Use codeA") + _, err = s.client.RedeemCode.UpdateOneID(codeA.ID). + SetUsedAt(time.Date(2025, 1, 1, 13, 0, 0, 0, time.UTC)). 
+		Save(s.ctx)
+	s.Require().NoError(err)
+
+	used, err := s.repo.ListByUser(s.ctx, user.ID, 10)
+	s.Require().NoError(err, "ListByUser")
+	s.Require().Len(used, 2, "expected 2 used codes")
+	s.Require().Equal("CODEA", used[0].Code, "expected newest used code first")
+}
diff --git a/backend/internal/repository/redis.go b/backend/internal/repository/redis.go
new file mode 100644
index 00000000..f3606ad9
--- /dev/null
+++ b/backend/internal/repository/redis.go
@@ -0,0 +1,39 @@
+package repository
+
+import (
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+
+	"github.com/redis/go-redis/v9"
+)
+
+// InitRedis initializes the Redis client.
+//
+// Performance notes:
+// The original implementation used the go-redis defaults, with no pool or timeout tuning:
+// 1. The default pool size may not sustain high concurrency
+// 2. Without timeouts, slow operations can block
+//
+// This implementation supports configurable pool and timeout parameters:
+// 1. PoolSize: caps the number of concurrent connections (default 128)
+// 2. MinIdleConns: keeps a minimum of idle connections to reduce cold-start latency (default 10)
+// 3. DialTimeout/ReadTimeout/WriteTimeout: precise control of each phase's timeout
+func InitRedis(cfg *config.Config) *redis.Client {
+	return redis.NewClient(buildRedisOptions(cfg))
+}
+
+// buildRedisOptions builds the Redis connection options.
+// Pool and timeout parameters are read from the config file, allowing production tuning.
+func buildRedisOptions(cfg *config.Config) *redis.Options {
+	return &redis.Options{
+		Addr:         cfg.Redis.Address(),
+		Password:     cfg.Redis.Password,
+		DB:           cfg.Redis.DB,
+		DialTimeout:  time.Duration(cfg.Redis.DialTimeoutSeconds) * time.Second,  // connect timeout
+		ReadTimeout:  time.Duration(cfg.Redis.ReadTimeoutSeconds) * time.Second,  // read timeout
+		WriteTimeout: time.Duration(cfg.Redis.WriteTimeoutSeconds) * time.Second, // write timeout
+		PoolSize:     cfg.Redis.PoolSize,     // connection pool size
+		MinIdleConns: cfg.Redis.MinIdleConns, // minimum idle connections
+	}
+}
diff --git a/backend/internal/repository/redis_test.go b/backend/internal/repository/redis_test.go
new file mode 100644
index 00000000..756a63dc
--- /dev/null
+++ b/backend/internal/repository/redis_test.go
@@ -0,0 +1,35 @@
+package repository
+
+import (
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBuildRedisOptions(t *testing.T) {
+	cfg := &config.Config{
+		Redis: config.RedisConfig{
+			Host:                "localhost",
+			Port:                6379,
+			Password:            "secret",
+			DB:                  2,
+			DialTimeoutSeconds:  5,
+			ReadTimeoutSeconds:  3,
+			WriteTimeoutSeconds: 4,
+			PoolSize:            100,
+			MinIdleConns:        10,
+		},
+	}
+
+	opts := buildRedisOptions(cfg)
+	require.Equal(t, "localhost:6379", opts.Addr)
+	require.Equal(t, "secret", opts.Password)
+	require.Equal(t, 2, opts.DB)
+	require.Equal(t, 5*time.Second, opts.DialTimeout)
+	require.Equal(t, 3*time.Second, opts.ReadTimeout)
+	require.Equal(t, 4*time.Second, opts.WriteTimeout)
+	require.Equal(t, 100, opts.PoolSize)
+	require.Equal(t, 10, opts.MinIdleConns)
+}
diff --git a/backend/internal/repository/req_client_pool.go b/backend/internal/repository/req_client_pool.go
new file mode 100644
index 00000000..b23462a4
--- /dev/null
+++ b/backend/internal/repository/req_client_pool.go
@@ -0,0 +1,64 @@
+package repository
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/imroc/req/v3"
+)
+
+// reqClientOptions defines the build parameters for a req client
+type reqClientOptions struct {
+	ProxyURL    string        // proxy URL (http/https/socks5 supported)
+	Timeout     time.Duration // request timeout
+	Impersonate bool          // whether to impersonate a Chrome browser fingerprint
+}
+
+// sharedReqClients caches req client instances keyed by their build parameters.
+//
+// Performance notes:
+// The original implementation created a new req.Client on every OAuth refresh:
+// 1. claude_oauth_service.go: new client on every refresh
+// 2. openai_oauth_service.go: new client on every refresh
+// 3. gemini_oauth_client.go: new client on every refresh
+//
+// This implementation caches clients in a sync.Map:
+// 1. Identical configurations (proxy + timeout + impersonation) reuse the same client
+// 2. The underlying connection pool is reused, cutting TLS handshake overhead
+// 3. LoadOrStore guarantees concurrency safety and avoids duplicate creation
+var sharedReqClients sync.Map
+
+// getSharedReqClient returns the shared req client instance for the given options.
+// Performance: identical configurations reuse one client instead of creating a new one.
+func getSharedReqClient(opts reqClientOptions) *req.Client {
+	key := buildReqClientKey(opts)
+	if cached, ok := sharedReqClients.Load(key); ok {
+		if c, ok := cached.(*req.Client); ok {
+			return c
+		}
+	}
+
+	client := req.C().SetTimeout(opts.Timeout)
+	if opts.Impersonate {
+		client = client.ImpersonateChrome()
+	}
+	if strings.TrimSpace(opts.ProxyURL) != "" {
+		client.SetProxyURL(strings.TrimSpace(opts.ProxyURL))
+	}
+
+	actual, _ := sharedReqClients.LoadOrStore(key, client)
+	if c, ok := actual.(*req.Client); ok {
+		return c
+	}
+	return client
+}
+
+func buildReqClientKey(opts reqClientOptions) string {
+	return fmt.Sprintf("%s|%s|%t",
+		strings.TrimSpace(opts.ProxyURL),
+		opts.Timeout.String(),
+		opts.Impersonate,
+	)
+}
diff --git a/backend/internal/repository/scheduler_cache.go b/backend/internal/repository/scheduler_cache.go
new file mode 100644
index 00000000..13b22107
--- /dev/null
+++ b/backend/internal/repository/scheduler_cache.go
@@ -0,0 +1,276 @@
+package repository
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+)
+
+const (
+	schedulerBucketSetKey       = "sched:buckets"
+	schedulerOutboxWatermarkKey = "sched:outbox:watermark"
+	schedulerAccountPrefix      = "sched:acc:"
+	schedulerActivePrefix       = "sched:active:"
+	schedulerReadyPrefix        = "sched:ready:"
+	schedulerVersionPrefix      = "sched:ver:"
+	schedulerSnapshotPrefix     = "sched:"
+	schedulerLockPrefix         = "sched:lock:"
+)
+
+type schedulerCache struct {
+	rdb *redis.Client
+}
+
+func NewSchedulerCache(rdb *redis.Client) service.SchedulerCache {
+	return &schedulerCache{rdb: rdb}
+}
+
+func (c *schedulerCache) GetSnapshot(ctx context.Context, bucket service.SchedulerBucket) ([]*service.Account, bool, error) {
+	readyKey := schedulerBucketKey(schedulerReadyPrefix, bucket)
+	readyVal, err := c.rdb.Get(ctx, readyKey).Result()
+	if err == redis.Nil {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+	if readyVal != "1" {
+		return nil, false, nil
+	}
+
+	activeKey := schedulerBucketKey(schedulerActivePrefix, bucket)
+	activeVal, err := c.rdb.Get(ctx, activeKey).Result()
+	if err == redis.Nil {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	snapshotKey := schedulerSnapshotKey(bucket, activeVal)
+	ids, err := c.rdb.ZRange(ctx, snapshotKey, 0, -1).Result()
+	if err != nil {
+		return nil, false, err
+	}
+	if len(ids) == 0 {
+		return []*service.Account{}, true, nil
+	}
+
+	keys := make([]string, 0, len(ids))
+	for _, id := range ids {
+		keys = append(keys, schedulerAccountKey(id))
+	}
+	values, err := c.rdb.MGet(ctx, keys...).Result()
+	if err != nil {
+		return nil, false, err
+	}
+
+	accounts := make([]*service.Account, 0, len(values))
+	for _, val := range values {
+		if val == nil {
+			return nil, false, nil
+		}
+		account, err := decodeCachedAccount(val)
+		if err != nil {
+			return nil, false, err
+		}
+		accounts = append(accounts, account)
+	}
+
+	return accounts, true, nil
+}
+
+func (c *schedulerCache) SetSnapshot(ctx context.Context, bucket service.SchedulerBucket, accounts []service.Account) error {
+	activeKey := schedulerBucketKey(schedulerActivePrefix, bucket)
+	oldActive, _ := c.rdb.Get(ctx, activeKey).Result()
+
+	versionKey := schedulerBucketKey(schedulerVersionPrefix, bucket)
+	version, err := c.rdb.Incr(ctx, versionKey).Result()
+	if err != nil {
+		return err
+	}
+
+	versionStr := strconv.FormatInt(version, 10)
+	snapshotKey := schedulerSnapshotKey(bucket, versionStr)
+
+	pipe := c.rdb.Pipeline()
+	for _, account := range accounts {
+		payload, err := json.Marshal(account)
+		if err != nil {
+			return err
+		}
+		pipe.Set(ctx, schedulerAccountKey(strconv.FormatInt(account.ID, 10)), payload, 0)
+	}
+	if len(accounts) > 0 {
+		// Use the index as the score to preserve the ordering returned by the database.
+		members := make([]redis.Z, 0, len(accounts))
+		for idx, account := range accounts {
+			members = append(members, redis.Z{
+				Score:  float64(idx),
+				Member: strconv.FormatInt(account.ID, 10),
+			})
+		}
+		pipe.ZAdd(ctx, snapshotKey, members...)
+	} else {
+		pipe.Del(ctx, snapshotKey)
+	}
+	pipe.Set(ctx, activeKey, versionStr, 0)
+	pipe.Set(ctx, schedulerBucketKey(schedulerReadyPrefix, bucket), "1", 0)
+	pipe.SAdd(ctx, schedulerBucketSetKey, bucket.String())
+	if _, err := pipe.Exec(ctx); err != nil {
+		return err
+	}
+
+	if oldActive != "" && oldActive != versionStr {
+		_ = c.rdb.Del(ctx, schedulerSnapshotKey(bucket, oldActive)).Err()
+	}
+
+	return nil
+}
+
+func (c *schedulerCache) GetAccount(ctx context.Context, accountID int64) (*service.Account, error) {
+	key := schedulerAccountKey(strconv.FormatInt(accountID, 10))
+	val, err := c.rdb.Get(ctx, key).Result()
+	if err == redis.Nil {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	return decodeCachedAccount(val)
+}
+
+func (c *schedulerCache) SetAccount(ctx context.Context, account *service.Account) error {
+	if account == nil || account.ID <= 0 {
+		return nil
+	}
+	payload, err := json.Marshal(account)
+	if err != nil {
+		return err
+	}
+	key := schedulerAccountKey(strconv.FormatInt(account.ID, 10))
+	return c.rdb.Set(ctx, key, payload, 0).Err()
+}
+
+func (c *schedulerCache) DeleteAccount(ctx context.Context, accountID int64) error {
+	if accountID <= 0 {
+		return nil
+	}
+	key := schedulerAccountKey(strconv.FormatInt(accountID, 10))
+	return c.rdb.Del(ctx, key).Err()
+}
+
+func (c *schedulerCache) UpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error {
+	if len(updates) == 0 {
+		return nil
+	}
+
+	keys := make([]string, 0, len(updates))
+	ids := make([]int64, 0, len(updates))
+	for id := range updates {
+		keys = append(keys, schedulerAccountKey(strconv.FormatInt(id, 10)))
+		ids = append(ids, id)
+	}
+
+	values, err := c.rdb.MGet(ctx, keys...).Result()
+	if err != nil {
+		return err
+	}
+
+	pipe := c.rdb.Pipeline()
+	for i, val := range values {
+		if val == nil {
+			continue
+		}
+		account, err := decodeCachedAccount(val)
+		if err != nil {
+			return err
+		}
+		account.LastUsedAt = ptrTime(updates[ids[i]])
+		updated, err := json.Marshal(account)
+		if err != nil {
+			return err
+		}
+		pipe.Set(ctx, keys[i], updated, 0)
+	}
+	_, err = pipe.Exec(ctx)
+	return err
+}
+
+func (c *schedulerCache) TryLockBucket(ctx context.Context, bucket service.SchedulerBucket, ttl time.Duration) (bool, error) {
+	key := schedulerBucketKey(schedulerLockPrefix, bucket)
+	return c.rdb.SetNX(ctx, key, time.Now().UnixNano(), ttl).Result()
+}
+
+func (c *schedulerCache) ListBuckets(ctx context.Context) ([]service.SchedulerBucket, error) {
+	raw, err := c.rdb.SMembers(ctx, schedulerBucketSetKey).Result()
+	if err != nil {
+		return nil, err
+	}
+	out := make([]service.SchedulerBucket, 0, len(raw))
+	for _, entry := range raw {
+		bucket, ok := service.ParseSchedulerBucket(entry)
+		if !ok {
+			continue
+		}
+		out = append(out, bucket)
+	}
+	return out, nil
+}
+
+func (c *schedulerCache) GetOutboxWatermark(ctx context.Context) (int64, error) {
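+	// A missing watermark key (redis.Nil) is reported as 0 rather than as an error.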
+ val, err := c.rdb.Get(ctx, schedulerOutboxWatermarkKey).Result() + if err == redis.Nil { + return 0, nil + } + if err != nil { + return 0, err + } + id, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return 0, err + } + return id, nil +} + +func (c *schedulerCache) SetOutboxWatermark(ctx context.Context, id int64) error { + return c.rdb.Set(ctx, schedulerOutboxWatermarkKey, strconv.FormatInt(id, 10), 0).Err() +} + +func schedulerBucketKey(prefix string, bucket service.SchedulerBucket) string { + return fmt.Sprintf("%s%d:%s:%s", prefix, bucket.GroupID, bucket.Platform, bucket.Mode) +} + +func schedulerSnapshotKey(bucket service.SchedulerBucket, version string) string { + return fmt.Sprintf("%s%d:%s:%s:v%s", schedulerSnapshotPrefix, bucket.GroupID, bucket.Platform, bucket.Mode, version) +} + +func schedulerAccountKey(id string) string { + return schedulerAccountPrefix + id +} + +func ptrTime(t time.Time) *time.Time { + return &t +} + +func decodeCachedAccount(val any) (*service.Account, error) { + var payload []byte + switch raw := val.(type) { + case string: + payload = []byte(raw) + case []byte: + payload = raw + default: + return nil, fmt.Errorf("unexpected account cache type: %T", val) + } + var account service.Account + if err := json.Unmarshal(payload, &account); err != nil { + return nil, err + } + return &account, nil +} diff --git a/backend/internal/repository/scheduler_outbox_repo.go b/backend/internal/repository/scheduler_outbox_repo.go new file mode 100644 index 00000000..d7bc97da --- /dev/null +++ b/backend/internal/repository/scheduler_outbox_repo.go @@ -0,0 +1,96 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type schedulerOutboxRepository struct { + db *sql.DB +} + +func NewSchedulerOutboxRepository(db *sql.DB) service.SchedulerOutboxRepository { + return &schedulerOutboxRepository{db: db} +} + +func (r *schedulerOutboxRepository) ListAfter(ctx context.Context, afterID int64, limit int) ([]service.SchedulerOutboxEvent, error) { + if limit <= 0 { + limit = 100 + } + rows, err := r.db.QueryContext(ctx, ` + SELECT id, event_type, account_id, group_id, payload, created_at + FROM scheduler_outbox + WHERE id > $1 + ORDER BY id ASC + LIMIT $2 + `, afterID, limit) + if err != nil { + return nil, err + } + defer func() { + _ = rows.Close() + }() + + events := make([]service.SchedulerOutboxEvent, 0, limit) + for rows.Next() { + var ( + payloadRaw []byte + accountID sql.NullInt64 + groupID sql.NullInt64 + event service.SchedulerOutboxEvent + ) + if err := rows.Scan(&event.ID, &event.EventType, &accountID, &groupID, &payloadRaw, &event.CreatedAt); err != nil { + return nil, err + } + if accountID.Valid { + v := accountID.Int64 + event.AccountID = &v + } + if groupID.Valid { + v := groupID.Int64 + event.GroupID = &v + } + if len(payloadRaw) > 0 { + var payload map[string]any + if err := json.Unmarshal(payloadRaw, &payload); err != nil { + return nil, err + } + event.Payload = payload + } + events = append(events, event) + } + if err := rows.Err(); err != nil { + return nil, err + } + return events, nil +} + +func (r *schedulerOutboxRepository) MaxID(ctx context.Context) (int64, error) { + var maxID int64 + if err := r.db.QueryRowContext(ctx, "SELECT COALESCE(MAX(id), 0) FROM scheduler_outbox").Scan(&maxID); err != nil { + return 0, err + } + return maxID, nil +} + +func enqueueSchedulerOutbox(ctx context.Context, exec sqlExecutor, eventType string, accountID *int64, groupID *int64, 
payload any) error { + if exec == nil { + return nil + } + var payloadArg any + if payload != nil { + encoded, err := json.Marshal(payload) + if err != nil { + return err + } + payloadArg = encoded + } + _, err := exec.ExecContext(ctx, ` + INSERT INTO scheduler_outbox (event_type, account_id, group_id, payload) + VALUES ($1, $2, $3, $4) + `, eventType, accountID, groupID, payloadArg) + return err +} diff --git a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go new file mode 100644 index 00000000..e442a125 --- /dev/null +++ b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go @@ -0,0 +1,68 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestSchedulerSnapshotOutboxReplay(t *testing.T) { + ctx := context.Background() + rdb := testRedis(t) + client := testEntClient(t) + + _, _ = integrationDB.ExecContext(ctx, "TRUNCATE scheduler_outbox") + + accountRepo := newAccountRepositoryWithSQL(client, integrationDB) + outboxRepo := NewSchedulerOutboxRepository(integrationDB) + cache := NewSchedulerCache(rdb) + + cfg := &config.Config{ + RunMode: config.RunModeStandard, + Gateway: config.GatewayConfig{ + Scheduling: config.GatewaySchedulingConfig{ + OutboxPollIntervalSeconds: 1, + FullRebuildIntervalSeconds: 0, + DbFallbackEnabled: true, + }, + }, + } + + account := &service.Account{ + Name: "outbox-replay-" + time.Now().Format("150405.000000"), + Platform: service.PlatformOpenAI, + Type: service.AccountTypeAPIKey, + Status: service.StatusActive, + Schedulable: true, + Concurrency: 3, + Priority: 1, + Credentials: map[string]any{}, + Extra: map[string]any{}, + } + require.NoError(t, accountRepo.Create(ctx, account)) + require.NoError(t, cache.SetAccount(ctx, account)) + + svc := service.NewSchedulerSnapshotService(cache, outboxRepo, accountRepo, nil, cfg) + svc.Start() + t.Cleanup(svc.Stop) + + require.NoError(t, accountRepo.UpdateLastUsed(ctx, account.ID)) + updated, err := accountRepo.GetByID(ctx, account.ID) + require.NoError(t, err) + require.NotNil(t, updated.LastUsedAt) + expectedUnix := updated.LastUsedAt.Unix() + + require.Eventually(t, func() bool { + cached, err := cache.GetAccount(ctx, account.ID) + if err != nil || cached == nil || cached.LastUsedAt == nil { + return false + } + return cached.LastUsedAt.Unix() == expectedUnix + }, 5*time.Second, 100*time.Millisecond) +} diff --git a/backend/internal/repository/setting_repo.go b/backend/internal/repository/setting_repo.go new file mode 100644 index 00000000..a4550e60 --- /dev/null +++ b/backend/internal/repository/setting_repo.go @@ -0,0 +1,105 @@ +package repository + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/setting" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type settingRepository struct { + client *ent.Client +} + +func NewSettingRepository(client *ent.Client) service.SettingRepository { + return &settingRepository{client: client} +} + +func (r *settingRepository) Get(ctx context.Context, key string) (*service.Setting, error) { + m, err := r.client.Setting.Query().Where(setting.KeyEQ(key)).Only(ctx) + if err != nil { + if ent.IsNotFound(err) { + return nil, service.ErrSettingNotFound + } + return nil, err + } + return &service.Setting{ + ID: m.ID, + 
Key: m.Key, + Value: m.Value, + UpdatedAt: m.UpdatedAt, + }, nil +} + +func (r *settingRepository) GetValue(ctx context.Context, key string) (string, error) { + setting, err := r.Get(ctx, key) + if err != nil { + return "", err + } + return setting.Value, nil +} + +func (r *settingRepository) Set(ctx context.Context, key, value string) error { + now := time.Now() + return r.client.Setting. + Create(). + SetKey(key). + SetValue(value). + SetUpdatedAt(now). + OnConflictColumns(setting.FieldKey). + UpdateNewValues(). + Exec(ctx) +} + +func (r *settingRepository) GetMultiple(ctx context.Context, keys []string) (map[string]string, error) { + if len(keys) == 0 { + return map[string]string{}, nil + } + settings, err := r.client.Setting.Query().Where(setting.KeyIn(keys...)).All(ctx) + if err != nil { + return nil, err + } + + result := make(map[string]string) + for _, s := range settings { + result[s.Key] = s.Value + } + return result, nil +} + +func (r *settingRepository) SetMultiple(ctx context.Context, settings map[string]string) error { + if len(settings) == 0 { + return nil + } + + now := time.Now() + builders := make([]*ent.SettingCreate, 0, len(settings)) + for key, value := range settings { + builders = append(builders, r.client.Setting.Create().SetKey(key).SetValue(value).SetUpdatedAt(now)) + } + return r.client.Setting. + CreateBulk(builders...). + OnConflictColumns(setting.FieldKey). + UpdateNewValues(). + Exec(ctx) +} + +func (r *settingRepository) GetAll(ctx context.Context) (map[string]string, error) { + settings, err := r.client.Setting.Query().All(ctx) + if err != nil { + return nil, err + } + + result := make(map[string]string) + for _, s := range settings { + result[s.Key] = s.Value + } + return result, nil +} + +func (r *settingRepository) Delete(ctx context.Context, key string) error { + _, err := r.client.Setting.Delete().Where(setting.KeyEQ(key)).Exec(ctx) + return err +} diff --git a/backend/internal/repository/setting_repo_integration_test.go b/backend/internal/repository/setting_repo_integration_test.go new file mode 100644 index 00000000..147313d6 --- /dev/null +++ b/backend/internal/repository/setting_repo_integration_test.go @@ -0,0 +1,163 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type SettingRepoSuite struct { + suite.Suite + ctx context.Context + repo *settingRepository +} + +func (s *SettingRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.repo = NewSettingRepository(tx.Client()).(*settingRepository) +} + +func TestSettingRepoSuite(t *testing.T) { + suite.Run(t, new(SettingRepoSuite)) +} + +func (s *SettingRepoSuite) TestSetAndGetValue() { + s.Require().NoError(s.repo.Set(s.ctx, "k1", "v1"), "Set") + got, err := s.repo.GetValue(s.ctx, "k1") + s.Require().NoError(err, "GetValue") + s.Require().Equal("v1", got, "GetValue mismatch") +} + +func (s *SettingRepoSuite) TestSet_Upsert() { + s.Require().NoError(s.repo.Set(s.ctx, "k1", "v1"), "Set") + s.Require().NoError(s.repo.Set(s.ctx, "k1", "v2"), "Set upsert") + got, err := s.repo.GetValue(s.ctx, "k1") + s.Require().NoError(err, "GetValue after upsert") + s.Require().Equal("v2", got, "upsert mismatch") +} + +func (s *SettingRepoSuite) TestGetValue_Missing() { + _, err := s.repo.GetValue(s.ctx, "nonexistent") + s.Require().Error(err, "expected error for missing key") + s.Require().ErrorIs(err, service.ErrSettingNotFound) +} + +func (s *SettingRepoSuite) 
TestSetMultiple_AndGetMultiple() { + s.Require().NoError(s.repo.SetMultiple(s.ctx, map[string]string{"k2": "v2", "k3": "v3"}), "SetMultiple") + m, err := s.repo.GetMultiple(s.ctx, []string{"k2", "k3"}) + s.Require().NoError(err, "GetMultiple") + s.Require().Equal("v2", m["k2"]) + s.Require().Equal("v3", m["k3"]) +} + +func (s *SettingRepoSuite) TestGetMultiple_EmptyKeys() { + m, err := s.repo.GetMultiple(s.ctx, []string{}) + s.Require().NoError(err, "GetMultiple with empty keys") + s.Require().Empty(m, "expected empty map") +} + +func (s *SettingRepoSuite) TestGetMultiple_Subset() { + s.Require().NoError(s.repo.SetMultiple(s.ctx, map[string]string{"a": "1", "b": "2", "c": "3"})) + m, err := s.repo.GetMultiple(s.ctx, []string{"a", "c", "nonexistent"}) + s.Require().NoError(err, "GetMultiple subset") + s.Require().Equal("1", m["a"]) + s.Require().Equal("3", m["c"]) + _, exists := m["nonexistent"] + s.Require().False(exists, "nonexistent key should not be in map") +} + +func (s *SettingRepoSuite) TestGetAll() { + s.Require().NoError(s.repo.SetMultiple(s.ctx, map[string]string{"x": "1", "y": "2"})) + all, err := s.repo.GetAll(s.ctx) + s.Require().NoError(err, "GetAll") + s.Require().GreaterOrEqual(len(all), 2, "expected at least 2 settings") + s.Require().Equal("1", all["x"]) + s.Require().Equal("2", all["y"]) +} + +func (s *SettingRepoSuite) TestDelete() { + s.Require().NoError(s.repo.Set(s.ctx, "todelete", "val")) + s.Require().NoError(s.repo.Delete(s.ctx, "todelete"), "Delete") + _, err := s.repo.GetValue(s.ctx, "todelete") + s.Require().Error(err, "expected missing key error after Delete") + s.Require().ErrorIs(err, service.ErrSettingNotFound) +} + +func (s *SettingRepoSuite) TestDelete_Idempotent() { + // Delete a key that doesn't exist should not error + s.Require().NoError(s.repo.Delete(s.ctx, "nonexistent_delete"), "Delete nonexistent should be idempotent") +} + +func (s *SettingRepoSuite) TestSetMultiple_Upsert() { + s.Require().NoError(s.repo.Set(s.ctx, "upsert_key", "old_value")) + s.Require().NoError(s.repo.SetMultiple(s.ctx, map[string]string{"upsert_key": "new_value", "new_key": "new_val"})) + + got, err := s.repo.GetValue(s.ctx, "upsert_key") + s.Require().NoError(err) + s.Require().Equal("new_value", got, "SetMultiple should upsert existing key") + + got2, err := s.repo.GetValue(s.ctx, "new_key") + s.Require().NoError(err) + s.Require().Equal("new_val", got2) +} + +// TestSet_EmptyValue 测试保存空字符串值 +// 这是一个回归测试,确保可选设置(如站点Logo、API端点地址等)可以保存为空字符串 +func (s *SettingRepoSuite) TestSet_EmptyValue() { + // 测试 Set 方法保存空值 + s.Require().NoError(s.repo.Set(s.ctx, "empty_key", ""), "Set with empty value should succeed") + + got, err := s.repo.GetValue(s.ctx, "empty_key") + s.Require().NoError(err, "GetValue for empty value") + s.Require().Equal("", got, "empty value should be preserved") +} + +// TestSetMultiple_WithEmptyValues 测试批量保存包含空字符串的设置 +// 模拟用户保存站点设置时部分字段为空的场景 +func (s *SettingRepoSuite) TestSetMultiple_WithEmptyValues() { + // 模拟保存站点设置,部分字段有值,部分字段为空 + settings := map[string]string{ + "site_name": "AICodex2API", + "site_subtitle": "Subscription to API", + "site_logo": "", // 用户未上传Logo + "api_base_url": "", // 用户未设置API地址 + "contact_info": "", // 用户未设置联系方式 + "doc_url": "", // 用户未设置文档链接 + } + + s.Require().NoError(s.repo.SetMultiple(s.ctx, settings), "SetMultiple with empty values should succeed") + + // 验证所有值都正确保存 + result, err := s.repo.GetMultiple(s.ctx, []string{"site_name", "site_subtitle", "site_logo", "api_base_url", "contact_info", "doc_url"}) + s.Require().NoError(err, 
"GetMultiple after SetMultiple with empty values") + + s.Require().Equal("AICodex2API", result["site_name"]) + s.Require().Equal("Subscription to API", result["site_subtitle"]) + s.Require().Equal("", result["site_logo"], "empty site_logo should be preserved") + s.Require().Equal("", result["api_base_url"], "empty api_base_url should be preserved") + s.Require().Equal("", result["contact_info"], "empty contact_info should be preserved") + s.Require().Equal("", result["doc_url"], "empty doc_url should be preserved") +} + +// TestSetMultiple_UpdateToEmpty 测试将已有值更新为空字符串 +// 确保用户可以清空之前设置的值 +func (s *SettingRepoSuite) TestSetMultiple_UpdateToEmpty() { + // 先设置非空值 + s.Require().NoError(s.repo.Set(s.ctx, "clearable_key", "initial_value")) + + got, err := s.repo.GetValue(s.ctx, "clearable_key") + s.Require().NoError(err) + s.Require().Equal("initial_value", got) + + // 更新为空值 + s.Require().NoError(s.repo.SetMultiple(s.ctx, map[string]string{"clearable_key": ""}), "Update to empty should succeed") + + got, err = s.repo.GetValue(s.ctx, "clearable_key") + s.Require().NoError(err) + s.Require().Equal("", got, "value should be updated to empty string") +} diff --git a/backend/internal/repository/soft_delete_ent_integration_test.go b/backend/internal/repository/soft_delete_ent_integration_test.go new file mode 100644 index 00000000..ef63fbee --- /dev/null +++ b/backend/internal/repository/soft_delete_ent_integration_test.go @@ -0,0 +1,216 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func uniqueSoftDeleteValue(t *testing.T, prefix string) string { + t.Helper() + safeName := strings.NewReplacer("/", "_", " ", "_").Replace(t.Name()) + return fmt.Sprintf("%s-%s", prefix, safeName) +} + +func createEntUser(t *testing.T, ctx context.Context, client *dbent.Client, email string) *dbent.User { + t.Helper() + + u, err := client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + Save(ctx) + require.NoError(t, err, "create ent user") + return u +} + +func TestEntSoftDelete_ApiKey_DefaultFilterAndSkip(t *testing.T) { + ctx := context.Background() + // 使用全局 ent client,确保软删除验证在实际持久化数据上进行。 + client := testEntClient(t) + + u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user")+"@example.com") + + repo := NewAPIKeyRepository(client) + key := &service.APIKey{ + UserID: u.ID, + Key: uniqueSoftDeleteValue(t, "sk-soft-delete"), + Name: "soft-delete", + Status: service.StatusActive, + } + require.NoError(t, repo.Create(ctx, key), "create api key") + + require.NoError(t, repo.Delete(ctx, key.ID), "soft delete api key") + + _, err := repo.GetByID(ctx, key.ID) + require.ErrorIs(t, err, service.ErrAPIKeyNotFound, "deleted rows should be hidden by default") + + _, err = client.APIKey.Query().Where(apikey.IDEQ(key.ID)).Only(ctx) + require.Error(t, err, "default ent query should not see soft-deleted rows") + require.True(t, dbent.IsNotFound(err), "expected ent not-found after default soft delete filter") + + got, err := client.APIKey.Query(). + Where(apikey.IDEQ(key.ID)). 
+
+func TestEntSoftDelete_ApiKey_DeleteIdempotent(t *testing.T) {
+	ctx := context.Background()
+	// Use the global ent client so a transaction rollback cannot interfere with the idempotency check.
+	client := testEntClient(t)
+
+	u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user2")+"@example.com")
+
+	repo := NewAPIKeyRepository(client)
+	key := &service.APIKey{
+		UserID: u.ID,
+		Key:    uniqueSoftDeleteValue(t, "sk-soft-delete2"),
+		Name:   "soft-delete2",
+		Status: service.StatusActive,
+	}
+	require.NoError(t, repo.Create(ctx, key), "create api key")
+
+	require.NoError(t, repo.Delete(ctx, key.ID), "first delete")
+	require.NoError(t, repo.Delete(ctx, key.ID), "second delete should be idempotent")
+}
+
+func TestEntSoftDelete_ApiKey_HardDeleteViaSkipSoftDelete(t *testing.T) {
+	ctx := context.Background()
+	// Use the global ent client so the hard-delete semantics of SkipSoftDelete can be verified.
+	client := testEntClient(t)
+
+	u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-user3")+"@example.com")
+
+	repo := NewAPIKeyRepository(client)
+	key := &service.APIKey{
+		UserID: u.ID,
+		Key:    uniqueSoftDeleteValue(t, "sk-soft-delete3"),
+		Name:   "soft-delete3",
+		Status: service.StatusActive,
+	}
+	require.NoError(t, repo.Create(ctx, key), "create api key")
+
+	require.NoError(t, repo.Delete(ctx, key.ID), "soft delete api key")
+
+	// Hard delete using SkipSoftDelete so the hook doesn't convert it to update-deleted_at.
+	_, err := client.APIKey.Delete().Where(apikey.IDEQ(key.ID)).Exec(mixins.SkipSoftDelete(ctx))
+	require.NoError(t, err, "hard delete")
+
+	_, err = client.APIKey.Query().
+		Where(apikey.IDEQ(key.ID)).
+		Only(mixins.SkipSoftDelete(ctx))
+	require.True(t, dbent.IsNotFound(err), "expected row to be hard deleted")
+}
+
+// --- UserSubscription soft-delete tests ---
+
+func createEntGroup(t *testing.T, ctx context.Context, client *dbent.Client, name string) *dbent.Group {
+	t.Helper()
+
+	g, err := client.Group.Create().
+		SetName(name).
+		SetStatus(service.StatusActive).
+		Save(ctx)
+	require.NoError(t, err, "create ent group")
+	return g
+}
+
+func TestEntSoftDelete_UserSubscription_DefaultFilterAndSkip(t *testing.T) {
+	ctx := context.Background()
+	client := testEntClient(t)
+
+	u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-user")+"@example.com")
+	g := createEntGroup(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-group"))
+
+	repo := NewUserSubscriptionRepository(client)
+	sub := &service.UserSubscription{
+		UserID:    u.ID,
+		GroupID:   g.ID,
+		Status:    service.SubscriptionStatusActive,
+		ExpiresAt: time.Now().Add(24 * time.Hour),
+	}
+	require.NoError(t, repo.Create(ctx, sub), "create user subscription")
+
+	require.NoError(t, repo.Delete(ctx, sub.ID), "soft delete user subscription")
+
+	_, err := repo.GetByID(ctx, sub.ID)
+	require.Error(t, err, "deleted rows should be hidden by default")
+
+	_, err = client.UserSubscription.Query().Where(usersubscription.IDEQ(sub.ID)).Only(ctx)
+	require.Error(t, err, "default ent query should not see soft-deleted rows")
+	require.True(t, dbent.IsNotFound(err), "expected ent not-found after default soft delete filter")
+
+	got, err := client.UserSubscription.Query().
+		Where(usersubscription.IDEQ(sub.ID)).
+		Only(mixins.SkipSoftDelete(ctx))
+	require.NoError(t, err, "SkipSoftDelete should include soft-deleted rows")
+	require.NotNil(t, got.DeletedAt, "deleted_at should be set after soft delete")
+}
+
+func TestEntSoftDelete_UserSubscription_DeleteIdempotent(t *testing.T) {
+	ctx := context.Background()
+	client := testEntClient(t)
+
+	u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-user2")+"@example.com")
+	g := createEntGroup(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-group2"))
+
+	repo := NewUserSubscriptionRepository(client)
+	sub := &service.UserSubscription{
+		UserID:    u.ID,
+		GroupID:   g.ID,
+		Status:    service.SubscriptionStatusActive,
+		ExpiresAt: time.Now().Add(24 * time.Hour),
+	}
+	require.NoError(t, repo.Create(ctx, sub), "create user subscription")
+
+	require.NoError(t, repo.Delete(ctx, sub.ID), "first delete")
+	require.NoError(t, repo.Delete(ctx, sub.ID), "second delete should be idempotent")
+}
+
+func TestEntSoftDelete_UserSubscription_ListExcludesDeleted(t *testing.T) {
+	ctx := context.Background()
+	client := testEntClient(t)
+
+	u := createEntUser(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-user3")+"@example.com")
+	g1 := createEntGroup(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-group3a"))
+	g2 := createEntGroup(t, ctx, client, uniqueSoftDeleteValue(t, "sd-sub-group3b"))
+
+	repo := NewUserSubscriptionRepository(client)
+
+	sub1 := &service.UserSubscription{
+		UserID:    u.ID,
+		GroupID:   g1.ID,
+		Status:    service.SubscriptionStatusActive,
+		ExpiresAt: time.Now().Add(24 * time.Hour),
+	}
+	require.NoError(t, repo.Create(ctx, sub1), "create subscription 1")
+
+	sub2 := &service.UserSubscription{
+		UserID:    u.ID,
+		GroupID:   g2.ID,
+		Status:    service.SubscriptionStatusActive,
+		ExpiresAt: time.Now().Add(24 * time.Hour),
+	}
+	require.NoError(t, repo.Create(ctx, sub2), "create subscription 2")
+
+	// Soft delete sub1
+	require.NoError(t, repo.Delete(ctx, sub1.ID), "soft delete subscription 1")
+
+	// ListByUserID should only return non-deleted subscriptions
+	subs, err := repo.ListByUserID(ctx, u.ID)
+	require.NoError(t, err, "ListByUserID")
+	require.Len(t, subs, 1, "should only return non-deleted subscriptions")
+	require.Equal(t, sub2.ID, subs[0].ID, "expected sub2 to be returned")
+}
diff --git a/backend/internal/repository/sql_scan.go b/backend/internal/repository/sql_scan.go
new file mode 100644
index 00000000..91b6c9c4
--- /dev/null
+++ b/backend/internal/repository/sql_scan.go
@@ -0,0 +1,42 @@
+package repository
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+)
+
+type sqlQueryer interface {
+	QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error)
+}
+
+// scanSingleRow runs the query and scans the first row into dest.
+// A missing result can be detected via errors.Is(err, sql.ErrNoRows).
+// If Close fails, the close error is joined with the original error.
+// Design note: it depends only on QueryContext, avoiding QueryRowContext's
+// hard binding to *sql.Tx, so that ent.Tx can also serve as the sqlExecutor/queryer.
+func scanSingleRow(ctx context.Context, q sqlQueryer, query string, args []any, dest ...any) (err error) {
+	rows, err := q.QueryContext(ctx, query, args...)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if closeErr := rows.Close(); closeErr != nil {
+			err = errors.Join(err, closeErr)
+		}
+	}()
+
+	if !rows.Next() {
+		if err = rows.Err(); err != nil {
+			return err
+		}
+		return sql.ErrNoRows
+	}
+	if err = rows.Scan(dest...); err != nil {
+		return err
+	}
+	if err = rows.Err(); err != nil {
+		return err
+	}
+	return nil
+}
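+
+// Usage sketch (hypothetical caller; the table and column are made up):
+//
+//	var name string
+//	err := scanSingleRow(ctx, db, "SELECT name FROM users WHERE id = $1", []any{id}, &name)
+//	if errors.Is(err, sql.ErrNoRows) {
+//		// not found: map to a domain error instead of leaking sql.ErrNoRows
+//	}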
diff --git a/backend/internal/repository/temp_unsched_cache.go b/backend/internal/repository/temp_unsched_cache.go
new file mode 100644
index 00000000..55115eb8
--- /dev/null
+++ b/backend/internal/repository/temp_unsched_cache.go
@@ -0,0 +1,91 @@
+package repository
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+)
+
+const tempUnschedPrefix = "temp_unsched:account:"
+
+var tempUnschedSetScript = redis.NewScript(`
+    local key = KEYS[1]
+    local new_until = tonumber(ARGV[1])
+    local new_value = ARGV[2]
+    local new_ttl = tonumber(ARGV[3])
+
+    local existing = redis.call('GET', key)
+    if existing then
+        local ok, existing_data = pcall(cjson.decode, existing)
+        if ok and existing_data and existing_data.until_unix then
+            local existing_until = tonumber(existing_data.until_unix)
+            if existing_until and new_until <= existing_until then
+                return 0
+            end
+        end
+    end
+
+    redis.call('SET', key, new_value, 'EX', new_ttl)
+    return 1
+`)
+
+type tempUnschedCache struct {
+	rdb *redis.Client
+}
+
+func NewTempUnschedCache(rdb *redis.Client) service.TempUnschedCache {
+	return &tempUnschedCache{rdb: rdb}
+}
+
+// SetTempUnsched stores the temporarily-unschedulable state (the window can only be extended, never shortened).
+func (c *tempUnschedCache) SetTempUnsched(ctx context.Context, accountID int64, state *service.TempUnschedState) error {
+	key := fmt.Sprintf("%s%d", tempUnschedPrefix, accountID)
+
+	stateJSON, err := json.Marshal(state)
+	if err != nil {
+		return fmt.Errorf("marshal state: %w", err)
+	}
+
+	ttl := time.Until(time.Unix(state.UntilUnix, 0))
+	if ttl <= 0 {
+		return nil // already expired; nothing to set
+	}
+
+	ttlSeconds := int(ttl.Seconds())
+	if ttlSeconds < 1 {
+		ttlSeconds = 1
+	}
+
+	_, err = tempUnschedSetScript.Run(ctx, c.rdb, []string{key}, state.UntilUnix, string(stateJSON), ttlSeconds).Result()
+	return err
+}
+
+// GetTempUnsched returns the temporarily-unschedulable state.
+func (c *tempUnschedCache) GetTempUnsched(ctx context.Context, accountID int64) (*service.TempUnschedState, error) {
+	key := fmt.Sprintf("%s%d", tempUnschedPrefix, accountID)
+
+	val, err := c.rdb.Get(ctx, key).Result()
+	if err == redis.Nil {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var state service.TempUnschedState
+	if err := json.Unmarshal([]byte(val), &state); err != nil {
+		return nil, fmt.Errorf("unmarshal state: %w", err)
+	}
+
+	return &state, nil
+}
+
+// DeleteTempUnsched removes the temporarily-unschedulable state.
+func (c *tempUnschedCache) DeleteTempUnsched(ctx context.Context, accountID int64) error {
+	key := fmt.Sprintf("%s%d", tempUnschedPrefix, accountID)
+	return c.rdb.Del(ctx, key).Err()
+}
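+
+// Extend-only semantics (illustrative timeline; t0 and accountID are
+// hypothetical): given an existing state, a SetTempUnsched with an earlier
+// deadline is ignored by the Lua script, while a later one overwrites both
+// the value and its TTL:
+//
+//	_ = cache.SetTempUnsched(ctx, accountID, &service.TempUnschedState{UntilUnix: t0.Add(10 * time.Minute).Unix()}) // sets the window
+//	_ = cache.SetTempUnsched(ctx, accountID, &service.TempUnschedState{UntilUnix: t0.Add(5 * time.Minute).Unix()})  // no-op: would shorten it
+//	_ = cache.SetTempUnsched(ctx, accountID, &service.TempUnschedState{UntilUnix: t0.Add(20 * time.Minute).Unix()}) // extends the window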
diff --git a/backend/internal/repository/timeout_counter_cache.go b/backend/internal/repository/timeout_counter_cache.go
new file mode 100644
index 00000000..64cde22a
--- /dev/null
+++ b/backend/internal/repository/timeout_counter_cache.go
@@ -0,0 +1,80 @@
+package repository
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/redis/go-redis/v9"
+)
+
+const timeoutCounterPrefix = "timeout_count:account:"
+
+// timeoutCounterIncrScript atomically increments the counter in a Lua script and returns the current value.
+// If the key does not exist, it is created with the given expiry.
+var timeoutCounterIncrScript = redis.NewScript(`
+    local key = KEYS[1]
+    local ttl = tonumber(ARGV[1])
+
+    local count = redis.call('INCR', key)
+    if count == 1 then
+        redis.call('EXPIRE', key, ttl)
+    end
+
+    return count
+`)
+
+type timeoutCounterCache struct {
+	rdb *redis.Client
+}
+
+// NewTimeoutCounterCache creates a timeout-counter cache instance.
+func NewTimeoutCounterCache(rdb *redis.Client) service.TimeoutCounterCache {
+	return &timeoutCounterCache{rdb: rdb}
+}
+
+// IncrementTimeoutCount increments an account's timeout counter and returns the current count.
+// windowMinutes is the counting window in minutes; once it elapses the counter resets automatically.
+func (c *timeoutCounterCache) IncrementTimeoutCount(ctx context.Context, accountID int64, windowMinutes int) (int64, error) {
+	key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID)
+
+	ttlSeconds := windowMinutes * 60
+	if ttlSeconds < 60 {
+		ttlSeconds = 60 // at least one minute
+	}
+
+	result, err := timeoutCounterIncrScript.Run(ctx, c.rdb, []string{key}, ttlSeconds).Int64()
+	if err != nil {
+		return 0, fmt.Errorf("increment timeout count: %w", err)
+	}
+
+	return result, nil
+}
+
+// GetTimeoutCount returns an account's current timeout count.
+func (c *timeoutCounterCache) GetTimeoutCount(ctx context.Context, accountID int64) (int64, error) {
+	key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID)
+
+	val, err := c.rdb.Get(ctx, key).Int64()
+	if err == redis.Nil {
+		return 0, nil
+	}
+	if err != nil {
+		return 0, fmt.Errorf("get timeout count: %w", err)
+	}
+
+	return val, nil
+}
+
+// ResetTimeoutCount resets an account's timeout counter.
+func (c *timeoutCounterCache) ResetTimeoutCount(ctx context.Context, accountID int64) error {
+	key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID)
+	return c.rdb.Del(ctx, key).Err()
+}
+
+// GetTimeoutCountTTL returns the counter's remaining time to live.
+func (c *timeoutCounterCache) GetTimeoutCountTTL(ctx context.Context, accountID int64) (time.Duration, error) {
+	key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID)
+	return c.rdb.TTL(ctx, key).Result()
+}
diff --git a/backend/internal/repository/turnstile_service.go b/backend/internal/repository/turnstile_service.go
new file mode 100644
index 00000000..89748cd3
--- /dev/null
+++ b/backend/internal/repository/turnstile_service.go
@@ -0,0 +1,63 @@
+package repository
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/httpclient"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+)
+
+const turnstileVerifyURL = "https://challenges.cloudflare.com/turnstile/v0/siteverify"
+
+type turnstileVerifier struct {
+	httpClient *http.Client
+	verifyURL  string
+}
+
+func NewTurnstileVerifier() service.TurnstileVerifier {
+	sharedClient, err := httpclient.GetClient(httpclient.Options{
+		Timeout:            10 * time.Second,
+		ValidateResolvedIP: true,
+	})
+	if err != nil {
+		sharedClient = &http.Client{Timeout: 10 * time.Second}
+	}
+	return &turnstileVerifier{
+		httpClient: sharedClient,
+		verifyURL:  turnstileVerifyURL,
+	}
+}
+
+func (v *turnstileVerifier) VerifyToken(ctx context.Context, secretKey, token, remoteIP string) (*service.TurnstileVerifyResponse, error) {
+	formData := url.Values{}
+	formData.Set("secret", secretKey)
+	formData.Set("response", token)
+	if remoteIP != "" {
+		formData.Set("remoteip", remoteIP)
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodPost, v.verifyURL, strings.NewReader(formData.Encode()))
+	if err != nil {
+		return nil, fmt.Errorf("create request: %w", err)
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+	resp, err := v.httpClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("send request: %w", err)
+	}
+	defer func() { _ =
resp.Body.Close() }() + + var result service.TurnstileVerifyResponse + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + return nil, fmt.Errorf("decode response: %w", err) + } + + return &result, nil +} diff --git a/backend/internal/repository/turnstile_service_test.go b/backend/internal/repository/turnstile_service_test.go new file mode 100644 index 00000000..83e0839a --- /dev/null +++ b/backend/internal/repository/turnstile_service_test.go @@ -0,0 +1,141 @@ +package repository + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type TurnstileServiceSuite struct { + suite.Suite + ctx context.Context + verifier *turnstileVerifier + received chan url.Values +} + +func (s *TurnstileServiceSuite) SetupTest() { + s.ctx = context.Background() + s.received = make(chan url.Values, 1) + verifier, ok := NewTurnstileVerifier().(*turnstileVerifier) + require.True(s.T(), ok, "type assertion failed") + s.verifier = verifier +} + +func (s *TurnstileServiceSuite) setupTransport(handler http.HandlerFunc) { + s.verifier.verifyURL = "http://in-process/turnstile" + s.verifier.httpClient = &http.Client{ + Transport: newInProcessTransport(handler, nil), + } +} + +func (s *TurnstileServiceSuite) TestVerifyToken_SendsFormAndDecodesJSON() { + s.setupTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Capture form data in main goroutine context later + body, _ := io.ReadAll(r.Body) + values, _ := url.ParseQuery(string(body)) + s.received <- values + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(service.TurnstileVerifyResponse{Success: true}) + })) + + resp, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "1.1.1.1") + require.NoError(s.T(), err, "VerifyToken") + require.NotNil(s.T(), resp) + require.True(s.T(), resp.Success, "expected success response") + + // Assert form fields in main goroutine + select { + case values := <-s.received: + require.Equal(s.T(), "sk", values.Get("secret")) + require.Equal(s.T(), "token", values.Get("response")) + require.Equal(s.T(), "1.1.1.1", values.Get("remoteip")) + default: + require.Fail(s.T(), "expected server to receive request") + } +} + +func (s *TurnstileServiceSuite) TestVerifyToken_ContentType() { + var contentType string + s.setupTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + contentType = r.Header.Get("Content-Type") + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(service.TurnstileVerifyResponse{Success: true}) + })) + + _, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "1.1.1.1") + require.NoError(s.T(), err) + require.True(s.T(), strings.HasPrefix(contentType, "application/x-www-form-urlencoded"), "unexpected content-type: %s", contentType) +} + +func (s *TurnstileServiceSuite) TestVerifyToken_EmptyRemoteIP_NotSent() { + s.setupTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + body, _ := io.ReadAll(r.Body) + values, _ := url.ParseQuery(string(body)) + s.received <- values + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(service.TurnstileVerifyResponse{Success: true}) + })) + + _, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "") + require.NoError(s.T(), err) + + select { + case values := <-s.received: + require.Equal(s.T(), "", values.Get("remoteip"), 
"remoteip should be empty or not sent") + default: + require.Fail(s.T(), "expected server to receive request") + } +} + +func (s *TurnstileServiceSuite) TestVerifyToken_RequestError() { + s.verifier.verifyURL = "http://in-process/turnstile" + s.verifier.httpClient = &http.Client{ + Transport: roundTripFunc(func(*http.Request) (*http.Response, error) { + return nil, errors.New("dial failed") + }), + } + + _, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "1.1.1.1") + require.Error(s.T(), err, "expected error when server is closed") +} + +func (s *TurnstileServiceSuite) TestVerifyToken_InvalidJSON() { + s.setupTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, _ = io.WriteString(w, "not-valid-json") + })) + + _, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "1.1.1.1") + require.Error(s.T(), err, "expected error for invalid JSON response") +} + +func (s *TurnstileServiceSuite) TestVerifyToken_SuccessFalse() { + s.setupTransport(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(service.TurnstileVerifyResponse{ + Success: false, + ErrorCodes: []string{"invalid-input-response"}, + }) + })) + + resp, err := s.verifier.VerifyToken(s.ctx, "sk", "token", "1.1.1.1") + require.NoError(s.T(), err, "VerifyToken should not error on success=false") + require.NotNil(s.T(), resp) + require.False(s.T(), resp.Success) + require.Contains(s.T(), resp.ErrorCodes, "invalid-input-response") +} + +func TestTurnstileServiceSuite(t *testing.T) { + suite.Run(t, new(TurnstileServiceSuite)) +} diff --git a/backend/internal/repository/update_cache.go b/backend/internal/repository/update_cache.go new file mode 100644 index 00000000..86a8f14a --- /dev/null +++ b/backend/internal/repository/update_cache.go @@ -0,0 +1,27 @@ +package repository + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const updateCacheKey = "update:latest" + +type updateCache struct { + rdb *redis.Client +} + +func NewUpdateCache(rdb *redis.Client) service.UpdateCache { + return &updateCache{rdb: rdb} +} + +func (c *updateCache) GetUpdateInfo(ctx context.Context) (string, error) { + return c.rdb.Get(ctx, updateCacheKey).Result() +} + +func (c *updateCache) SetUpdateInfo(ctx context.Context, data string, ttl time.Duration) error { + return c.rdb.Set(ctx, updateCacheKey, data, ttl).Err() +} diff --git a/backend/internal/repository/update_cache_integration_test.go b/backend/internal/repository/update_cache_integration_test.go new file mode 100644 index 00000000..792f1b17 --- /dev/null +++ b/backend/internal/repository/update_cache_integration_test.go @@ -0,0 +1,73 @@ +//go:build integration + +package repository + +import ( + "errors" + "testing" + "time" + + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type UpdateCacheSuite struct { + IntegrationRedisSuite + cache *updateCache +} + +func (s *UpdateCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewUpdateCache(s.rdb).(*updateCache) +} + +func (s *UpdateCacheSuite) TestGetUpdateInfo_Missing() { + _, err := s.cache.GetUpdateInfo(s.ctx) + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil for missing update info") +} + +func (s *UpdateCacheSuite) TestSetAndGetUpdateInfo() { + updateTTL := 5 * time.Minute + require.NoError(s.T(), 
s.cache.SetUpdateInfo(s.ctx, "v1.2.3", updateTTL), "SetUpdateInfo")
+
+	info, err := s.cache.GetUpdateInfo(s.ctx)
+	require.NoError(s.T(), err, "GetUpdateInfo")
+	require.Equal(s.T(), "v1.2.3", info, "update info mismatch")
+}
+
+func (s *UpdateCacheSuite) TestSetUpdateInfo_TTL() {
+	updateTTL := 5 * time.Minute
+	require.NoError(s.T(), s.cache.SetUpdateInfo(s.ctx, "v1.2.3", updateTTL))
+
+	ttl, err := s.rdb.TTL(s.ctx, updateCacheKey).Result()
+	require.NoError(s.T(), err, "TTL updateCacheKey")
+	s.AssertTTLWithin(ttl, 1*time.Second, updateTTL)
+}
+
+func (s *UpdateCacheSuite) TestSetUpdateInfo_Overwrite() {
+	require.NoError(s.T(), s.cache.SetUpdateInfo(s.ctx, "v1.0.0", 5*time.Minute))
+	require.NoError(s.T(), s.cache.SetUpdateInfo(s.ctx, "v2.0.0", 5*time.Minute))
+
+	info, err := s.cache.GetUpdateInfo(s.ctx)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), "v2.0.0", info, "expected overwritten value")
+}
+
+func (s *UpdateCacheSuite) TestSetUpdateInfo_ZeroTTL() {
+	// TTL=0 means persist forever (no expiry) in the Redis SET command
+	require.NoError(s.T(), s.cache.SetUpdateInfo(s.ctx, "v0.0.0", 0))
+
+	info, err := s.cache.GetUpdateInfo(s.ctx)
+	require.NoError(s.T(), err)
+	require.Equal(s.T(), "v0.0.0", info)
+
+	ttl, err := s.rdb.TTL(s.ctx, updateCacheKey).Result()
+	require.NoError(s.T(), err)
+	// TTL=-1 means no expiry, TTL=-2 means the key doesn't exist
+	require.Equal(s.T(), time.Duration(-1), ttl, "expected TTL=-1 for key with no expiry")
+}
+
+func TestUpdateCacheSuite(t *testing.T) {
+	suite.Run(t, new(UpdateCacheSuite))
+}
diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go
new file mode 100644
index 00000000..4a2aaade
--- /dev/null
+++ b/backend/internal/repository/usage_log_repo.go
@@ -0,0 +1,2271 @@
+package repository
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	dbaccount "github.com/Wei-Shaw/sub2api/ent/account"
+	dbapikey "github.com/Wei-Shaw/sub2api/ent/apikey"
+	dbgroup "github.com/Wei-Shaw/sub2api/ent/group"
+	dbuser "github.com/Wei-Shaw/sub2api/ent/user"
+	dbusersub "github.com/Wei-Shaw/sub2api/ent/usersubscription"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/lib/pq"
+)
+
+const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at"
+
+type usageLogRepository struct {
+	client *dbent.Client
+	sql    sqlExecutor
+}
+
+func NewUsageLogRepository(client *dbent.Client, sqlDB *sql.DB) service.UsageLogRepository {
+	return newUsageLogRepositoryWithSQL(client, sqlDB)
+}
+
+func newUsageLogRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *usageLogRepository {
+	// Use scanSingleRow instead of QueryRowContext so ent.Tx remains usable as the sqlExecutor.
+	return &usageLogRepository{client: client, sql: sqlq}
+}
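+
+// Note: the RPM/TPM figures reported by getPerformanceStats below are
+// trailing five-minute averages, i.e. the request and token counts for the
+// window are divided by 5 rather than sampled from the most recent minute.
+// With illustrative numbers, 150 requests and 600000 tokens in the window
+// yield rpm=30 and tpm=120000.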
+
+// getPerformanceStats returns RPM and TPM (5-minute averages, optionally filtered by user).
+func (r *usageLogRepository) getPerformanceStats(ctx context.Context, userID int64) (rpm, tpm int64, err error) {
+	fiveMinutesAgo := time.Now().Add(-5 * time.Minute)
+	query := `
+		SELECT
+			COUNT(*) as request_count,
+			COALESCE(SUM(input_tokens + output_tokens), 0) as token_count
+		FROM usage_logs
+		WHERE created_at >= $1`
+	args := []any{fiveMinutesAgo}
+	if userID > 0 {
+		query += " AND user_id = $2"
+		args = append(args, userID)
+	}
+
+	var requestCount int64
+	var tokenCount int64
+	if err := scanSingleRow(ctx, r.sql, query, args, &requestCount, &tokenCount); err != nil {
+		return 0, 0, err
+	}
+	return requestCount / 5, tokenCount / 5, nil
+}
+
+func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) (bool, error) {
+	if log == nil {
+		return false, nil
+	}
+
+	// Inside a transaction context, run the raw SQL on the tx-bound ExecQuerier so it shares the transaction with other updates.
+	// Without a transaction, fall back to the default *sql.DB executor.
+	sqlq := r.sql
+	if tx := dbent.TxFromContext(ctx); tx != nil {
+		sqlq = tx.Client()
+	}
+
+	createdAt := log.CreatedAt
+	if createdAt.IsZero() {
+		createdAt = time.Now()
+	}
+
+	requestID := strings.TrimSpace(log.RequestID)
+	log.RequestID = requestID
+
+	rateMultiplier := log.RateMultiplier
+
+	query := `
+		INSERT INTO usage_logs (
+			user_id,
+			api_key_id,
+			account_id,
+			request_id,
+			model,
+			group_id,
+			subscription_id,
+			input_tokens,
+			output_tokens,
+			cache_creation_tokens,
+			cache_read_tokens,
+			cache_creation_5m_tokens,
+			cache_creation_1h_tokens,
+			input_cost,
+			output_cost,
+			cache_creation_cost,
+			cache_read_cost,
+			total_cost,
+			actual_cost,
+			rate_multiplier,
+			account_rate_multiplier,
+			billing_type,
+			stream,
+			duration_ms,
+			first_token_ms,
+			user_agent,
+			ip_address,
+			image_count,
+			image_size,
+			created_at
+		) VALUES (
+			$1, $2, $3, $4, $5,
+			$6, $7,
+			$8, $9, $10, $11,
+			$12, $13,
+			$14, $15, $16, $17, $18, $19,
+			$20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30
+		)
+		ON CONFLICT (request_id, api_key_id) DO NOTHING
+		RETURNING id, created_at
+	`
+
+	groupID := nullInt64(log.GroupID)
+	subscriptionID := nullInt64(log.SubscriptionID)
+	duration := nullInt(log.DurationMs)
+	firstToken := nullInt(log.FirstTokenMs)
+	userAgent := nullString(log.UserAgent)
+	ipAddress := nullString(log.IPAddress)
+	imageSize := nullString(log.ImageSize)
+
+	var requestIDArg any
+	if requestID != "" {
+		requestIDArg = requestID
+	}
+
+	args := []any{
+		log.UserID,
+		log.APIKeyID,
+		log.AccountID,
+		requestIDArg,
+		log.Model,
+		groupID,
+		subscriptionID,
+		log.InputTokens,
+		log.OutputTokens,
+		log.CacheCreationTokens,
+		log.CacheReadTokens,
+		log.CacheCreation5mTokens,
+		log.CacheCreation1hTokens,
+		log.InputCost,
+		log.OutputCost,
+		log.CacheCreationCost,
+		log.CacheReadCost,
+		log.TotalCost,
+		log.ActualCost,
+		rateMultiplier,
+		log.AccountRateMultiplier,
+		log.BillingType,
+		log.Stream,
+		duration,
+		firstToken,
+		userAgent,
+		ipAddress,
+		log.ImageCount,
+		imageSize,
+		createdAt,
+	}
+	if err := scanSingleRow(ctx, sqlq, query, args, &log.ID, &log.CreatedAt); err != nil {
+		if errors.Is(err, sql.ErrNoRows) && requestID != "" {
+			selectQuery := "SELECT id, created_at FROM usage_logs WHERE request_id = $1 AND api_key_id = $2"
+			if err := scanSingleRow(ctx, sqlq, selectQuery, []any{requestID, log.APIKeyID}, &log.ID, &log.CreatedAt); err != nil {
+				return false, err
+			}
+			log.RateMultiplier = rateMultiplier
+			return false, nil
+		}
+		return false, err
+	}
+	log.RateMultiplier = rateMultiplier
+	return true, nil
+}
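+
+// Create is idempotent per (request_id, api_key_id): ON CONFLICT ... DO
+// NOTHING suppresses the duplicate insert, RETURNING then yields no row, and
+// the follow-up SELECT recovers the id of the original row. Sketch of the
+// observable behaviour (hypothetical caller):
+//
+//	created, err := repo.Create(ctx, log) // first call: created == true, log.ID set via RETURNING
+//	created, err = repo.Create(ctx, log)  // same request_id+api_key_id: created == false, log.ID of the existing row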
+
+func (r *usageLogRepository) GetByID(ctx context.Context, id int64) (log *service.UsageLog, err error) {
+	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE id = $1"
+	rows, err := r.sql.QueryContext(ctx, query, id)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error first; only surface a Close failure when there is no other error.
+		// Also clear the return value so a partial result cannot be misused.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			log = nil
+		}
+	}()
+	if !rows.Next() {
+		if err = rows.Err(); err != nil {
+			return nil, err
+		}
+		return nil, service.ErrUsageLogNotFound
+	}
+	log, err = scanUsageLog(rows)
+	if err != nil {
+		return nil, err
+	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+	return log, nil
+}
+
+func (r *usageLogRepository) ListByUser(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	return r.listUsageLogsWithPagination(ctx, "WHERE user_id = $1", []any{userID}, params)
+}
+
+func (r *usageLogRepository) ListByAPIKey(ctx context.Context, apiKeyID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	return r.listUsageLogsWithPagination(ctx, "WHERE api_key_id = $1", []any{apiKeyID}, params)
+}
+
+// UserStats holds per-user usage statistics
+type UserStats struct {
+	TotalRequests   int64   `json:"total_requests"`
+	TotalTokens     int64   `json:"total_tokens"`
+	TotalCost       float64 `json:"total_cost"`
+	InputTokens     int64   `json:"input_tokens"`
+	OutputTokens    int64   `json:"output_tokens"`
+	CacheReadTokens int64   `json:"cache_read_tokens"`
+}
+
+func (r *usageLogRepository) GetUserStats(ctx context.Context, userID int64, startTime, endTime time.Time) (*UserStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
+			COALESCE(SUM(actual_cost), 0) as total_cost,
+			COALESCE(SUM(input_tokens), 0) as input_tokens,
+			COALESCE(SUM(output_tokens), 0) as output_tokens,
+			COALESCE(SUM(cache_read_tokens), 0) as cache_read_tokens
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2 AND created_at < $3
+	`
+
+	stats := &UserStats{}
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{userID, startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalTokens,
+		&stats.TotalCost,
+		&stats.InputTokens,
+		&stats.OutputTokens,
+		&stats.CacheReadTokens,
+	); err != nil {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// DashboardStats holds dashboard statistics
+type DashboardStats = usagestats.DashboardStats
+
+func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardStats, error) {
+	stats := &DashboardStats{}
+	now := timezone.Now()
+	todayStart := timezone.Today()
+
+	if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil {
+		return nil, err
+	}
+	if err := r.fillDashboardUsageStatsAggregated(ctx, stats, todayStart, now); err != nil {
+		return nil, err
+	}
+
+	rpm, tpm, err := r.getPerformanceStats(ctx, 0)
+	if err != nil {
+		return nil, err
+	}
+	stats.Rpm = rpm
+	stats.Tpm = tpm
+
+	return stats, nil
+}
+
+func (r *usageLogRepository) GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*DashboardStats, error) {
+	startUTC := start.UTC()
+	endUTC := end.UTC()
+	if !endUTC.After(startUTC) {
+		return nil, errors.New("invalid statistics time range")
+	}
+
+	stats := &DashboardStats{}
+	now := timezone.Now()
+	todayStart := timezone.Today()
+
+	if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil {
+		return nil, err
+	}
+	if err := r.fillDashboardUsageStatsFromUsageLogs(ctx, stats, startUTC, endUTC, todayStart, now); err != nil {
+		return nil, err
+	}
+
+	rpm, tpm, err := r.getPerformanceStats(ctx, 0)
+ if err != nil { + return nil, err + } + stats.Rpm = rpm + stats.Tpm = tpm + + return stats, nil +} + +func (r *usageLogRepository) fillDashboardEntityStats(ctx context.Context, stats *DashboardStats, todayUTC, now time.Time) error { + userStatsQuery := ` + SELECT + COUNT(*) as total_users, + COUNT(CASE WHEN created_at >= $1 THEN 1 END) as today_new_users + FROM users + WHERE deleted_at IS NULL + ` + if err := scanSingleRow( + ctx, + r.sql, + userStatsQuery, + []any{todayUTC}, + &stats.TotalUsers, + &stats.TodayNewUsers, + ); err != nil { + return err + } + + apiKeyStatsQuery := ` + SELECT + COUNT(*) as total_api_keys, + COUNT(CASE WHEN status = $1 THEN 1 END) as active_api_keys + FROM api_keys + WHERE deleted_at IS NULL + ` + if err := scanSingleRow( + ctx, + r.sql, + apiKeyStatsQuery, + []any{service.StatusActive}, + &stats.TotalAPIKeys, + &stats.ActiveAPIKeys, + ); err != nil { + return err + } + + accountStatsQuery := ` + SELECT + COUNT(*) as total_accounts, + COUNT(CASE WHEN status = $1 AND schedulable = true THEN 1 END) as normal_accounts, + COUNT(CASE WHEN status = $2 THEN 1 END) as error_accounts, + COUNT(CASE WHEN rate_limited_at IS NOT NULL AND rate_limit_reset_at > $3 THEN 1 END) as ratelimit_accounts, + COUNT(CASE WHEN overload_until IS NOT NULL AND overload_until > $4 THEN 1 END) as overload_accounts + FROM accounts + WHERE deleted_at IS NULL + ` + if err := scanSingleRow( + ctx, + r.sql, + accountStatsQuery, + []any{service.StatusActive, service.StatusError, now, now}, + &stats.TotalAccounts, + &stats.NormalAccounts, + &stats.ErrorAccounts, + &stats.RateLimitAccounts, + &stats.OverloadAccounts, + ); err != nil { + return err + } + + return nil +} + +func (r *usageLogRepository) fillDashboardUsageStatsAggregated(ctx context.Context, stats *DashboardStats, todayUTC, now time.Time) error { + totalStatsQuery := ` + SELECT + COALESCE(SUM(total_requests), 0) as total_requests, + COALESCE(SUM(input_tokens), 0) as total_input_tokens, + COALESCE(SUM(output_tokens), 0) as total_output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens, + COALESCE(SUM(total_cost), 0) as total_cost, + COALESCE(SUM(actual_cost), 0) as total_actual_cost, + COALESCE(SUM(total_duration_ms), 0) as total_duration_ms + FROM usage_dashboard_daily + ` + var totalDurationMs int64 + if err := scanSingleRow( + ctx, + r.sql, + totalStatsQuery, + nil, + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheCreationTokens, + &stats.TotalCacheReadTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &totalDurationMs, + ); err != nil { + return err + } + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens + if stats.TotalRequests > 0 { + stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests) + } + + todayStatsQuery := ` + SELECT + total_requests as today_requests, + input_tokens as today_input_tokens, + output_tokens as today_output_tokens, + cache_creation_tokens as today_cache_creation_tokens, + cache_read_tokens as today_cache_read_tokens, + total_cost as today_cost, + actual_cost as today_actual_cost, + active_users as active_users + FROM usage_dashboard_daily + WHERE bucket_date = $1::date + ` + if err := scanSingleRow( + ctx, + r.sql, + todayStatsQuery, + []any{todayUTC}, + &stats.TodayRequests, + &stats.TodayInputTokens, + &stats.TodayOutputTokens, + 
&stats.TodayCacheCreationTokens,
+		&stats.TodayCacheReadTokens,
+		&stats.TodayCost,
+		&stats.TodayActualCost,
+		&stats.ActiveUsers,
+	); err != nil {
+		if !errors.Is(err, sql.ErrNoRows) {
+			return err
+		}
+	}
+	stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens
+
+	hourlyActiveQuery := `
+		SELECT active_users
+		FROM usage_dashboard_hourly
+		WHERE bucket_start = $1
+	`
+	hourStart := now.In(timezone.Location()).Truncate(time.Hour)
+	if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart}, &stats.HourlyActiveUsers); err != nil {
+		if !errors.Is(err, sql.ErrNoRows) {
+			return err
+		}
+	}
+
+	return nil
+}
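+
+// Both fill helpers treat a missing stats row as "all zeros" rather than an
+// error: scanSingleRow reports sql.ErrNoRows and the caller keeps the
+// zero-valued fields. A minimal sketch of the pattern (dest is hypothetical):
+//
+//	if err := scanSingleRow(ctx, r.sql, query, args, &dest); err != nil {
+//		if !errors.Is(err, sql.ErrNoRows) {
+//			return err // a real failure
+//		}
+//		// no bucket row yet: leave dest at its zero value
+//	}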
+
+func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Context, stats *DashboardStats, startUTC, endUTC, todayUTC, now time.Time) error {
+	totalStatsQuery := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens,
+			COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(SUM(COALESCE(duration_ms, 0)), 0) as total_duration_ms
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`
+	var totalDurationMs int64
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		totalStatsQuery,
+		[]any{startUTC, endUTC},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheCreationTokens,
+		&stats.TotalCacheReadTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&totalDurationMs,
+	); err != nil {
+		return err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens
+	if stats.TotalRequests > 0 {
+		stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests)
+	}
+
+	todayEnd := todayUTC.Add(24 * time.Hour)
+	todayStatsQuery := `
+		SELECT
+			COUNT(*) as today_requests,
+			COALESCE(SUM(input_tokens), 0) as today_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as today_output_tokens,
+			COALESCE(SUM(cache_creation_tokens), 0) as today_cache_creation_tokens,
+			COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens,
+			COALESCE(SUM(total_cost), 0) as today_cost,
+			COALESCE(SUM(actual_cost), 0) as today_actual_cost
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		todayStatsQuery,
+		[]any{todayUTC, todayEnd},
+		&stats.TodayRequests,
+		&stats.TodayInputTokens,
+		&stats.TodayOutputTokens,
+		&stats.TodayCacheCreationTokens,
+		&stats.TodayCacheReadTokens,
+		&stats.TodayCost,
+		&stats.TodayActualCost,
+	); err != nil {
+		return err
+	}
+	stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens
+
+	activeUsersQuery := `
+		SELECT COUNT(DISTINCT user_id) as active_users
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`
+	if err := scanSingleRow(ctx, r.sql, activeUsersQuery, []any{todayUTC, todayEnd}, &stats.ActiveUsers); err != nil {
+		return err
+	}
+
+	hourStart := now.UTC().Truncate(time.Hour)
+	hourEnd := hourStart.Add(time.Hour)
+	hourlyActiveQuery := `
+		SELECT COUNT(DISTINCT user_id) as active_users
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`
+	if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart, hourEnd}, &stats.HourlyActiveUsers); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *usageLogRepository) ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	return r.listUsageLogsWithPagination(ctx, "WHERE account_id = $1", []any{accountID}, params)
+}
+
+func (r *usageLogRepository) ListByUserAndTimeRange(ctx context.Context, userID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE user_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC"
+	logs, err := r.queryUsageLogs(ctx, query, userID, startTime, endTime)
+	return logs, nil, err
+}
+
+// GetUserStatsAggregated returns aggregated usage statistics for a user using database-level aggregation
+func (r *usageLogRepository) GetUserStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2 AND created_at < $3
+	`
+
+	var stats usagestats.UsageStats
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{userID, startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return &stats, nil
+}
+
+// GetAPIKeyStatsAggregated returns aggregated usage statistics for an API key using database-level aggregation
+func (r *usageLogRepository) GetAPIKeyStatsAggregated(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE api_key_id = $1 AND created_at >= $2 AND created_at < $3
+	`
+
+	var stats usagestats.UsageStats
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{apiKeyID, startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return &stats, nil
+}
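+
+// Note: every aggregate query in this file wraps SUM/AVG in COALESCE(..., 0)
+// because PostgreSQL returns NULL when aggregating zero rows, and scanning
+// NULL into a plain int64/float64 would fail. Illustrative comparison
+// (hypothetical filter value):
+//
+//	SELECT SUM(total_cost) FROM usage_logs WHERE user_id = -1;              -- NULL
+//	SELECT COALESCE(SUM(total_cost), 0) FROM usage_logs WHERE user_id = -1; -- 0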
+
+// GetAccountStatsAggregated aggregates account usage statistics in SQL.
+//
+// Performance notes:
+// The previous implementation fetched every log row and computed the statistics in an application-level loop:
+// 1. large amounts of data had to be transferred to the application layer
+// 2. the application-level loop added CPU and memory overhead
+// The new implementation uses SQL aggregate functions:
+// 1. COUNT/SUM/AVG are computed inside the database
+// 2. only a single aggregated row is returned, greatly reducing data transfer
+// 3. database indexes can be leveraged to speed up the aggregate query
+func (r *usageLogRepository) GetAccountStatsAggregated(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE account_id = $1 AND created_at >= $2 AND created_at < $3
+	`
+
+	var stats usagestats.UsageStats
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{accountID, startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return &stats, nil
+}
+
+// GetModelStatsAggregated aggregates model usage statistics in SQL.
+// Performance: aggregation happens inside the database, avoiding an application-level loop.
+func (r *usageLogRepository) GetModelStatsAggregated(ctx context.Context, modelName string, startTime, endTime time.Time) (*usagestats.UsageStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE model = $1 AND created_at >= $2 AND created_at < $3
+	`
+
+	var stats usagestats.UsageStats
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{modelName, startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return &stats, nil
+}
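+
+// Day bucketing in GetDailyStatsAggregated below converts timestamps into the
+// application timezone before truncating to a date. Without the AT TIME ZONE
+// clause the session timezone decides the boundary: e.g. 2026-01-15 18:00 UTC
+// is already 2026-01-16 02:00 in Asia/Shanghai, so the same row would land in
+// different daily buckets depending on the session. Illustrative expression
+// (timezone value assumed):
+//
+//	TO_CHAR(created_at AT TIME ZONE 'Asia/Shanghai', 'YYYY-MM-DD')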
+
+// GetDailyStatsAggregated aggregates a user's daily usage statistics in SQL.
+// Performance: GROUP BY groups and aggregates by date inside the database, avoiding a per-day loop in the application.
+func (r *usageLogRepository) GetDailyStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) (result []map[string]any, err error) {
+	tzName := resolveUsageStatsTimezone()
+	query := `
+		SELECT
+			-- Group in the application timezone so the database session timezone cannot shift day boundaries.
+			TO_CHAR(created_at AT TIME ZONE $4, 'YYYY-MM-DD') as date,
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(COALESCE(duration_ms, 0)), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2 AND created_at < $3
+		GROUP BY 1
+		ORDER BY 1
+	`
+
+	rows, err := r.sql.QueryContext(ctx, query, userID, startTime, endTime, tzName)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			result = nil
+		}
+	}()
+
+	result = make([]map[string]any, 0)
+	for rows.Next() {
+		var (
+			date              string
+			totalRequests     int64
+			totalInputTokens  int64
+			totalOutputTokens int64
+			totalCacheTokens  int64
+			totalCost         float64
+			totalActualCost   float64
+			avgDurationMs     float64
+		)
+		if err = rows.Scan(
+			&date,
+			&totalRequests,
+			&totalInputTokens,
+			&totalOutputTokens,
+			&totalCacheTokens,
+			&totalCost,
+			&totalActualCost,
+			&avgDurationMs,
+		); err != nil {
+			return nil, err
+		}
+		result = append(result, map[string]any{
+			"date":                date,
+			"total_requests":      totalRequests,
+			"total_input_tokens":  totalInputTokens,
+			"total_output_tokens": totalOutputTokens,
+			"total_cache_tokens":  totalCacheTokens,
+			"total_tokens":        totalInputTokens + totalOutputTokens + totalCacheTokens,
+			"total_cost":          totalCost,
+			"total_actual_cost":   totalActualCost,
+			"average_duration_ms": avgDurationMs,
+		})
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// resolveUsageStatsTimezone returns the timezone name used for SQL grouping.
+// It prefers the timezone the application was initialized with, then the TZ environment variable, and finally falls back to UTC.
+func resolveUsageStatsTimezone() string {
+	tzName := timezone.Name()
+	if tzName != "" && tzName != "Local" {
+		return tzName
+	}
+	if envTZ := strings.TrimSpace(os.Getenv("TZ")); envTZ != "" {
+		return envTZ
+	}
+	return "UTC"
+}
+
+func (r *usageLogRepository) ListByAPIKeyAndTimeRange(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE api_key_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC"
+	logs, err := r.queryUsageLogs(ctx, query, apiKeyID, startTime, endTime)
+	return logs, nil, err
+}
+
+func (r *usageLogRepository) ListByAccountAndTimeRange(ctx context.Context, accountID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC"
+	logs, err := r.queryUsageLogs(ctx, query, accountID, startTime, endTime)
+	return logs, nil, err
+}
+
+func (r *usageLogRepository) ListByModelAndTimeRange(ctx context.Context, modelName string, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	query := "SELECT " + usageLogSelectColumns + " FROM usage_logs WHERE model = $1 AND created_at >= $2 AND created_at < $3 ORDER BY id DESC"
+	logs, err := r.queryUsageLogs(ctx, query, modelName, startTime, endTime)
+	return logs, nil, err
+}
+
+func (r *usageLogRepository) Delete(ctx context.Context, id int64) error {
+	_, err := r.sql.ExecContext(ctx, "DELETE FROM usage_logs WHERE id = $1", id)
+	return err
+}
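+
+// The account stats below expose three cost views: Cost applies the account's
+// rate multiplier (total_cost * COALESCE(account_rate_multiplier, 1)),
+// StandardCost is the unmultiplied total_cost, and UserCost is the
+// actual_cost billed to users. With illustrative numbers total_cost=2.00,
+// account_rate_multiplier=0.5 and actual_cost=1.20, a single request yields
+// Cost=1.00, StandardCost=2.00, UserCost=1.20.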
+
+// GetAccountTodayStats returns today's statistics for an account.
+func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID int64) (*usagestats.AccountStats, error) {
+	today := timezone.Today()
+
+	query := `
+		SELECT
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
+			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
+			COALESCE(SUM(total_cost), 0) as standard_cost,
+			COALESCE(SUM(actual_cost), 0) as user_cost
+		FROM usage_logs
+		WHERE account_id = $1 AND created_at >= $2
+	`
+
+	stats := &usagestats.AccountStats{}
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{accountID, today},
+		&stats.Requests,
+		&stats.Tokens,
+		&stats.Cost,
+		&stats.StandardCost,
+		&stats.UserCost,
+	); err != nil {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// GetAccountWindowStats returns an account's statistics within a time window.
+func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountID int64, startTime time.Time) (*usagestats.AccountStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
+			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
+			COALESCE(SUM(total_cost), 0) as standard_cost,
+			COALESCE(SUM(actual_cost), 0) as user_cost
+		FROM usage_logs
+		WHERE account_id = $1 AND created_at >= $2
+	`
+
+	stats := &usagestats.AccountStats{}
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{accountID, startTime},
+		&stats.Requests,
+		&stats.Tokens,
+		&stats.Cost,
+		&stats.StandardCost,
+		&stats.UserCost,
+	); err != nil {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// TrendDataPoint represents a single point in trend data
+type TrendDataPoint = usagestats.TrendDataPoint
+
+// ModelStat represents usage statistics for a single model
+type ModelStat = usagestats.ModelStat
+
+// UserUsageTrendPoint represents a user usage trend data point
+type UserUsageTrendPoint = usagestats.UserUsageTrendPoint
+
+// APIKeyUsageTrendPoint represents an API key usage trend data point
+type APIKeyUsageTrendPoint = usagestats.APIKeyUsageTrendPoint
+
+// GetAPIKeyUsageTrend returns usage trend data grouped by API key and date
+func (r *usageLogRepository) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) (results []APIKeyUsageTrendPoint, err error) {
+	dateFormat := "YYYY-MM-DD"
+	if granularity == "hour" {
+		dateFormat = "YYYY-MM-DD HH24:00"
+	}
+
+	query := fmt.Sprintf(`
+		WITH top_keys AS (
+			SELECT api_key_id
+			FROM usage_logs
+			WHERE created_at >= $1 AND created_at < $2
+			GROUP BY api_key_id
+			ORDER BY SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) DESC
+			LIMIT $3
+		)
+		SELECT
+			TO_CHAR(u.created_at, '%s') as date,
+			u.api_key_id,
+			COALESCE(k.name, '') as key_name,
+			COUNT(*) as requests,
+			COALESCE(SUM(u.input_tokens + u.output_tokens + u.cache_creation_tokens + u.cache_read_tokens), 0) as tokens
+		FROM usage_logs u
+		LEFT JOIN api_keys k ON u.api_key_id = k.id
+		WHERE u.api_key_id IN (SELECT api_key_id FROM top_keys)
+			AND u.created_at >= $4 AND u.created_at < $5
+		GROUP BY date, u.api_key_id, k.name
+		ORDER BY date ASC, tokens DESC
+	`, dateFormat)
+
+	rows, err := r.sql.QueryContext(ctx, query, startTime, endTime, limit, startTime, endTime)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error first; only surface a Close failure when there is no other error.
+		// Also clear the return value so a partial result cannot be misused.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results = make([]APIKeyUsageTrendPoint, 0)
+	for rows.Next() {
+		var row APIKeyUsageTrendPoint
+		if err = rows.Scan(&row.Date, &row.APIKeyID, &row.KeyName, &row.Requests, &row.Tokens); err != nil {
+			return nil, err
+		}
+		results = append(results, row)
+	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return results, nil
+}
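+
+// Note on placeholders: the window bounds are bound twice in both trend
+// queries ($1/$2 inside the CTE, $4/$5 in the outer query) because the
+// positional arguments are passed in order rather than reused; both pairs
+// receive the same startTime/endTime values:
+//
+//	rows, err := r.sql.QueryContext(ctx, query, startTime, endTime, limit, startTime, endTime)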
+
+// GetUserUsageTrend returns usage trend data grouped by user and date
+func (r *usageLogRepository) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) (results []UserUsageTrendPoint, err error) {
+	dateFormat := "YYYY-MM-DD"
+	if granularity == "hour" {
+		dateFormat = "YYYY-MM-DD HH24:00"
+	}
+
+	query := fmt.Sprintf(`
+		WITH top_users AS (
+			SELECT user_id
+			FROM usage_logs
+			WHERE created_at >= $1 AND created_at < $2
+			GROUP BY user_id
+			ORDER BY SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) DESC
+			LIMIT $3
+		)
+		SELECT
+			TO_CHAR(u.created_at, '%s') as date,
+			u.user_id,
+			COALESCE(us.email, '') as email,
+			COUNT(*) as requests,
+			COALESCE(SUM(u.input_tokens + u.output_tokens + u.cache_creation_tokens + u.cache_read_tokens), 0) as tokens,
+			COALESCE(SUM(u.total_cost), 0) as cost,
+			COALESCE(SUM(u.actual_cost), 0) as actual_cost
+		FROM usage_logs u
+		LEFT JOIN users us ON u.user_id = us.id
+		WHERE u.user_id IN (SELECT user_id FROM top_users)
+			AND u.created_at >= $4 AND u.created_at < $5
+		GROUP BY date, u.user_id, us.email
+		ORDER BY date ASC, tokens DESC
+	`, dateFormat)
+
+	rows, err := r.sql.QueryContext(ctx, query, startTime, endTime, limit, startTime, endTime)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error first; only surface a Close failure when there is no other error.
+		// Also clear the return value so a partial result cannot be misused.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results = make([]UserUsageTrendPoint, 0)
+	for rows.Next() {
+		var row UserUsageTrendPoint
+		if err = rows.Scan(&row.Date, &row.UserID, &row.Email, &row.Requests, &row.Tokens, &row.Cost, &row.ActualCost); err != nil {
+			return nil, err
+		}
+		results = append(results, row)
+	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return results, nil
+}
+
+// UserDashboardStats holds per-user dashboard statistics
+type UserDashboardStats = usagestats.UserDashboardStats
+
+// GetUserDashboardStats returns dashboard statistics scoped to a single user.
+func (r *usageLogRepository) GetUserDashboardStats(ctx context.Context, userID int64) (*UserDashboardStats, error) {
+	stats := &UserDashboardStats{}
+	today := timezone.Today()
+
+	// API key counts
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		"SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND deleted_at IS NULL",
+		[]any{userID},
+		&stats.TotalAPIKeys,
+	); err != nil {
+		return nil, err
+	}
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		"SELECT COUNT(*) FROM api_keys WHERE user_id = $1 AND status = $2 AND deleted_at IS NULL",
+		[]any{userID, service.StatusActive},
+		&stats.ActiveAPIKeys,
+	); err != nil {
+		return nil, err
+	}
+
+	// Cumulative token statistics
+	totalStatsQuery := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens,
+			COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE user_id = $1
+	`
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		totalStatsQuery,
+		[]any{userID},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheCreationTokens,
+		&stats.TotalCacheReadTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens
+
+	// Today's token statistics
+	todayStatsQuery := `
+		SELECT
+			COUNT(*) as today_requests,
+			COALESCE(SUM(input_tokens), 0) as today_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as today_output_tokens,
+			COALESCE(SUM(cache_creation_tokens), 0) as today_cache_creation_tokens,
+			COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens,
+			COALESCE(SUM(total_cost), 0) as today_cost,
+			COALESCE(SUM(actual_cost), 0) as today_actual_cost
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2
+	`
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		todayStatsQuery,
+		[]any{userID, today},
+		&stats.TodayRequests,
+		&stats.TodayInputTokens,
+		&stats.TodayOutputTokens,
+		&stats.TodayCacheCreationTokens,
+		&stats.TodayCacheReadTokens,
+		&stats.TodayCost,
+		&stats.TodayActualCost,
+	); err != nil {
+		return nil, err
+	}
+	stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens
+
+	// Performance metrics: RPM and TPM (5-minute averages, counting only this user's requests)
+	rpm, tpm, err := r.getPerformanceStats(ctx, userID)
+	if err != nil {
+		return nil, err
+	}
+	stats.Rpm = rpm
+	stats.Tpm = tpm
+
+	return stats, nil
+}
+
+// GetUserUsageTrendByUserID returns the usage trend for a specific user.
+func (r *usageLogRepository) GetUserUsageTrendByUserID(ctx context.Context, userID int64, startTime, endTime time.Time, granularity string) (results []TrendDataPoint, err error) {
+	dateFormat := "YYYY-MM-DD"
+	if granularity == "hour" {
+		dateFormat = "YYYY-MM-DD HH24:00"
+	}
+
+	query := fmt.Sprintf(`
+		SELECT
+			TO_CHAR(created_at, '%s') as date,
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens), 0) as input_tokens,
+			COALESCE(SUM(output_tokens), 0) as output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as cache_tokens,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
+			COALESCE(SUM(total_cost), 0) as cost,
+			COALESCE(SUM(actual_cost), 0) as actual_cost
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2 AND created_at < $3
+		GROUP BY date
+		ORDER BY date ASC
+	`, dateFormat)
+
+	rows, err := r.sql.QueryContext(ctx, query, userID, startTime, endTime)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error first; only surface a Close failure when there is no other error.
+		// Also clear the return value so a partial result cannot be misused.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results, err = scanTrendRows(rows)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// GetUserModelStats returns per-model statistics for a specific user.
+func (r *usageLogRepository) GetUserModelStats(ctx context.Context, userID int64, startTime, endTime time.Time) (results []ModelStat, err error) {
+	query := `
+		SELECT
+			model,
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens), 0) as input_tokens,
+			COALESCE(SUM(output_tokens), 0) as output_tokens,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
+			COALESCE(SUM(total_cost), 0) as cost,
+			COALESCE(SUM(actual_cost), 0) as actual_cost
+		FROM usage_logs
+		WHERE user_id = $1 AND created_at >= $2 AND created_at < $3
+		GROUP BY model
+		ORDER BY total_tokens DESC
+	`
+
+	rows, err := r.sql.QueryContext(ctx, query, userID, startTime, endTime)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error first; only surface a Close failure when there is no other error.
+		// Also clear the return value so a partial result cannot be misused.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results, err = scanModelStatsRows(rows)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// UsageLogFilters represents filters for usage log queries
+type UsageLogFilters = usagestats.UsageLogFilters
+
+// ListWithFilters lists usage logs with optional filters (for admin)
+func (r *usageLogRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	conditions :=
+	args := make([]any, 0, 8)
+
+	if filters.UserID > 0 {
+		conditions = append(conditions, fmt.Sprintf("user_id = $%d", len(args)+1))
+		args = append(args, filters.UserID)
+	}
+	if filters.APIKeyID > 0 {
+		conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", len(args)+1))
+		args = append(args, filters.APIKeyID)
+	}
+	if filters.AccountID > 0 {
+		conditions = append(conditions, fmt.Sprintf("account_id = $%d", len(args)+1))
+		args = append(args, filters.AccountID)
+	}
+	if filters.GroupID > 0 {
+		conditions = append(conditions, fmt.Sprintf("group_id = $%d", len(args)+1))
+		args = append(args, filters.GroupID)
+	}
+	if filters.Model != "" {
+		conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1))
+		args = append(args, filters.Model)
+	}
+	if filters.Stream != nil {
+		conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1))
+		args = append(args, *filters.Stream)
+	}
+	if filters.BillingType != nil {
+		conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1))
+		args = append(args, int16(*filters.BillingType))
+	}
+	if filters.StartTime != nil {
+		conditions = append(conditions, fmt.Sprintf("created_at >= $%d", len(args)+1))
+		args = append(args, *filters.StartTime)
+	}
+	if filters.EndTime != nil {
+		conditions = append(conditions, fmt.Sprintf("created_at <= $%d", len(args)+1))
+		args = append(args, *filters.EndTime)
+	}
+
+	whereClause := buildWhere(conditions)
+	logs, page, err := r.listUsageLogsWithPagination(ctx, whereClause, args, params)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := r.hydrateUsageLogAssociations(ctx, logs); err != nil {
+		return nil, nil, err
+	}
+	return logs, page, nil
+}
+
+// UsageStats represents usage statistics
+type UsageStats = usagestats.UsageStats
+
+// BatchUserUsageStats represents usage stats for a single user
+type BatchUserUsageStats = usagestats.BatchUserUsageStats
+
+// GetBatchUserUsageStats gets today and total actual_cost for multiple users
+func (r *usageLogRepository) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*BatchUserUsageStats, error) {
+	result := make(map[int64]*BatchUserUsageStats)
+	if len(userIDs) == 0 {
+		return result, nil
+	}
+
+	for _, id := range userIDs {
+		result[id] = &BatchUserUsageStats{UserID: id}
+	}
+
+	query := `
+		SELECT user_id, COALESCE(SUM(actual_cost), 0) as total_cost
+		FROM usage_logs
+		WHERE user_id = ANY($1)
+		GROUP BY user_id
+	`
+	rows, err := r.sql.QueryContext(ctx, query, pq.Array(userIDs))
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var userID int64
+		var total float64
+		if err := rows.Scan(&userID, &total); err != nil {
+			_ = rows.Close()
+			return nil, err
+		}
+		if stats, ok := result[userID]; ok {
+			stats.TotalActualCost = total
+		}
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	today := timezone.Today()
+	todayQuery := `
+		SELECT user_id, COALESCE(SUM(actual_cost), 0) as today_cost
+		FROM usage_logs
+		WHERE user_id = ANY($1) AND created_at >= $2
+		GROUP BY user_id
+	`
+	rows, err = r.sql.QueryContext(ctx, todayQuery, pq.Array(userIDs), today)
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var userID int64
+		var total float64
+		if err := rows.Scan(&userID, &total); err != nil {
+			_ = rows.Close()
+			return nil, err
+		}
+		if stats, ok := result[userID]; ok {
+			stats.TodayActualCost = total
+		}
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// BatchAPIKeyUsageStats represents usage stats for a single API key
+type BatchAPIKeyUsageStats = usagestats.BatchAPIKeyUsageStats
+
+// GetBatchAPIKeyUsageStats gets today and total actual_cost for multiple API keys
+func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*BatchAPIKeyUsageStats, error) {
+	result := make(map[int64]*BatchAPIKeyUsageStats)
+	if len(apiKeyIDs) == 0 {
+		return result, nil
+	}
+
+	for _, id := range apiKeyIDs {
+		result[id] = &BatchAPIKeyUsageStats{APIKeyID: id}
+	}
+
+	query := `
+		SELECT api_key_id, COALESCE(SUM(actual_cost), 0) as total_cost
+		FROM usage_logs
+		WHERE api_key_id = ANY($1)
+		GROUP BY api_key_id
+	`
+	rows, err := r.sql.QueryContext(ctx, query, pq.Array(apiKeyIDs))
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var apiKeyID int64
+		var total float64
+		if err := rows.Scan(&apiKeyID, &total); err != nil {
+			_ = rows.Close()
+			return nil, err
+		}
+		if stats, ok := result[apiKeyID]; ok {
+			stats.TotalActualCost = total
+		}
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	today := timezone.Today()
+	todayQuery := `
+		SELECT api_key_id, COALESCE(SUM(actual_cost), 0) as today_cost
+		FROM usage_logs
+		WHERE api_key_id = ANY($1) AND created_at >= $2
+		GROUP BY api_key_id
+	`
+	rows, err = r.sql.QueryContext(ctx, todayQuery, pq.Array(apiKeyIDs), today)
+	if err != nil {
+		return nil, err
+	}
+	for rows.Next() {
+		var apiKeyID int64
+		var total float64
+		if err := rows.Scan(&apiKeyID, &total); err != nil {
+			_ = rows.Close()
+			return nil, err
+		}
+		if stats, ok := result[apiKeyID]; ok {
+			stats.TodayActualCost = total
+		}
+	}
+	if err := rows.Close(); err != nil {
+		return nil, err
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// GetUsageTrendWithFilters returns usage trend data with optional filters
+func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) {
+	dateFormat := "YYYY-MM-DD"
+	if granularity == "hour" {
+		dateFormat = "YYYY-MM-DD HH24:00"
+	}
+
+	query := fmt.Sprintf(`
+		SELECT
+			TO_CHAR(created_at, '%s') as date,
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens), 0) as input_tokens,
+			COALESCE(SUM(output_tokens), 0) as output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as cache_tokens,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
+			COALESCE(SUM(total_cost), 0) as cost,
+			COALESCE(SUM(actual_cost), 0) as actual_cost
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`, dateFormat)
+
+	args := []any{startTime, endTime}
+	if userID > 0 {
+		query += fmt.Sprintf(" AND user_id = $%d", len(args)+1)
+		args = append(args, userID)
+	}
+	if apiKeyID > 0 {
+		query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1)
+		args = append(args, apiKeyID)
+	}
+	if accountID > 0 {
+		query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
+		args = append(args, accountID)
+	}
+	if groupID > 0 {
+		query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
+		args = append(args, groupID)
+	}
+	if model != "" {
+		query += fmt.Sprintf(" AND model = $%d", len(args)+1)
+		args = append(args, model)
+	}
+	if stream != nil {
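+		// stream is deliberately a *bool: callers can distinguish "no stream
+		// filter" (nil) from filtering on stream = false, and a predicate is
+		// appended only when the pointer is non-nil.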
+		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
+		args = append(args, *stream)
+	}
+	query += " GROUP BY date ORDER BY date ASC"
+
+	rows, err := r.sql.QueryContext(ctx, query, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error; only surface the Close failure when no error occurred yet.
+		// Also clear the returned results so an incomplete set is not used by mistake.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results, err = scanTrendRows(rows)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// GetModelStatsWithFilters returns model statistics with optional filters
+func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) {
+	actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost"
+	// When aggregating by account_id alone, actual cost uses the account rate multiplier (total_cost * account_rate_multiplier).
+	if accountID > 0 && userID == 0 && apiKeyID == 0 {
+		actualCostExpr = "COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost"
+	}
+
+	query := fmt.Sprintf(`
+		SELECT
+			model,
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens), 0) as input_tokens,
+			COALESCE(SUM(output_tokens), 0) as output_tokens,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
+			COALESCE(SUM(total_cost), 0) as cost,
+			%s
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at < $2
+	`, actualCostExpr)
+
+	args := []any{startTime, endTime}
+	if userID > 0 {
+		query += fmt.Sprintf(" AND user_id = $%d", len(args)+1)
+		args = append(args, userID)
+	}
+	if apiKeyID > 0 {
+		query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1)
+		args = append(args, apiKeyID)
+	}
+	if accountID > 0 {
+		query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
+		args = append(args, accountID)
+	}
+	if groupID > 0 {
+		query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
+		args = append(args, groupID)
+	}
+	if stream != nil {
+		query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
+		args = append(args, *stream)
+	}
+	query += " GROUP BY model ORDER BY total_tokens DESC"
+
+	rows, err := r.sql.QueryContext(ctx, query, args...)
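+	// Worked example of the placeholder numbering above (hypothetical values,
+	// not taken from real data): with accountID=42 and stream=true the query
+	// ends in
+	//   WHERE created_at >= $1 AND created_at < $2 AND account_id = $3 AND stream = $4
+	// with args = [startTime, endTime, 42, true]; len(args)+1 keeps each new
+	// $n in lockstep with the args slice as optional filters are appended.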
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error; only surface the Close failure when no error occurred yet.
+		// Also clear the returned results so an incomplete set is not used by mistake.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			results = nil
+		}
+	}()
+
+	results, err = scanModelStatsRows(rows)
+	if err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+// GetGlobalStats gets usage statistics for all users within a time range
+func (r *usageLogRepository) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*UsageStats, error) {
+	query := `
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
+		FROM usage_logs
+		WHERE created_at >= $1 AND created_at <= $2
+	`
+
+	stats := &UsageStats{}
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		[]any{startTime, endTime},
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return stats, nil
+}
+
+// GetStatsWithFilters gets usage statistics with optional filters
+func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters UsageLogFilters) (*UsageStats, error) {
+	conditions := make([]string, 0, 9)
+	args := make([]any, 0, 9)
+
+	if filters.UserID > 0 {
+		conditions = append(conditions, fmt.Sprintf("user_id = $%d", len(args)+1))
+		args = append(args, filters.UserID)
+	}
+	if filters.APIKeyID > 0 {
+		conditions = append(conditions, fmt.Sprintf("api_key_id = $%d", len(args)+1))
+		args = append(args, filters.APIKeyID)
+	}
+	if filters.AccountID > 0 {
+		conditions = append(conditions, fmt.Sprintf("account_id = $%d", len(args)+1))
+		args = append(args, filters.AccountID)
+	}
+	if filters.GroupID > 0 {
+		conditions = append(conditions, fmt.Sprintf("group_id = $%d", len(args)+1))
+		args = append(args, filters.GroupID)
+	}
+	if filters.Model != "" {
+		conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1))
+		args = append(args, filters.Model)
+	}
+	if filters.Stream != nil {
+		conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1))
+		args = append(args, *filters.Stream)
+	}
+	if filters.BillingType != nil {
+		conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1))
+		args = append(args, int16(*filters.BillingType))
+	}
+	if filters.StartTime != nil {
+		conditions = append(conditions, fmt.Sprintf("created_at >= $%d", len(args)+1))
+		args = append(args, *filters.StartTime)
+	}
+	if filters.EndTime != nil {
+		conditions = append(conditions, fmt.Sprintf("created_at <= $%d", len(args)+1))
+		args = append(args, *filters.EndTime)
+	}
+
+	query := fmt.Sprintf(`
+		SELECT
+			COUNT(*) as total_requests,
+			COALESCE(SUM(input_tokens), 0) as total_input_tokens,
+			COALESCE(SUM(output_tokens), 0) as total_output_tokens,
+			COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
+			COALESCE(SUM(total_cost), 0) as total_cost,
+			COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as total_account_cost,
+			COALESCE(AVG(duration_ms), 0) as avg_duration_ms
+		FROM usage_logs
+		%s
+	`, buildWhere(conditions))
+
+	stats := &UsageStats{}
+	var totalAccountCost float64
+	if err := scanSingleRow(
+		ctx,
+		r.sql,
+		query,
+		args,
+		&stats.TotalRequests,
+		&stats.TotalInputTokens,
+		&stats.TotalOutputTokens,
+		&stats.TotalCacheTokens,
+		&stats.TotalCost,
+		&stats.TotalActualCost,
+		&totalAccountCost,
+		&stats.AverageDurationMs,
+	); err != nil {
+		return nil, err
+	}
+	if filters.AccountID > 0 {
+		stats.TotalAccountCost = &totalAccountCost
+	}
+	stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
+	return stats, nil
+}
+
+// AccountUsageHistory represents daily usage history for an account
+type AccountUsageHistory = usagestats.AccountUsageHistory
+
+// AccountUsageSummary represents summary statistics for an account
+type AccountUsageSummary = usagestats.AccountUsageSummary
+
+// AccountUsageStatsResponse represents the full usage statistics response for an account
+type AccountUsageStatsResponse = usagestats.AccountUsageStatsResponse
+
+// GetAccountUsageStats returns comprehensive usage statistics for an account over a time range
+func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (resp *AccountUsageStatsResponse, err error) {
+	daysCount := int(endTime.Sub(startTime).Hours()/24) + 1
+	if daysCount <= 0 {
+		daysCount = 30
+	}
+
+	query := `
+		SELECT
+			TO_CHAR(created_at, 'YYYY-MM-DD') as date,
+			COUNT(*) as requests,
+			COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
+			COALESCE(SUM(total_cost), 0) as cost,
+			COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost,
+			COALESCE(SUM(actual_cost), 0) as user_cost
+		FROM usage_logs
+		WHERE account_id = $1 AND created_at >= $2 AND created_at < $3
+		GROUP BY date
+		ORDER BY date ASC
+	`
+
+	rows, err := r.sql.QueryContext(ctx, query, accountID, startTime, endTime)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error; only surface the Close failure when no error occurred yet.
+		// Also clear the returned response so an incomplete result is not used by mistake.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			resp = nil
+		}
+	}()
+
+	history := make([]AccountUsageHistory, 0)
+	for rows.Next() {
+		var date string
+		var requests int64
+		var tokens int64
+		var cost float64
+		var actualCost float64
+		var userCost float64
+		if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost, &userCost); err != nil {
+			return nil, err
+		}
+		t, _ := time.Parse("2006-01-02", date)
+		history = append(history, AccountUsageHistory{
+			Date: date,
+			Label: t.Format("01/02"),
+			Requests: requests,
+			Tokens: tokens,
+			Cost: cost,
+			ActualCost: actualCost,
+			UserCost: userCost,
+		})
+	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	var totalAccountCost, totalUserCost, totalStandardCost float64
+	var totalRequests, totalTokens int64
+	var highestCostDay, highestRequestDay *AccountUsageHistory
+
+	for i := range history {
+		h := &history[i]
+		totalAccountCost += h.ActualCost
+		totalUserCost += h.UserCost
+		totalStandardCost += h.Cost
+		totalRequests += h.Requests
+		totalTokens += h.Tokens
+
+		if highestCostDay == nil || h.ActualCost > highestCostDay.ActualCost {
+			highestCostDay = h
+		}
+		if highestRequestDay == nil || h.Requests > highestRequestDay.Requests {
+			highestRequestDay = h
+		}
+	}
+
+	actualDaysUsed := len(history)
+	if actualDaysUsed == 0 {
+		actualDaysUsed = 1
+	}
+
+	avgQuery := "SELECT COALESCE(AVG(duration_ms), 0) as avg_duration_ms FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3"
+	var avgDuration float64
+	if err := scanSingleRow(ctx, r.sql, avgQuery, []any{accountID, startTime, endTime}, &avgDuration); err != nil {
+		return nil, err
+	}
+
+	summary := AccountUsageSummary{
+		Days: daysCount,
+		ActualDaysUsed: actualDaysUsed,
+		TotalCost: totalAccountCost,
+		TotalUserCost: totalUserCost,
+		TotalStandardCost: totalStandardCost,
+		TotalRequests: totalRequests,
+		TotalTokens: totalTokens,
+		AvgDailyCost: totalAccountCost / float64(actualDaysUsed),
+		AvgDailyUserCost: totalUserCost / float64(actualDaysUsed),
+		AvgDailyRequests: float64(totalRequests) / float64(actualDaysUsed),
+		AvgDailyTokens: float64(totalTokens) / float64(actualDaysUsed),
+		AvgDurationMs: avgDuration,
+	}
+
+	todayStr := timezone.Now().Format("2006-01-02")
+	for i := range history {
+		if history[i].Date == todayStr {
+			summary.Today = &struct {
+				Date string `json:"date"`
+				Cost float64 `json:"cost"`
+				UserCost float64 `json:"user_cost"`
+				Requests int64 `json:"requests"`
+				Tokens int64 `json:"tokens"`
+			}{
+				Date: history[i].Date,
+				Cost: history[i].ActualCost,
+				UserCost: history[i].UserCost,
+				Requests: history[i].Requests,
+				Tokens: history[i].Tokens,
+			}
+			break
+		}
+	}
+
+	if highestCostDay != nil {
+		summary.HighestCostDay = &struct {
+			Date string `json:"date"`
+			Label string `json:"label"`
+			Cost float64 `json:"cost"`
+			UserCost float64 `json:"user_cost"`
+			Requests int64 `json:"requests"`
+		}{
+			Date: highestCostDay.Date,
+			Label: highestCostDay.Label,
+			Cost: highestCostDay.ActualCost,
+			UserCost: highestCostDay.UserCost,
+			Requests: highestCostDay.Requests,
+		}
+	}
+
+	if highestRequestDay != nil {
+		summary.HighestRequestDay = &struct {
+			Date string `json:"date"`
+			Label string `json:"label"`
+			Requests int64 `json:"requests"`
+			Cost float64 `json:"cost"`
+			UserCost float64 `json:"user_cost"`
+		}{
+			Date: highestRequestDay.Date,
+			Label: highestRequestDay.Label,
+			Requests: highestRequestDay.Requests,
+			Cost: highestRequestDay.ActualCost,
+			UserCost: highestRequestDay.UserCost,
+		}
+	}
+
+	models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil)
+	if err != nil {
+		models = []ModelStat{}
+	}
+
+	resp = &AccountUsageStatsResponse{
+		History: history,
+		Summary: summary,
+		Models: models,
+	}
+	return resp, nil
+}
+
+func (r *usageLogRepository) listUsageLogsWithPagination(ctx context.Context, whereClause string, args []any, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) {
+	countQuery := "SELECT COUNT(*) FROM usage_logs " + whereClause
+	var total int64
+	if err := scanSingleRow(ctx, r.sql, countQuery, args, &total); err != nil {
+		return nil, nil, err
+	}
+
+	limitPos := len(args) + 1
+	offsetPos := len(args) + 2
+	listArgs := append(append([]any{}, args...), params.Limit(), params.Offset())
+	query := fmt.Sprintf("SELECT %s FROM usage_logs %s ORDER BY id DESC LIMIT $%d OFFSET $%d", usageLogSelectColumns, whereClause, limitPos, offsetPos)
+	logs, err := r.queryUsageLogs(ctx, query, listArgs...)
+	if err != nil {
+		return nil, nil, err
+	}
+	return logs, paginationResultFromTotal(total, params), nil
+}
+
+func (r *usageLogRepository) queryUsageLogs(ctx context.Context, query string, args ...any) (logs []service.UsageLog, err error) {
+	rows, err := r.sql.QueryContext(ctx, query, args...)
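+	// logs and err are named results, so the deferred Close handler installed
+	// just below can still adjust them after a return statement inside this
+	// function has already assigned its values.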
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Keep the primary error; only surface the Close failure when no error occurred yet.
+		// Also clear the returned logs so an incomplete set is not used by mistake.
+		if closeErr := rows.Close(); closeErr != nil && err == nil {
+			err = closeErr
+			logs = nil
+		}
+	}()
+
+	logs = make([]service.UsageLog, 0)
+	for rows.Next() {
+		var log *service.UsageLog
+		log, err = scanUsageLog(rows)
+		if err != nil {
+			return nil, err
+		}
+		logs = append(logs, *log)
+	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+	return logs, nil
+}
+
+func (r *usageLogRepository) hydrateUsageLogAssociations(ctx context.Context, logs []service.UsageLog) error {
+	// Associations are batch-loaded through Ent rather than inflating the raw SQL any further.
+	if len(logs) == 0 {
+		return nil
+	}
+
+	ids := collectUsageLogIDs(logs)
+	users, err := r.loadUsers(ctx, ids.userIDs)
+	if err != nil {
+		return err
+	}
+	apiKeys, err := r.loadAPIKeys(ctx, ids.apiKeyIDs)
+	if err != nil {
+		return err
+	}
+	accounts, err := r.loadAccounts(ctx, ids.accountIDs)
+	if err != nil {
+		return err
+	}
+	groups, err := r.loadGroups(ctx, ids.groupIDs)
+	if err != nil {
+		return err
+	}
+	subs, err := r.loadSubscriptions(ctx, ids.subscriptionIDs)
+	if err != nil {
+		return err
+	}
+
+	for i := range logs {
+		if user, ok := users[logs[i].UserID]; ok {
+			logs[i].User = user
+		}
+		if key, ok := apiKeys[logs[i].APIKeyID]; ok {
+			logs[i].APIKey = key
+		}
+		if acc, ok := accounts[logs[i].AccountID]; ok {
+			logs[i].Account = acc
+		}
+		if logs[i].GroupID != nil {
+			if group, ok := groups[*logs[i].GroupID]; ok {
+				logs[i].Group = group
+			}
+		}
+		if logs[i].SubscriptionID != nil {
+			if sub, ok := subs[*logs[i].SubscriptionID]; ok {
+				logs[i].Subscription = sub
+			}
+		}
+	}
+	return nil
+}
+
+type usageLogIDs struct {
+	userIDs []int64
+	apiKeyIDs []int64
+	accountIDs []int64
+	groupIDs []int64
+	subscriptionIDs []int64
+}
+
+func collectUsageLogIDs(logs []service.UsageLog) usageLogIDs {
+	idSet := func() map[int64]struct{} { return make(map[int64]struct{}) }
+
+	userIDs := idSet()
+	apiKeyIDs := idSet()
+	accountIDs := idSet()
+	groupIDs := idSet()
+	subscriptionIDs := idSet()
+
+	for i := range logs {
+		userIDs[logs[i].UserID] = struct{}{}
+		apiKeyIDs[logs[i].APIKeyID] = struct{}{}
+		accountIDs[logs[i].AccountID] = struct{}{}
+		if logs[i].GroupID != nil {
+			groupIDs[*logs[i].GroupID] = struct{}{}
+		}
+		if logs[i].SubscriptionID != nil {
+			subscriptionIDs[*logs[i].SubscriptionID] = struct{}{}
+		}
+	}
+
+	return usageLogIDs{
+		userIDs: setToSlice(userIDs),
+		apiKeyIDs: setToSlice(apiKeyIDs),
+		accountIDs: setToSlice(accountIDs),
+		groupIDs: setToSlice(groupIDs),
+		subscriptionIDs: setToSlice(subscriptionIDs),
+	}
+}
+
+func (r *usageLogRepository) loadUsers(ctx context.Context, ids []int64) (map[int64]*service.User, error) {
+	out := make(map[int64]*service.User)
+	if len(ids) == 0 {
+		return out, nil
+	}
+	models, err := r.client.User.Query().Where(dbuser.IDIn(ids...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range models {
+		out[m.ID] = userEntityToService(m)
+	}
+	return out, nil
+}
+
+func (r *usageLogRepository) loadAPIKeys(ctx context.Context, ids []int64) (map[int64]*service.APIKey, error) {
+	out := make(map[int64]*service.APIKey)
+	if len(ids) == 0 {
+		return out, nil
+	}
+	models, err := r.client.APIKey.Query().Where(dbapikey.IDIn(ids...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range models {
+		out[m.ID] = apiKeyEntityToService(m)
+	}
+	return out, nil
+}
+
+func (r *usageLogRepository) loadAccounts(ctx context.Context, ids []int64) (map[int64]*service.Account, error) {
+	out := make(map[int64]*service.Account)
+	if len(ids) == 0 {
+		return out, nil
+	}
+	models, err := r.client.Account.Query().Where(dbaccount.IDIn(ids...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range models {
+		out[m.ID] = accountEntityToService(m)
+	}
+	return out, nil
+}
+
+func (r *usageLogRepository) loadGroups(ctx context.Context, ids []int64) (map[int64]*service.Group, error) {
+	out := make(map[int64]*service.Group)
+	if len(ids) == 0 {
+		return out, nil
+	}
+	models, err := r.client.Group.Query().Where(dbgroup.IDIn(ids...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range models {
+		out[m.ID] = groupEntityToService(m)
+	}
+	return out, nil
+}
+
+func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64) (map[int64]*service.UserSubscription, error) {
+	out := make(map[int64]*service.UserSubscription)
+	if len(ids) == 0 {
+		return out, nil
+	}
+	models, err := r.client.UserSubscription.Query().Where(dbusersub.IDIn(ids...)).All(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for _, m := range models {
+		out[m.ID] = userSubscriptionEntityToService(m)
+	}
+	return out, nil
+}
+
+func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) {
+	var (
+		id int64
+		userID int64
+		apiKeyID int64
+		accountID int64
+		requestID sql.NullString
+		model string
+		groupID sql.NullInt64
+		subscriptionID sql.NullInt64
+		inputTokens int
+		outputTokens int
+		cacheCreationTokens int
+		cacheReadTokens int
+		cacheCreation5m int
+		cacheCreation1h int
+		inputCost float64
+		outputCost float64
+		cacheCreationCost float64
+		cacheReadCost float64
+		totalCost float64
+		actualCost float64
+		rateMultiplier float64
+		accountRateMultiplier sql.NullFloat64
+		billingType int16
+		stream bool
+		durationMs sql.NullInt64
+		firstTokenMs sql.NullInt64
+		userAgent sql.NullString
+		ipAddress sql.NullString
+		imageCount int
+		imageSize sql.NullString
+		createdAt time.Time
+	)
+
+	if err := scanner.Scan(
+		&id,
+		&userID,
+		&apiKeyID,
+		&accountID,
+		&requestID,
+		&model,
+		&groupID,
+		&subscriptionID,
+		&inputTokens,
+		&outputTokens,
+		&cacheCreationTokens,
+		&cacheReadTokens,
+		&cacheCreation5m,
+		&cacheCreation1h,
+		&inputCost,
+		&outputCost,
+		&cacheCreationCost,
+		&cacheReadCost,
+		&totalCost,
+		&actualCost,
+		&rateMultiplier,
+		&accountRateMultiplier,
+		&billingType,
+		&stream,
+		&durationMs,
+		&firstTokenMs,
+		&userAgent,
+		&ipAddress,
+		&imageCount,
+		&imageSize,
+		&createdAt,
+	); err != nil {
+		return nil, err
+	}
+
+	log := &service.UsageLog{
+		ID: id,
+		UserID: userID,
+		APIKeyID: apiKeyID,
+		AccountID: accountID,
+		Model: model,
+		InputTokens: inputTokens,
+		OutputTokens: outputTokens,
+		CacheCreationTokens: cacheCreationTokens,
+		CacheReadTokens: cacheReadTokens,
+		CacheCreation5mTokens: cacheCreation5m,
+		CacheCreation1hTokens: cacheCreation1h,
+		InputCost: inputCost,
+		OutputCost: outputCost,
+		CacheCreationCost: cacheCreationCost,
+		CacheReadCost: cacheReadCost,
+		TotalCost: totalCost,
+		ActualCost: actualCost,
+		RateMultiplier: rateMultiplier,
+		AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier),
+		BillingType: int8(billingType),
+		Stream: stream,
+		ImageCount: imageCount,
+		CreatedAt: createdAt,
+	}
+
+	if requestID.Valid {
+		log.RequestID = requestID.String
+	}
+	if groupID.Valid {
+		value := groupID.Int64
+		log.GroupID = &value
+	}
+	if subscriptionID.Valid {
+		value := subscriptionID.Int64
+		log.SubscriptionID = &value
+	}
+	if durationMs.Valid {
+		value := int(durationMs.Int64)
+		log.DurationMs = &value
+	}
+	if firstTokenMs.Valid {
+		value := int(firstTokenMs.Int64)
+		log.FirstTokenMs = &value
+	}
+	if userAgent.Valid {
+		log.UserAgent = &userAgent.String
+	}
+	if ipAddress.Valid {
+		log.IPAddress = &ipAddress.String
+	}
+	if imageSize.Valid {
+		log.ImageSize = &imageSize.String
+	}
+
+	return log, nil
+}
+
+func scanTrendRows(rows *sql.Rows) ([]TrendDataPoint, error) {
+	results := make([]TrendDataPoint, 0)
+	for rows.Next() {
+		var row TrendDataPoint
+		if err := rows.Scan(
+			&row.Date,
+			&row.Requests,
+			&row.InputTokens,
+			&row.OutputTokens,
+			&row.CacheTokens,
+			&row.TotalTokens,
+			&row.Cost,
+			&row.ActualCost,
+		); err != nil {
+			return nil, err
+		}
+		results = append(results, row)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+func scanModelStatsRows(rows *sql.Rows) ([]ModelStat, error) {
+	results := make([]ModelStat, 0)
+	for rows.Next() {
+		var row ModelStat
+		if err := rows.Scan(
+			&row.Model,
+			&row.Requests,
+			&row.InputTokens,
+			&row.OutputTokens,
+			&row.TotalTokens,
+			&row.Cost,
+			&row.ActualCost,
+		); err != nil {
+			return nil, err
+		}
+		results = append(results, row)
+	}
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+	return results, nil
+}
+
+func buildWhere(conditions []string) string {
+	if len(conditions) == 0 {
+		return ""
+	}
+	return "WHERE " + strings.Join(conditions, " AND ")
+}
+
+func nullInt64(v *int64) sql.NullInt64 {
+	if v == nil {
+		return sql.NullInt64{}
+	}
+	return sql.NullInt64{Int64: *v, Valid: true}
+}
+
+func nullInt(v *int) sql.NullInt64 {
+	if v == nil {
+		return sql.NullInt64{}
+	}
+	return sql.NullInt64{Int64: int64(*v), Valid: true}
+}
+
+func nullFloat64Ptr(v sql.NullFloat64) *float64 {
+	if !v.Valid {
+		return nil
+	}
+	out := v.Float64
+	return &out
+}
+
+func nullString(v *string) sql.NullString {
+	if v == nil || *v == "" {
+		return sql.NullString{}
+	}
+	return sql.NullString{String: *v, Valid: true}
+}
+
+func setToSlice(set map[int64]struct{}) []int64 {
+	out := make([]int64, 0, len(set))
+	for id := range set {
+		out = append(out, id)
+	}
+	return out
+}
diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go
new file mode 100644
index 00000000..7174be18
--- /dev/null
+++ b/backend/internal/repository/usage_log_repo_integration_test.go
@@ -0,0 +1,1215 @@
+//go:build integration
+
+package repository
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/google/uuid"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/stretchr/testify/suite"
+)
+
+type UsageLogRepoSuite struct {
+	suite.Suite
+	ctx context.Context
+	tx *dbent.Tx
+	client *dbent.Client
+	repo *usageLogRepository
+}
+
+func (s *UsageLogRepoSuite) SetupTest() {
+	s.ctx = context.Background()
+	tx := testEntTx(s.T())
+	s.tx = tx
+	s.client = tx.Client()
+	s.repo = newUsageLogRepositoryWithSQL(s.client, tx)
+}
+
+func TestUsageLogRepoSuite(t *testing.T) {
+	suite.Run(t, new(UsageLogRepoSuite))
+}
+
+// truncateToDayUTC truncates a time to the UTC day boundary (test helper)
+func truncateToDayUTC(t time.Time) time.Time {
+	t = t.UTC()
+	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
+}
+
+func (s *UsageLogRepoSuite) createUsageLog(user *service.User, apiKey *service.APIKey, account *service.Account, inputTokens, outputTokens int, cost float64, createdAt time.Time) *service.UsageLog {
+	log := &service.UsageLog{
+		UserID: user.ID,
+		APIKeyID: apiKey.ID,
+		AccountID: account.ID,
+		RequestID: uuid.New().String(), // Generate unique RequestID for each log
+		Model: "claude-3",
+		InputTokens: inputTokens,
+		OutputTokens: outputTokens,
+		TotalCost: cost,
+		ActualCost: cost,
+		CreatedAt: createdAt,
+	}
+	_, err := s.repo.Create(s.ctx, log)
+	s.Require().NoError(err)
+	return log
+}
+
+// --- Create / GetByID ---
+
+func (s *UsageLogRepoSuite) TestCreate() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "create@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-create", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-create"})
+
+	log := &service.UsageLog{
+		UserID: user.ID,
+		APIKeyID: apiKey.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 10,
+		OutputTokens: 20,
+		TotalCost: 0.5,
+		ActualCost: 0.4,
+	}
+
+	_, err := s.repo.Create(s.ctx, log)
+	s.Require().NoError(err, "Create")
+	s.Require().NotZero(log.ID)
+}
+
+func (s *UsageLogRepoSuite) TestGetByID() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid"})
+
+	log := s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+
+	got, err := s.repo.GetByID(s.ctx, log.ID)
+	s.Require().NoError(err, "GetByID")
+	s.Require().Equal(log.ID, got.ID)
+	s.Require().Equal(10, got.InputTokens)
+}
+
+func (s *UsageLogRepoSuite) TestGetByID_NotFound() {
+	_, err := s.repo.GetByID(s.ctx, 999999)
+	s.Require().Error(err, "expected error for non-existent ID")
+}
+
+func (s *UsageLogRepoSuite) TestGetByID_ReturnsAccountRateMultiplier() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid-mult@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid-mult", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid-mult"})
+
+	m := 0.5
+	log := &service.UsageLog{
+		UserID: user.ID,
+		APIKeyID: apiKey.ID,
+		AccountID: account.ID,
+		RequestID: uuid.New().String(),
+		Model: "claude-3",
+		InputTokens: 10,
+		OutputTokens: 20,
+		TotalCost: 1.0,
+		ActualCost: 2.0,
+		AccountRateMultiplier: &m,
+		CreatedAt: timezone.Today().Add(2 * time.Hour),
+	}
+	_, err := s.repo.Create(s.ctx, log)
+	s.Require().NoError(err)
+
+	got, err := s.repo.GetByID(s.ctx, log.ID)
+	s.Require().NoError(err)
+	s.Require().NotNil(got.AccountRateMultiplier)
+	s.Require().InEpsilon(0.5, *got.AccountRateMultiplier, 0.0001)
+}
+
+// --- Delete ---
+
+func (s *UsageLogRepoSuite) TestDelete() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "delete@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-delete", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-delete"})
+
+	log := s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+
+	err := s.repo.Delete(s.ctx, log.ID)
+	s.Require().NoError(err, "Delete")
+
+	_, err = s.repo.GetByID(s.ctx, log.ID)
+	s.Require().Error(err, "expected error after delete")
+}
+
+// --- ListByUser ---
+
+func (s *UsageLogRepoSuite) TestListByUser() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyuser@test.com"})
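+	// Each test provisions its own fixtures; SetupTest hands the repository a
+	// per-test transaction via testEntTx, so (assuming that helper rolls the
+	// transaction back after each test) rows created here do not leak across cases.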
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-listbyuser", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyuser"})
+
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, time.Now())
+
+	logs, page, err := s.repo.ListByUser(s.ctx, user.ID, pagination.PaginationParams{Page: 1, PageSize: 10})
+	s.Require().NoError(err, "ListByUser")
+	s.Require().Len(logs, 2)
+	s.Require().Equal(int64(2), page.Total)
+}
+
+// --- ListByAPIKey ---
+
+func (s *UsageLogRepoSuite) TestListByAPIKey() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyapikey@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-listbyapikey", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyapikey"})
+
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, time.Now())
+
+	logs, page, err := s.repo.ListByAPIKey(s.ctx, apiKey.ID, pagination.PaginationParams{Page: 1, PageSize: 10})
+	s.Require().NoError(err, "ListByAPIKey")
+	s.Require().Len(logs, 2)
+	s.Require().Equal(int64(2), page.Total)
+}
+
+// --- ListByAccount ---
+
+func (s *UsageLogRepoSuite) TestListByAccount() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "listbyaccount@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-listbyaccount", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-listbyaccount"})
+
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+
+	logs, page, err := s.repo.ListByAccount(s.ctx, account.ID, pagination.PaginationParams{Page: 1, PageSize: 10})
+	s.Require().NoError(err, "ListByAccount")
+	s.Require().Len(logs, 1)
+	s.Require().Equal(int64(1), page.Total)
+}
+
+// --- GetUserStats ---
+
+func (s *UsageLogRepoSuite) TestGetUserStats() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "userstats@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-userstats", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-userstats"})
+
+	base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base)
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour))
+
+	startTime := base.Add(-1 * time.Hour)
+	endTime := base.Add(2 * time.Hour)
+	stats, err := s.repo.GetUserStats(s.ctx, user.ID, startTime, endTime)
+	s.Require().NoError(err, "GetUserStats")
+	s.Require().Equal(int64(2), stats.TotalRequests)
+	s.Require().Equal(int64(25), stats.InputTokens)
+	s.Require().Equal(int64(45), stats.OutputTokens)
+}
+
+// --- ListWithFilters ---
+
+func (s *UsageLogRepoSuite) TestListWithFilters() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "filters@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-filters", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filters"})
+
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+
+	filters := usagestats.UsageLogFilters{UserID: user.ID}
+	logs, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, filters)
+	s.Require().NoError(err, "ListWithFilters")
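+	// Filters compose with AND in ListWithFilters; e.g. also setting
+	// Model: "claude-3" on the struct above would narrow the result to that
+	// user's claude-3 logs.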
+	s.Require().Len(logs, 1)
+	s.Require().Equal(int64(1), page.Total)
+}
+
+// --- GetDashboardStats ---
+
+func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() {
+	now := time.Now().UTC()
+	todayStart := truncateToDayUTC(now)
+	baseStats, err := s.repo.GetDashboardStats(s.ctx)
+	s.Require().NoError(err, "GetDashboardStats base")
+
+	userToday := mustCreateUser(s.T(), s.client, &service.User{
+		Email: "today@example.com",
+		CreatedAt: testMaxTime(todayStart.Add(10*time.Second), now.Add(-10*time.Second)),
+		UpdatedAt: now,
+	})
+	userOld := mustCreateUser(s.T(), s.client, &service.User{
+		Email: "old@example.com",
+		CreatedAt: todayStart.Add(-24 * time.Hour),
+		UpdatedAt: todayStart.Add(-24 * time.Hour),
+	})
+
+	group := mustCreateGroup(s.T(), s.client, &service.Group{Name: "g-ul"})
+	apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: userToday.ID, Key: "sk-ul-1", Name: "ul1"})
+	mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: userOld.ID, Key: "sk-ul-2", Name: "ul2", Status: service.StatusDisabled})
+
+	resetAt := now.Add(10 * time.Minute)
+	accNormal := mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-normal", Schedulable: true})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-error", Status: service.StatusError, Schedulable: true})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-rl", RateLimitedAt: &now, RateLimitResetAt: &resetAt, Schedulable: true})
+	mustCreateAccount(s.T(), s.client, &service.Account{Name: "a-ov", OverloadUntil: &resetAt, Schedulable: true})
+
+	d1, d2, d3 := 100, 200, 300
+	logToday := &service.UsageLog{
+		UserID: userToday.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: accNormal.ID,
+		Model: "claude-3",
+		GroupID: &group.ID,
+		InputTokens: 10,
+		OutputTokens: 20,
+		CacheCreationTokens: 3,
+		CacheReadTokens: 4,
+		TotalCost: 1.5,
+		ActualCost: 1.2,
+		DurationMs: &d1,
+		CreatedAt: testMaxTime(todayStart.Add(2*time.Minute), now.Add(-2*time.Minute)),
+	}
+	_, err = s.repo.Create(s.ctx, logToday)
+	s.Require().NoError(err, "Create logToday")
+
+	logOld := &service.UsageLog{
+		UserID: userOld.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: accNormal.ID,
+		Model: "claude-3",
+		InputTokens: 5,
+		OutputTokens: 6,
+		TotalCost: 0.7,
+		ActualCost: 0.7,
+		DurationMs: &d2,
+		CreatedAt: todayStart.Add(-1 * time.Hour),
+	}
+	_, err = s.repo.Create(s.ctx, logOld)
+	s.Require().NoError(err, "Create logOld")
+
+	logPerf := &service.UsageLog{
+		UserID: userToday.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: accNormal.ID,
+		Model: "claude-3",
+		InputTokens: 1,
+		OutputTokens: 2,
+		TotalCost: 0.1,
+		ActualCost: 0.1,
+		DurationMs: &d3,
+		CreatedAt: now.Add(-30 * time.Second),
+	}
+	_, err = s.repo.Create(s.ctx, logPerf)
+	s.Require().NoError(err, "Create logPerf")
+
+	aggRepo := newDashboardAggregationRepositoryWithSQL(s.tx)
+	aggStart := todayStart.Add(-2 * time.Hour)
+	aggEnd := now.Add(2 * time.Minute)
+	s.Require().NoError(aggRepo.AggregateRange(s.ctx, aggStart, aggEnd), "AggregateRange")
+
+	stats, err := s.repo.GetDashboardStats(s.ctx)
+	s.Require().NoError(err, "GetDashboardStats")
+
+	s.Require().Equal(baseStats.TotalUsers+2, stats.TotalUsers, "TotalUsers mismatch")
+	s.Require().Equal(baseStats.TodayNewUsers+1, stats.TodayNewUsers, "TodayNewUsers mismatch")
+	s.Require().Equal(baseStats.ActiveUsers+1, stats.ActiveUsers, "ActiveUsers mismatch")
+	s.Require().Equal(baseStats.TotalAPIKeys+2, stats.TotalAPIKeys, "TotalAPIKeys mismatch")
+	s.Require().Equal(baseStats.ActiveAPIKeys+1, stats.ActiveAPIKeys, "ActiveAPIKeys mismatch")
+	s.Require().Equal(baseStats.TotalAccounts+4, stats.TotalAccounts, "TotalAccounts mismatch")
+	s.Require().Equal(baseStats.ErrorAccounts+1, stats.ErrorAccounts, "ErrorAccounts mismatch")
+	s.Require().Equal(baseStats.RateLimitAccounts+1, stats.RateLimitAccounts, "RateLimitAccounts mismatch")
+	s.Require().Equal(baseStats.OverloadAccounts+1, stats.OverloadAccounts, "OverloadAccounts mismatch")
+
+	s.Require().Equal(baseStats.TotalRequests+3, stats.TotalRequests, "TotalRequests mismatch")
+	s.Require().Equal(baseStats.TotalInputTokens+int64(16), stats.TotalInputTokens, "TotalInputTokens mismatch")
+	s.Require().Equal(baseStats.TotalOutputTokens+int64(28), stats.TotalOutputTokens, "TotalOutputTokens mismatch")
+	s.Require().Equal(baseStats.TotalCacheCreationTokens+int64(3), stats.TotalCacheCreationTokens, "TotalCacheCreationTokens mismatch")
+	s.Require().Equal(baseStats.TotalCacheReadTokens+int64(4), stats.TotalCacheReadTokens, "TotalCacheReadTokens mismatch")
+	s.Require().Equal(baseStats.TotalTokens+int64(51), stats.TotalTokens, "TotalTokens mismatch")
+	s.Require().Equal(baseStats.TotalCost+2.3, stats.TotalCost, "TotalCost mismatch")
+	s.Require().Equal(baseStats.TotalActualCost+2.0, stats.TotalActualCost, "TotalActualCost mismatch")
+	s.Require().GreaterOrEqual(stats.TodayRequests, int64(1), "expected TodayRequests >= 1")
+	s.Require().GreaterOrEqual(stats.TodayCost, 0.0, "expected TodayCost >= 0")
+
+	wantRpm, wantTpm, err := s.repo.getPerformanceStats(s.ctx, 0)
+	s.Require().NoError(err, "getPerformanceStats")
+	s.Require().Equal(wantRpm, stats.Rpm, "Rpm mismatch")
+	s.Require().Equal(wantTpm, stats.Tpm, "Tpm mismatch")
+}
+
+func (s *UsageLogRepoSuite) TestDashboardStatsWithRange_Fallback() {
+	now := time.Now().UTC()
+	todayStart := truncateToDayUTC(now)
+	rangeStart := todayStart.Add(-24 * time.Hour)
+	rangeEnd := now.Add(1 * time.Second)
+
+	user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "range-u1@test.com"})
+	user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "range-u2@test.com"})
+	apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: "sk-range-1", Name: "k1"})
+	apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-range-2", Name: "k2"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-range"})
+
+	d1, d2, d3 := 100, 200, 300
+	logOutside := &service.UsageLog{
+		UserID: user1.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 7,
+		OutputTokens: 8,
+		TotalCost: 0.8,
+		ActualCost: 0.7,
+		DurationMs: &d3,
+		CreatedAt: rangeStart.Add(-1 * time.Hour),
+	}
+	_, err := s.repo.Create(s.ctx, logOutside)
+	s.Require().NoError(err)
+
+	logRange := &service.UsageLog{
+		UserID: user1.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 10,
+		OutputTokens: 20,
+		CacheCreationTokens: 1,
+		CacheReadTokens: 2,
+		TotalCost: 1.0,
+		ActualCost: 0.9,
+		DurationMs: &d1,
+		CreatedAt: rangeStart.Add(2 * time.Hour),
+	}
+	_, err = s.repo.Create(s.ctx, logRange)
+	s.Require().NoError(err)
+
+	logToday := &service.UsageLog{
+		UserID: user2.ID,
+		APIKeyID: apiKey2.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 5,
+		OutputTokens: 6,
+		CacheReadTokens: 1,
+		TotalCost: 0.5,
+		ActualCost: 0.5,
+		DurationMs: &d2,
+		CreatedAt: now,
+	}
+	_, err = s.repo.Create(s.ctx, logToday)
+	s.Require().NoError(err)
+
+	stats, err := s.repo.GetDashboardStatsWithRange(s.ctx, rangeStart, rangeEnd)
+	s.Require().NoError(err)
+	s.Require().Equal(int64(2), stats.TotalRequests)
+	s.Require().Equal(int64(15), stats.TotalInputTokens)
+	s.Require().Equal(int64(26), stats.TotalOutputTokens)
+	s.Require().Equal(int64(1), stats.TotalCacheCreationTokens)
+	s.Require().Equal(int64(3), stats.TotalCacheReadTokens)
+	s.Require().Equal(int64(45), stats.TotalTokens)
+	s.Require().Equal(1.5, stats.TotalCost)
+	s.Require().Equal(1.4, stats.TotalActualCost)
+	s.Require().InEpsilon(150.0, stats.AverageDurationMs, 0.0001)
+}
+
+// --- GetUserDashboardStats ---
+
+func (s *UsageLogRepoSuite) TestGetUserDashboardStats() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "userdash@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-userdash", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-userdash"})
+
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+
+	stats, err := s.repo.GetUserDashboardStats(s.ctx, user.ID)
+	s.Require().NoError(err, "GetUserDashboardStats")
+	s.Require().Equal(int64(1), stats.TotalAPIKeys)
+	s.Require().Equal(int64(1), stats.TotalRequests)
+}
+
+// --- GetAccountTodayStats ---
+
+func (s *UsageLogRepoSuite) TestGetAccountTodayStats() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "acctoday@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-acctoday", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-today"})
+
+	createdAt := timezone.Today().Add(1 * time.Hour)
+
+	m1 := 1.5
+	m2 := 0.0
+	_, err := s.repo.Create(s.ctx, &service.UsageLog{
+		UserID: user.ID,
+		APIKeyID: apiKey.ID,
+		AccountID: account.ID,
+		RequestID: uuid.New().String(),
+		Model: "claude-3",
+		InputTokens: 10,
+		OutputTokens: 20,
+		TotalCost: 1.0,
+		ActualCost: 2.0,
+		AccountRateMultiplier: &m1,
+		CreatedAt: createdAt,
+	})
+	s.Require().NoError(err)
+	_, err = s.repo.Create(s.ctx, &service.UsageLog{
+		UserID: user.ID,
+		APIKeyID: apiKey.ID,
+		AccountID: account.ID,
+		RequestID: uuid.New().String(),
+		Model: "claude-3",
+		InputTokens: 5,
+		OutputTokens: 5,
+		TotalCost: 0.5,
+		ActualCost: 1.0,
+		AccountRateMultiplier: &m2,
+		CreatedAt: createdAt,
+	})
+	s.Require().NoError(err)
+
+	stats, err := s.repo.GetAccountTodayStats(s.ctx, account.ID)
+	s.Require().NoError(err, "GetAccountTodayStats")
+	s.Require().Equal(int64(2), stats.Requests)
+	s.Require().Equal(int64(40), stats.Tokens)
+	// account cost = SUM(total_cost * account_rate_multiplier)
+	s.Require().InEpsilon(1.5, stats.Cost, 0.0001)
+	// standard cost = SUM(total_cost)
+	s.Require().InEpsilon(1.5, stats.StandardCost, 0.0001)
+	// user cost = SUM(actual_cost)
+	s.Require().InEpsilon(3.0, stats.UserCost, 0.0001)
+}
+
+func (s *UsageLogRepoSuite) TestDashboardAggregationConsistency() {
+	now := time.Now().UTC().Truncate(time.Second)
+	// Use fixed offsets so hour1 and hour2 fall on the same day and are both in the past.
+	// Pick 02:00 and 03:00 of the current day (based on now's date) as the test buckets.
+	dayStart := truncateToDayUTC(now)
+	hour1 := dayStart.Add(2 * time.Hour) // 02:00 of that day
+	hour2 := dayStart.Add(3 * time.Hour) // 03:00 of that day
+	// If the current time has not yet passed the end of hour2, use yesterday instead.
+	if now.Before(hour2.Add(time.Hour)) {
+		dayStart = dayStart.Add(-24 * time.Hour)
+		hour1 = dayStart.Add(2 * time.Hour)
+		hour2 = dayStart.Add(3 * time.Hour)
+	}
+
+	user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "agg-u1@test.com"})
+	user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "agg-u2@test.com"})
+	apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: "sk-agg-1", Name: "k1"})
+	apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-agg-2", Name: "k2"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-agg"})
+
+	d1, d2, d3 := 100, 200, 150
+	log1 := &service.UsageLog{
+		UserID: user1.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 10,
+		OutputTokens: 20,
+		CacheCreationTokens: 2,
+		CacheReadTokens: 1,
+		TotalCost: 1.0,
+		ActualCost: 0.9,
+		DurationMs: &d1,
+		CreatedAt: hour1.Add(5 * time.Minute),
+	}
+	_, err := s.repo.Create(s.ctx, log1)
+	s.Require().NoError(err)
+
+	log2 := &service.UsageLog{
+		UserID: user1.ID,
+		APIKeyID: apiKey1.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 5,
+		OutputTokens: 5,
+		TotalCost: 0.5,
+		ActualCost: 0.5,
+		DurationMs: &d2,
+		CreatedAt: hour1.Add(20 * time.Minute),
+	}
+	_, err = s.repo.Create(s.ctx, log2)
+	s.Require().NoError(err)
+
+	log3 := &service.UsageLog{
+		UserID: user2.ID,
+		APIKeyID: apiKey2.ID,
+		AccountID: account.ID,
+		Model: "claude-3",
+		InputTokens: 7,
+		OutputTokens: 8,
+		TotalCost: 0.7,
+		ActualCost: 0.7,
+		DurationMs: &d3,
+		CreatedAt: hour2.Add(10 * time.Minute),
+	}
+	_, err = s.repo.Create(s.ctx, log3)
+	s.Require().NoError(err)
+
+	aggRepo := newDashboardAggregationRepositoryWithSQL(s.tx)
+	aggStart := hour1.Add(-5 * time.Minute)
+	aggEnd := hour2.Add(time.Hour) // make sure the window covers all of hour2's data
+	s.Require().NoError(aggRepo.AggregateRange(s.ctx, aggStart, aggEnd))
+
+	type hourlyRow struct {
+		totalRequests int64
+		inputTokens int64
+		outputTokens int64
+		cacheCreationTokens int64
+		cacheReadTokens int64
+		totalCost float64
+		actualCost float64
+		totalDurationMs int64
+		activeUsers int64
+	}
+	fetchHourly := func(bucketStart time.Time) hourlyRow {
+		var row hourlyRow
+		err := scanSingleRow(s.ctx, s.tx, `
+			SELECT total_requests, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens,
+				total_cost, actual_cost, total_duration_ms, active_users
+			FROM usage_dashboard_hourly
+			WHERE bucket_start = $1
+		`, []any{bucketStart}, &row.totalRequests, &row.inputTokens, &row.outputTokens,
+			&row.cacheCreationTokens, &row.cacheReadTokens, &row.totalCost, &row.actualCost,
+			&row.totalDurationMs, &row.activeUsers,
+		)
+		s.Require().NoError(err)
+		return row
+	}
+
+	hour1Row := fetchHourly(hour1)
+	s.Require().Equal(int64(2), hour1Row.totalRequests)
+	s.Require().Equal(int64(15), hour1Row.inputTokens)
+	s.Require().Equal(int64(25), hour1Row.outputTokens)
+	s.Require().Equal(int64(2), hour1Row.cacheCreationTokens)
+	s.Require().Equal(int64(1), hour1Row.cacheReadTokens)
+	s.Require().Equal(1.5, hour1Row.totalCost)
+	s.Require().Equal(1.4, hour1Row.actualCost)
+	s.Require().Equal(int64(300), hour1Row.totalDurationMs)
+	s.Require().Equal(int64(1), hour1Row.activeUsers)
+
+	hour2Row := fetchHourly(hour2)
+	s.Require().Equal(int64(1), hour2Row.totalRequests)
+	s.Require().Equal(int64(7), hour2Row.inputTokens)
+	s.Require().Equal(int64(8), hour2Row.outputTokens)
+	s.Require().Equal(int64(0), hour2Row.cacheCreationTokens)
+	s.Require().Equal(int64(0), hour2Row.cacheReadTokens)
+	s.Require().Equal(0.7, hour2Row.totalCost)
+	s.Require().Equal(0.7, hour2Row.actualCost)
+	s.Require().Equal(int64(150), hour2Row.totalDurationMs)
+	s.Require().Equal(int64(1), hour2Row.activeUsers)
+
+	var daily struct {
+		totalRequests int64
+		inputTokens int64
+		outputTokens int64
+		cacheCreationTokens int64
+		cacheReadTokens int64
+		totalCost float64
+		actualCost float64
+		totalDurationMs int64
+		activeUsers int64
+	}
+	err = scanSingleRow(s.ctx, s.tx, `
+		SELECT total_requests, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens,
+			total_cost, actual_cost, total_duration_ms, active_users
+		FROM usage_dashboard_daily
+		WHERE bucket_date = $1::date
+	`, []any{dayStart}, &daily.totalRequests, &daily.inputTokens, &daily.outputTokens,
+		&daily.cacheCreationTokens, &daily.cacheReadTokens, &daily.totalCost, &daily.actualCost,
+		&daily.totalDurationMs, &daily.activeUsers,
+	)
+	s.Require().NoError(err)
+	s.Require().Equal(int64(3), daily.totalRequests)
+	s.Require().Equal(int64(22), daily.inputTokens)
+	s.Require().Equal(int64(33), daily.outputTokens)
+	s.Require().Equal(int64(2), daily.cacheCreationTokens)
+	s.Require().Equal(int64(1), daily.cacheReadTokens)
+	s.Require().Equal(2.2, daily.totalCost)
+	s.Require().Equal(2.1, daily.actualCost)
+	s.Require().Equal(int64(450), daily.totalDurationMs)
+	s.Require().Equal(int64(2), daily.activeUsers)
+}
+
+// --- GetBatchUserUsageStats ---
+
+func (s *UsageLogRepoSuite) TestGetBatchUserUsageStats() {
+	user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "batch1@test.com"})
+	user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "batch2@test.com"})
+	apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: "sk-batch1", Name: "k"})
+	apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-batch2", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-batch"})
+
+	s.createUsageLog(user1, apiKey1, account, 10, 20, 0.5, time.Now())
+	s.createUsageLog(user2, apiKey2, account, 15, 25, 0.6, time.Now())
+
+	stats, err := s.repo.GetBatchUserUsageStats(s.ctx, []int64{user1.ID, user2.ID})
+	s.Require().NoError(err, "GetBatchUserUsageStats")
+	s.Require().Len(stats, 2)
+	s.Require().NotNil(stats[user1.ID])
+	s.Require().NotNil(stats[user2.ID])
+}
+
+func (s *UsageLogRepoSuite) TestGetBatchUserUsageStats_Empty() {
+	stats, err := s.repo.GetBatchUserUsageStats(s.ctx, []int64{})
+	s.Require().NoError(err)
+	s.Require().Empty(stats)
+}
+
+// --- GetBatchAPIKeyUsageStats ---
+
+func (s *UsageLogRepoSuite) TestGetBatchApiKeyUsageStats() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "batchkey@test.com"})
+	apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-batchkey1", Name: "k1"})
+	apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-batchkey2", Name: "k2"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-batchkey"})
+
+	s.createUsageLog(user, apiKey1, account, 10, 20, 0.5, time.Now())
+	s.createUsageLog(user, apiKey2, account, 15, 25, 0.6, time.Now())
+
+	stats, err := s.repo.GetBatchAPIKeyUsageStats(s.ctx, []int64{apiKey1.ID, apiKey2.ID})
+	s.Require().NoError(err, "GetBatchAPIKeyUsageStats")
+	s.Require().Len(stats, 2)
+}
+
+func (s *UsageLogRepoSuite) TestGetBatchApiKeyUsageStats_Empty() {
+	stats, err := s.repo.GetBatchAPIKeyUsageStats(s.ctx, []int64{})
+	s.Require().NoError(err)
+	s.Require().Empty(stats)
+}
+
+// --- GetGlobalStats ---
+
+func (s *UsageLogRepoSuite) TestGetGlobalStats() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "global@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-global", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-global"})
+
+	base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base)
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour))
+
+	stats, err := s.repo.GetGlobalStats(s.ctx, base.Add(-1*time.Hour), base.Add(2*time.Hour))
+	s.Require().NoError(err, "GetGlobalStats")
+	s.Require().Equal(int64(2), stats.TotalRequests)
+	s.Require().Equal(int64(25), stats.TotalInputTokens)
+	s.Require().Equal(int64(45), stats.TotalOutputTokens)
+}
+
+func testMaxTime(a, b time.Time) time.Time {
+	if a.After(b) {
+		return a
+	}
+	return b
+}
+
+// --- ListByUserAndTimeRange ---
+
+func (s *UsageLogRepoSuite) TestListByUserAndTimeRange() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "timerange@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-timerange", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-timerange"})
+
+	base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base)
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour))
+	s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(-24*time.Hour)) // outside range
+
+	startTime := base.Add(-1 * time.Hour)
+	endTime := base.Add(2 * time.Hour)
+	logs, _, err := s.repo.ListByUserAndTimeRange(s.ctx, user.ID, startTime, endTime)
+	s.Require().NoError(err, "ListByUserAndTimeRange")
+	s.Require().Len(logs, 2)
+}
+
+// --- ListByAPIKeyAndTimeRange ---
+
+func (s *UsageLogRepoSuite) TestListByAPIKeyAndTimeRange() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytimerange@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-keytimerange", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-keytimerange"})
+
+	base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base)
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(30*time.Minute))
+	s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(-24*time.Hour)) // outside range
+
+	startTime := base.Add(-1 * time.Hour)
+	endTime := base.Add(2 * time.Hour)
+	logs, _, err := s.repo.ListByAPIKeyAndTimeRange(s.ctx, apiKey.ID, startTime, endTime)
+	s.Require().NoError(err, "ListByAPIKeyAndTimeRange")
+	s.Require().Len(logs, 2)
+}
+
+// --- ListByAccountAndTimeRange ---
+
+func (s *UsageLogRepoSuite) TestListByAccountAndTimeRange() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "acctimerange@test.com"})
+	apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-acctimerange", Name: "k"})
+	account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-acctimerange"})
+
+	base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC)
+	s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base)
+	s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(45*time.Minute))
+	s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(-24*time.Hour)) // outside range
+
+	startTime := base.Add(-1 * time.Hour)
+	endTime := base.Add(2 * time.Hour)
+	logs, _, err := s.repo.ListByAccountAndTimeRange(s.ctx, account.ID, startTime, endTime)
+	s.Require().NoError(err, "ListByAccountAndTimeRange")
+	s.Require().Len(logs, 2)
+}
+
+// --- ListByModelAndTimeRange ---
+
+func (s *UsageLogRepoSuite) TestListByModelAndTimeRange() {
+	user := mustCreateUser(s.T(), s.client, &service.User{Email: "modeltimerange@test.com"})
mustCreateUser(s.T(), s.client, &service.User{Email: "modeltimerange@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-modeltimerange", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modeltimerange"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + + // Create logs with different models + log1 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-opus", + InputTokens: 10, + OutputTokens: 20, + TotalCost: 0.5, + ActualCost: 0.5, + CreatedAt: base, + } + _, err := s.repo.Create(s.ctx, log1) + s.Require().NoError(err) + + log2 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-opus", + InputTokens: 15, + OutputTokens: 25, + TotalCost: 0.6, + ActualCost: 0.6, + CreatedAt: base.Add(30 * time.Minute), + } + _, err = s.repo.Create(s.ctx, log2) + s.Require().NoError(err) + + log3 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-sonnet", + InputTokens: 20, + OutputTokens: 30, + TotalCost: 0.7, + ActualCost: 0.7, + CreatedAt: base.Add(1 * time.Hour), + } + _, err = s.repo.Create(s.ctx, log3) + s.Require().NoError(err) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(2 * time.Hour) + logs, _, err := s.repo.ListByModelAndTimeRange(s.ctx, "claude-3-opus", startTime, endTime) + s.Require().NoError(err, "ListByModelAndTimeRange") + s.Require().Len(logs, 2) +} + +// --- GetAccountWindowStats --- + +func (s *UsageLogRepoSuite) TestGetAccountWindowStats() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "windowstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-windowstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-windowstats"}) + + now := time.Now() + windowStart := now.Add(-10 * time.Minute) + + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, now.Add(-5*time.Minute)) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, now.Add(-3*time.Minute)) + s.createUsageLog(user, apiKey, account, 20, 30, 0.7, now.Add(-30*time.Minute)) // outside window + + stats, err := s.repo.GetAccountWindowStats(s.ctx, account.ID, windowStart) + s.Require().NoError(err, "GetAccountWindowStats") + s.Require().Equal(int64(2), stats.Requests) + s.Require().Equal(int64(70), stats.Tokens) // (10+20) + (15+25) +} + +// --- GetUserUsageTrendByUserID --- + +func (s *UsageLogRepoSuite) TestGetUserUsageTrendByUserID() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-usertrend", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrend"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour)) + s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(24*time.Hour)) // next day + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(48 * time.Hour) + trend, err := s.repo.GetUserUsageTrendByUserID(s.ctx, user.ID, startTime, endTime, "day") + s.Require().NoError(err, "GetUserUsageTrendByUserID") + s.Require().Len(trend, 2) // 2 different days +} + +func (s *UsageLogRepoSuite) 
TestGetUserUsageTrendByUserID_HourlyGranularity() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrendhourly@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-usertrendhourly", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrendhourly"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour)) + s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(2*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(3 * time.Hour) + trend, err := s.repo.GetUserUsageTrendByUserID(s.ctx, user.ID, startTime, endTime, "hour") + s.Require().NoError(err, "GetUserUsageTrendByUserID hourly") + s.Require().Len(trend, 3) // 3 different hours +} + +// --- GetUserModelStats --- + +func (s *UsageLogRepoSuite) TestGetUserModelStats() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "modelstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-modelstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modelstats"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + + // Create logs with different models + log1 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-opus", + InputTokens: 100, + OutputTokens: 200, + TotalCost: 0.5, + ActualCost: 0.5, + CreatedAt: base, + } + _, err := s.repo.Create(s.ctx, log1) + s.Require().NoError(err) + + log2 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-sonnet", + InputTokens: 50, + OutputTokens: 100, + TotalCost: 0.2, + ActualCost: 0.2, + CreatedAt: base.Add(1 * time.Hour), + } + _, err = s.repo.Create(s.ctx, log2) + s.Require().NoError(err) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(2 * time.Hour) + stats, err := s.repo.GetUserModelStats(s.ctx, user.ID, startTime, endTime) + s.Require().NoError(err, "GetUserModelStats") + s.Require().Len(stats, 2) + + // Should be ordered by total_tokens DESC + s.Require().Equal("claude-3-opus", stats[0].Model) + s.Require().Equal(int64(300), stats[0].TotalTokens) +} + +// --- GetUsageTrendWithFilters --- + +func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "trendfilters@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-trendfilters", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-trendfilters"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(24*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(48 * time.Hour) + + // Test with user filter + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil) + s.Require().NoError(err, "GetUsageTrendWithFilters user filter") + s.Require().Len(trend, 2) + + // Test with apiKey filter + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil) + s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter") + s.Require().Len(trend, 2) + + // Test with both 
filters + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil) + s.Require().NoError(err, "GetUsageTrendWithFilters both filters") + s.Require().Len(trend, 2) +} + +func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "trendfilters-h@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-trendfilters-h", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-trendfilters-h"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(3 * time.Hour) + + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil) + s.Require().NoError(err, "GetUsageTrendWithFilters hourly") + s.Require().Len(trend, 2) +} + +// --- GetModelStatsWithFilters --- + +func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "modelfilters@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-modelfilters", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-modelfilters"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + + log1 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-opus", + InputTokens: 100, + OutputTokens: 200, + TotalCost: 0.5, + ActualCost: 0.5, + CreatedAt: base, + } + _, err := s.repo.Create(s.ctx, log1) + s.Require().NoError(err) + + log2 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-sonnet", + InputTokens: 50, + OutputTokens: 100, + TotalCost: 0.2, + ActualCost: 0.2, + CreatedAt: base.Add(1 * time.Hour), + } + _, err = s.repo.Create(s.ctx, log2) + s.Require().NoError(err) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(2 * time.Hour) + + // Test with user filter + stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil) + s.Require().NoError(err, "GetModelStatsWithFilters user filter") + s.Require().Len(stats, 2) + + // Test with apiKey filter + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil) + s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter") + s.Require().Len(stats, 2) + + // Test with account filter + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil) + s.Require().NoError(err, "GetModelStatsWithFilters account filter") + s.Require().Len(stats, 2) +} + +// --- GetAccountUsageStats --- + +func (s *UsageLogRepoSuite) TestGetAccountUsageStats() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "accstats@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-accstats", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-accstats"}) + + base := time.Date(2025, 1, 15, 0, 0, 0, 0, time.UTC) + + // Create logs on different days + log1 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-opus", + InputTokens: 100, + OutputTokens: 200, + 
TotalCost: 0.5, + ActualCost: 0.4, + CreatedAt: base.Add(12 * time.Hour), + } + _, err := s.repo.Create(s.ctx, log1) + s.Require().NoError(err) + + log2 := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + Model: "claude-3-sonnet", + InputTokens: 50, + OutputTokens: 100, + TotalCost: 0.2, + ActualCost: 0.15, + CreatedAt: base.Add(36 * time.Hour), // next day + } + _, err = s.repo.Create(s.ctx, log2) + s.Require().NoError(err) + + startTime := base + endTime := base.Add(72 * time.Hour) + + resp, err := s.repo.GetAccountUsageStats(s.ctx, account.ID, startTime, endTime) + s.Require().NoError(err, "GetAccountUsageStats") + + s.Require().Len(resp.History, 2, "expected 2 days of history") + s.Require().Equal(int64(2), resp.Summary.TotalRequests) + s.Require().Equal(int64(450), resp.Summary.TotalTokens) + s.Require().Len(resp.Models, 2) +} + +func (s *UsageLogRepoSuite) TestGetAccountUsageStats_EmptyRange() { + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-emptystats"}) + + base := time.Date(2025, 1, 15, 0, 0, 0, 0, time.UTC) + startTime := base + endTime := base.Add(72 * time.Hour) + + resp, err := s.repo.GetAccountUsageStats(s.ctx, account.ID, startTime, endTime) + s.Require().NoError(err, "GetAccountUsageStats empty") + + s.Require().Len(resp.History, 0) + s.Require().Equal(int64(0), resp.Summary.TotalRequests) +} + +// --- GetUserUsageTrend --- + +func (s *UsageLogRepoSuite) TestGetUserUsageTrend() { + user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend1@test.com"}) + user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "usertrend2@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: "sk-usertrend1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-usertrend2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-usertrends"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user1, apiKey1, account, 100, 200, 1.0, base) + s.createUsageLog(user2, apiKey2, account, 50, 100, 0.5, base) + s.createUsageLog(user1, apiKey1, account, 100, 200, 1.0, base.Add(24*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(48 * time.Hour) + + trend, err := s.repo.GetUserUsageTrend(s.ctx, startTime, endTime, "day", 10) + s.Require().NoError(err, "GetUserUsageTrend") + s.Require().GreaterOrEqual(len(trend), 2) +} + +// --- GetAPIKeyUsageTrend --- + +func (s *UsageLogRepoSuite) TestGetAPIKeyUsageTrend() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytrend@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-keytrend1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-keytrend2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-keytrends"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey1, account, 100, 200, 1.0, base) + s.createUsageLog(user, apiKey2, account, 50, 100, 0.5, base) + s.createUsageLog(user, apiKey1, account, 100, 200, 1.0, base.Add(24*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(48 * time.Hour) + + trend, err := s.repo.GetAPIKeyUsageTrend(s.ctx, startTime, endTime, "day", 10) + s.Require().NoError(err, "GetAPIKeyUsageTrend") + s.Require().GreaterOrEqual(len(trend), 2) +} + +func (s 
*UsageLogRepoSuite) TestGetAPIKeyUsageTrend_HourlyGranularity() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "keytrendh@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-keytrendh", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-keytrendh"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 100, 200, 1.0, base) + s.createUsageLog(user, apiKey, account, 50, 100, 0.5, base.Add(1*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(3 * time.Hour) + + trend, err := s.repo.GetAPIKeyUsageTrend(s.ctx, startTime, endTime, "hour", 10) + s.Require().NoError(err, "GetAPIKeyUsageTrend hourly") + s.Require().Len(trend, 2) +} + +// --- ListWithFilters (additional filter tests) --- + +func (s *UsageLogRepoSuite) TestListWithFilters_ApiKeyFilter() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterskey@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-filterskey", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterskey"}) + + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) + + filters := usagestats.UsageLogFilters{APIKeyID: apiKey.ID} + logs, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, filters) + s.Require().NoError(err, "ListWithFilters apiKey") + s.Require().Len(logs, 1) + s.Require().Equal(int64(1), page.Total) +} + +func (s *UsageLogRepoSuite) TestListWithFilters_TimeRange() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterstime@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-filterstime", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterstime"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour)) + s.createUsageLog(user, apiKey, account, 20, 30, 0.7, base.Add(-24*time.Hour)) // outside range + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(2 * time.Hour) + filters := usagestats.UsageLogFilters{StartTime: &startTime, EndTime: &endTime} + logs, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, filters) + s.Require().NoError(err, "ListWithFilters time range") + s.Require().Len(logs, 2) + s.Require().Equal(int64(2), page.Total) +} + +func (s *UsageLogRepoSuite) TestListWithFilters_CombinedFilters() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "filterscombined@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-filterscombined", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-filterscombined"}) + + base := time.Date(2025, 1, 15, 12, 0, 0, 0, time.UTC) + s.createUsageLog(user, apiKey, account, 10, 20, 0.5, base) + s.createUsageLog(user, apiKey, account, 15, 25, 0.6, base.Add(1*time.Hour)) + + startTime := base.Add(-1 * time.Hour) + endTime := base.Add(2 * time.Hour) + filters := usagestats.UsageLogFilters{ + UserID: user.ID, + APIKeyID: apiKey.ID, + StartTime: &startTime, + EndTime: &endTime, + } + logs, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, filters) + 
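// All filters are ANDed together, so the two logs matching the user, key, and time window are still returned. +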
s.Require().NoError(err, "ListWithFilters combined") + s.Require().Len(logs, 2) + s.Require().Equal(int64(2), page.Total) +} diff --git a/backend/internal/repository/user_attribute_repo.go b/backend/internal/repository/user_attribute_repo.go new file mode 100644 index 00000000..0b616caf --- /dev/null +++ b/backend/internal/repository/user_attribute_repo.go @@ -0,0 +1,385 @@ +package repository + +import ( + "context" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/userattributedefinition" + "github.com/Wei-Shaw/sub2api/ent/userattributevalue" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +// UserAttributeDefinitionRepository implementation +type userAttributeDefinitionRepository struct { + client *dbent.Client +} + +// NewUserAttributeDefinitionRepository creates a new repository instance +func NewUserAttributeDefinitionRepository(client *dbent.Client) service.UserAttributeDefinitionRepository { + return &userAttributeDefinitionRepository{client: client} +} + +func (r *userAttributeDefinitionRepository) Create(ctx context.Context, def *service.UserAttributeDefinition) error { + client := clientFromContext(ctx, r.client) + + created, err := client.UserAttributeDefinition.Create(). + SetKey(def.Key). + SetName(def.Name). + SetDescription(def.Description). + SetType(string(def.Type)). + SetOptions(toEntOptions(def.Options)). + SetRequired(def.Required). + SetValidation(toEntValidation(def.Validation)). + SetPlaceholder(def.Placeholder). + SetEnabled(def.Enabled). + Save(ctx) + + if err != nil { + return translatePersistenceError(err, nil, service.ErrAttributeKeyExists) + } + + def.ID = created.ID + def.DisplayOrder = created.DisplayOrder + def.CreatedAt = created.CreatedAt + def.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *userAttributeDefinitionRepository) GetByID(ctx context.Context, id int64) (*service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + e, err := client.UserAttributeDefinition.Query(). + Where(userattributedefinition.IDEQ(id)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + return defEntityToService(e), nil +} + +func (r *userAttributeDefinitionRepository) GetByKey(ctx context.Context, key string) (*service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + e, err := client.UserAttributeDefinition.Query(). + Where(userattributedefinition.KeyEQ(key)). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + return defEntityToService(e), nil +} + +func (r *userAttributeDefinitionRepository) Update(ctx context.Context, def *service.UserAttributeDefinition) error { + client := clientFromContext(ctx, r.client) + + updated, err := client.UserAttributeDefinition.UpdateOneID(def.ID). + SetName(def.Name). + SetDescription(def.Description). + SetType(string(def.Type)). + SetOptions(toEntOptions(def.Options)). + SetRequired(def.Required). + SetValidation(toEntValidation(def.Validation)). + SetPlaceholder(def.Placeholder). + SetDisplayOrder(def.DisplayOrder). + SetEnabled(def.Enabled). 
+ Save(ctx) + + if err != nil { + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, service.ErrAttributeKeyExists) + } + + def.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *userAttributeDefinitionRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeDefinition.Delete(). + Where(userattributedefinition.IDEQ(id)). + Exec(ctx) + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) +} + +func (r *userAttributeDefinitionRepository) List(ctx context.Context, enabledOnly bool) ([]service.UserAttributeDefinition, error) { + client := clientFromContext(ctx, r.client) + + q := client.UserAttributeDefinition.Query() + if enabledOnly { + q = q.Where(userattributedefinition.EnabledEQ(true)) + } + + entities, err := q.Order(dbent.Asc(userattributedefinition.FieldDisplayOrder)).All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeDefinition, 0, len(entities)) + for _, e := range entities { + result = append(result, *defEntityToService(e)) + } + return result, nil +} + +func (r *userAttributeDefinitionRepository) UpdateDisplayOrders(ctx context.Context, orders map[int64]int) error { + tx, err := r.client.Tx(ctx) + if err != nil { + return err + } + defer func() { _ = tx.Rollback() }() + + for id, order := range orders { + if _, err := tx.UserAttributeDefinition.UpdateOneID(id). + SetDisplayOrder(order). + Save(ctx); err != nil { + return translatePersistenceError(err, service.ErrAttributeDefinitionNotFound, nil) + } + } + + return tx.Commit() +} + +func (r *userAttributeDefinitionRepository) ExistsByKey(ctx context.Context, key string) (bool, error) { + client := clientFromContext(ctx, r.client) + return client.UserAttributeDefinition.Query(). + Where(userattributedefinition.KeyEQ(key)). + Exist(ctx) +} + +// UserAttributeValueRepository implementation +type userAttributeValueRepository struct { + client *dbent.Client +} + +// NewUserAttributeValueRepository creates a new repository instance +func NewUserAttributeValueRepository(client *dbent.Client) service.UserAttributeValueRepository { + return &userAttributeValueRepository{client: client} +} + +func (r *userAttributeValueRepository) GetByUserID(ctx context.Context, userID int64) ([]service.UserAttributeValue, error) { + client := clientFromContext(ctx, r.client) + + entities, err := client.UserAttributeValue.Query(). + Where(userattributevalue.UserIDEQ(userID)). + All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeValue, 0, len(entities)) + for _, e := range entities { + result = append(result, service.UserAttributeValue{ + ID: e.ID, + UserID: e.UserID, + AttributeID: e.AttributeID, + Value: e.Value, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + }) + } + return result, nil +} + +func (r *userAttributeValueRepository) GetByUserIDs(ctx context.Context, userIDs []int64) ([]service.UserAttributeValue, error) { + if len(userIDs) == 0 { + return []service.UserAttributeValue{}, nil + } + + client := clientFromContext(ctx, r.client) + + entities, err := client.UserAttributeValue.Query(). + Where(userattributevalue.UserIDIn(userIDs...)). 
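+ // A single IN query hydrates attribute values for every requested user at once, avoiding N+1 lookups.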
+ All(ctx) + if err != nil { + return nil, err + } + + result := make([]service.UserAttributeValue, 0, len(entities)) + for _, e := range entities { + result = append(result, service.UserAttributeValue{ + ID: e.ID, + UserID: e.UserID, + AttributeID: e.AttributeID, + Value: e.Value, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + }) + } + return result, nil +} + +func (r *userAttributeValueRepository) UpsertBatch(ctx context.Context, userID int64, inputs []service.UpdateUserAttributeInput) error { + if len(inputs) == 0 { + return nil + } + + tx, err := r.client.Tx(ctx) + if err != nil { + return err + } + defer func() { _ = tx.Rollback() }() + + for _, input := range inputs { + // Use upsert (ON CONFLICT DO UPDATE) + err := tx.UserAttributeValue.Create(). + SetUserID(userID). + SetAttributeID(input.AttributeID). + SetValue(input.Value). + OnConflictColumns(userattributevalue.FieldUserID, userattributevalue.FieldAttributeID). + UpdateValue(). + UpdateUpdatedAt(). + Exec(ctx) + if err != nil { + return err + } + } + + return tx.Commit() +} + +func (r *userAttributeValueRepository) DeleteByAttributeID(ctx context.Context, attributeID int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeValue.Delete(). + Where(userattributevalue.AttributeIDEQ(attributeID)). + Exec(ctx) + return err +} + +func (r *userAttributeValueRepository) DeleteByUserID(ctx context.Context, userID int64) error { + client := clientFromContext(ctx, r.client) + + _, err := client.UserAttributeValue.Delete(). + Where(userattributevalue.UserIDEQ(userID)). + Exec(ctx) + return err +} + +// Helper functions for entity to service conversion +func defEntityToService(e *dbent.UserAttributeDefinition) *service.UserAttributeDefinition { + if e == nil { + return nil + } + return &service.UserAttributeDefinition{ + ID: e.ID, + Key: e.Key, + Name: e.Name, + Description: e.Description, + Type: service.UserAttributeType(e.Type), + Options: toServiceOptions(e.Options), + Required: e.Required, + Validation: toServiceValidation(e.Validation), + Placeholder: e.Placeholder, + DisplayOrder: e.DisplayOrder, + Enabled: e.Enabled, + CreatedAt: e.CreatedAt, + UpdatedAt: e.UpdatedAt, + } +} + +// Type conversion helpers (map types <-> service types) +func toEntOptions(opts []service.UserAttributeOption) []map[string]any { + if opts == nil { + return []map[string]any{} + } + result := make([]map[string]any, len(opts)) + for i, o := range opts { + result[i] = map[string]any{"value": o.Value, "label": o.Label} + } + return result +} + +func toServiceOptions(opts []map[string]any) []service.UserAttributeOption { + if opts == nil { + return []service.UserAttributeOption{} + } + result := make([]service.UserAttributeOption, len(opts)) + for i, o := range opts { + result[i] = service.UserAttributeOption{ + Value: getString(o, "value"), + Label: getString(o, "label"), + } + } + return result +} + +func toEntValidation(v service.UserAttributeValidation) map[string]any { + result := map[string]any{} + if v.MinLength != nil { + result["min_length"] = *v.MinLength + } + if v.MaxLength != nil { + result["max_length"] = *v.MaxLength + } + if v.Min != nil { + result["min"] = *v.Min + } + if v.Max != nil { + result["max"] = *v.Max + } + if v.Pattern != nil { + result["pattern"] = *v.Pattern + } + if v.Message != nil { + result["message"] = *v.Message + } + return result +} + +func toServiceValidation(v map[string]any) service.UserAttributeValidation { + result := service.UserAttributeValidation{} + if val := getInt(v, 
"min_length"); val != nil { + result.MinLength = val + } + if val := getInt(v, "max_length"); val != nil { + result.MaxLength = val + } + if val := getInt(v, "min"); val != nil { + result.Min = val + } + if val := getInt(v, "max"); val != nil { + result.Max = val + } + if val := getStringPtr(v, "pattern"); val != nil { + result.Pattern = val + } + if val := getStringPtr(v, "message"); val != nil { + result.Message = val + } + return result +} + +// Helper functions for type conversion +func getString(m map[string]any, key string) string { + if v, ok := m[key]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func getStringPtr(m map[string]any, key string) *string { + if v, ok := m[key]; ok { + if s, ok := v.(string); ok { + return &s + } + } + return nil +} + +func getInt(m map[string]any, key string) *int { + if v, ok := m[key]; ok { + switch n := v.(type) { + case int: + return &n + case int64: + i := int(n) + return &i + case float64: + i := int(n) + return &i + } + } + return nil +} diff --git a/backend/internal/repository/user_repo.go b/backend/internal/repository/user_repo.go new file mode 100644 index 00000000..006a5464 --- /dev/null +++ b/backend/internal/repository/user_repo.go @@ -0,0 +1,468 @@ +package repository + +import ( + "context" + "database/sql" + "errors" + "fmt" + "sort" + "strings" + + dbent "github.com/Wei-Shaw/sub2api/ent" + dbuser "github.com/Wei-Shaw/sub2api/ent/user" + "github.com/Wei-Shaw/sub2api/ent/userallowedgroup" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type userRepository struct { + client *dbent.Client + sql sqlExecutor +} + +func NewUserRepository(client *dbent.Client, sqlDB *sql.DB) service.UserRepository { + return newUserRepositoryWithSQL(client, sqlDB) +} + +func newUserRepositoryWithSQL(client *dbent.Client, sqlq sqlExecutor) *userRepository { + return &userRepository{client: client, sql: sqlq} +} + +func (r *userRepository) Create(ctx context.Context, userIn *service.User) error { + if userIn == nil { + return nil + } + + // 统一使用 ent 的事务:保证用户与允许分组的更新原子化, + // 并避免基于 *sql.Tx 手动构造 ent client 导致的 ExecQuerier 断言错误。 + tx, err := r.client.Tx(ctx) + if err != nil && !errors.Is(err, dbent.ErrTxStarted) { + return err + } + + var txClient *dbent.Client + if err == nil { + defer func() { _ = tx.Rollback() }() + txClient = tx.Client() + } else { + // 已处于外部事务中(ErrTxStarted),复用当前 client 并由调用方负责提交/回滚。 + txClient = r.client + } + + created, err := txClient.User.Create(). + SetEmail(userIn.Email). + SetUsername(userIn.Username). + SetNotes(userIn.Notes). + SetPasswordHash(userIn.PasswordHash). + SetRole(userIn.Role). + SetBalance(userIn.Balance). + SetConcurrency(userIn.Concurrency). + SetStatus(userIn.Status). 
+ Save(ctx) + if err != nil { + return translatePersistenceError(err, nil, service.ErrEmailExists) + } + + if err := r.syncUserAllowedGroupsWithClient(ctx, txClient, created.ID, userIn.AllowedGroups); err != nil { + return err + } + + if tx != nil { + if err := tx.Commit(); err != nil { + return err + } + } + + applyUserEntityToService(userIn, created) + return nil +} + +func (r *userRepository) GetByID(ctx context.Context, id int64) (*service.User, error) { + m, err := r.client.User.Query().Where(dbuser.IDEQ(id)).Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) + } + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{id}) + if err != nil { + return nil, err + } + if v, ok := groups[id]; ok { + out.AllowedGroups = v + } + return out, nil +} + +func (r *userRepository) GetByEmail(ctx context.Context, email string) (*service.User, error) { + m, err := r.client.User.Query().Where(dbuser.EmailEQ(email)).Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) + } + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{m.ID}) + if err != nil { + return nil, err + } + if v, ok := groups[m.ID]; ok { + out.AllowedGroups = v + } + return out, nil +} + +func (r *userRepository) Update(ctx context.Context, userIn *service.User) error { + if userIn == nil { + return nil + } + + // Wrap the user update and the allowed_groups sync in one ent transaction to avoid cross-layer inconsistency. + tx, err := r.client.Tx(ctx) + if err != nil && !errors.Is(err, dbent.ErrTxStarted) { + return err + } + + var txClient *dbent.Client + if err == nil { + defer func() { _ = tx.Rollback() }() + txClient = tx.Client() + } else { + // Already inside an outer transaction (ErrTxStarted): reuse the current client and let the caller commit or roll back. + txClient = r.client + } + + updated, err := txClient.User.UpdateOneID(userIn.ID). + SetEmail(userIn.Email). + SetUsername(userIn.Username). + SetNotes(userIn.Notes). + SetPasswordHash(userIn.PasswordHash). + SetRole(userIn.Role). + SetBalance(userIn.Balance). + SetConcurrency(userIn.Concurrency). + SetStatus(userIn.Status). 
+ Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, service.ErrEmailExists) + } + + if err := r.syncUserAllowedGroupsWithClient(ctx, txClient, updated.ID, userIn.AllowedGroups); err != nil { + return err + } + + if tx != nil { + if err := tx.Commit(); err != nil { + return err + } + } + + userIn.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *userRepository) Delete(ctx context.Context, id int64) error { + affected, err := r.client.User.Delete().Where(dbuser.IDEQ(id)).Exec(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + if affected == 0 { + return service.ErrUserNotFound + } + return nil +} + +func (r *userRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.User, *pagination.PaginationResult, error) { + return r.ListWithFilters(ctx, params, service.UserListFilters{}) +} + +func (r *userRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters service.UserListFilters) ([]service.User, *pagination.PaginationResult, error) { + q := r.client.User.Query() + + if filters.Status != "" { + q = q.Where(dbuser.StatusEQ(filters.Status)) + } + if filters.Role != "" { + q = q.Where(dbuser.RoleEQ(filters.Role)) + } + if filters.Search != "" { + q = q.Where( + dbuser.Or( + dbuser.EmailContainsFold(filters.Search), + dbuser.UsernameContainsFold(filters.Search), + ), + ) + } + + // If attribute filters are specified, we need to filter by user IDs first + var allowedUserIDs []int64 + if len(filters.Attributes) > 0 { + var attrErr error + allowedUserIDs, attrErr = r.filterUsersByAttributes(ctx, filters.Attributes) + if attrErr != nil { + return nil, nil, attrErr + } + if len(allowedUserIDs) == 0 { + // No users match the attribute filters + return []service.User{}, paginationResultFromTotal(0, params), nil + } + q = q.Where(dbuser.IDIn(allowedUserIDs...)) + } + + total, err := q.Clone().Count(ctx) + if err != nil { + return nil, nil, err + } + + users, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(dbuser.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outUsers := make([]service.User, 0, len(users)) + if len(users) == 0 { + return outUsers, paginationResultFromTotal(int64(total), params), nil + } + + userIDs := make([]int64, 0, len(users)) + userMap := make(map[int64]*service.User, len(users)) + for i := range users { + userIDs = append(userIDs, users[i].ID) + u := userEntityToService(users[i]) + outUsers = append(outUsers, *u) + userMap[u.ID] = &outUsers[len(outUsers)-1] + } + + // Batch load active subscriptions with groups to avoid N+1. + subs, err := r.client.UserSubscription.Query(). + Where( + usersubscription.UserIDIn(userIDs...), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + ). + WithGroup(). 
+ All(ctx) + if err != nil { + return nil, nil, err + } + + for i := range subs { + if u, ok := userMap[subs[i].UserID]; ok { + u.Subscriptions = append(u.Subscriptions, *userSubscriptionEntityToService(subs[i])) + } + } + + allowedGroupsByUser, err := r.loadAllowedGroups(ctx, userIDs) + if err != nil { + return nil, nil, err + } + for id, u := range userMap { + if groups, ok := allowedGroupsByUser[id]; ok { + u.AllowedGroups = groups + } + } + + return outUsers, paginationResultFromTotal(int64(total), params), nil +} + +// filterUsersByAttributes returns user IDs that match ALL the given attribute filters. +// The per-attribute clauses are ORed, then HAVING COUNT(DISTINCT attribute_id) = len(attrs) +// keeps only users that satisfied every filter. +func (r *userRepository) filterUsersByAttributes(ctx context.Context, attrs map[int64]string) ([]int64, error) { + if len(attrs) == 0 { + return nil, nil + } + + if r.sql == nil { + return nil, fmt.Errorf("sql executor is not configured") + } + + clauses := make([]string, 0, len(attrs)) + args := make([]any, 0, len(attrs)*2+1) + argIndex := 1 + for attrID, value := range attrs { + clauses = append(clauses, fmt.Sprintf("(attribute_id = $%d AND value ILIKE $%d)", argIndex, argIndex+1)) + args = append(args, attrID, "%"+value+"%") + argIndex += 2 + } + + query := fmt.Sprintf( + `SELECT user_id + FROM user_attribute_values + WHERE %s + GROUP BY user_id + HAVING COUNT(DISTINCT attribute_id) = $%d`, + strings.Join(clauses, " OR "), + argIndex, + ) + args = append(args, len(attrs)) + + rows, err := r.sql.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + result := make([]int64, 0) + for rows.Next() { + var userID int64 + if scanErr := rows.Scan(&userID); scanErr != nil { + return nil, scanErr + } + result = append(result, userID) + } + if err := rows.Err(); err != nil { + return nil, err + } + return result, nil +} + +func (r *userRepository) UpdateBalance(ctx context.Context, id int64, amount float64) error { + client := clientFromContext(ctx, r.client) + n, err := client.User.Update().Where(dbuser.IDEQ(id)).AddBalance(amount).Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + if n == 0 { + return service.ErrUserNotFound + } + return nil +} + +// DeductBalance deducts from the user's balance. +// Overdraft policy: the balance is allowed to go negative so the in-flight request can complete; +// middleware blocks users whose balance is <= 0 from issuing further requests. +func (r *userRepository) DeductBalance(ctx context.Context, id int64, amount float64) error { + client := clientFromContext(ctx, r.client) + n, err := client.User.Update(). + Where(dbuser.IDEQ(id)). + AddBalance(-amount). + Save(ctx) + if err != nil { + return err + } + if n == 0 { + return service.ErrUserNotFound + } + return nil +} + +func (r *userRepository) UpdateConcurrency(ctx context.Context, id int64, amount int) error { + client := clientFromContext(ctx, r.client) + n, err := client.User.Update().Where(dbuser.IDEQ(id)).AddConcurrency(amount).Save(ctx) + if err != nil { + return translatePersistenceError(err, service.ErrUserNotFound, nil) + } + if n == 0 { + return service.ErrUserNotFound + } + return nil +} + +func (r *userRepository) ExistsByEmail(ctx context.Context, email string) (bool, error) { + return r.client.User.Query().Where(dbuser.EmailEQ(email)).Exist(ctx) +} + +func (r *userRepository) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) { + // Only touches the user_allowed_groups join table; the legacy users.allowed_groups column is deprecated. + affected, err := r.client.UserAllowedGroup.Delete(). + Where(userallowedgroup.GroupIDEQ(groupID)). 
+ Exec(ctx) + if err != nil { + return 0, err + } + return int64(affected), nil +} + +func (r *userRepository) GetFirstAdmin(ctx context.Context) (*service.User, error) { + m, err := r.client.User.Query(). + Where( + dbuser.RoleEQ(service.RoleAdmin), + dbuser.StatusEQ(service.StatusActive), + ). + Order(dbent.Asc(dbuser.FieldID)). + First(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrUserNotFound, nil) + } + + out := userEntityToService(m) + groups, err := r.loadAllowedGroups(ctx, []int64{m.ID}) + if err != nil { + return nil, err + } + if v, ok := groups[m.ID]; ok { + out.AllowedGroups = v + } + return out, nil +} + +func (r *userRepository) loadAllowedGroups(ctx context.Context, userIDs []int64) (map[int64][]int64, error) { + out := make(map[int64][]int64, len(userIDs)) + if len(userIDs) == 0 { + return out, nil + } + + rows, err := r.client.UserAllowedGroup.Query(). + Where(userallowedgroup.UserIDIn(userIDs...)). + All(ctx) + if err != nil { + return nil, err + } + + for i := range rows { + out[rows[i].UserID] = append(out[rows[i].UserID], rows[i].GroupID) + } + + for userID := range out { + sort.Slice(out[userID], func(i, j int) bool { return out[userID][i] < out[userID][j] }) + } + + return out, nil +} + +// syncUserAllowedGroupsWithClient syncs a user's allowed groups within the given ent client/transaction: +// it only touches the user_allowed_groups join table; the legacy users.allowed_groups column is deprecated. +func (r *userRepository) syncUserAllowedGroupsWithClient(ctx context.Context, client *dbent.Client, userID int64, groupIDs []int64) error { + if client == nil { + return nil + } + + // Keep join table as the source of truth for reads. + if _, err := client.UserAllowedGroup.Delete().Where(userallowedgroup.UserIDEQ(userID)).Exec(ctx); err != nil { + return err + } + + unique := make(map[int64]struct{}, len(groupIDs)) + for _, id := range groupIDs { + if id <= 0 { + continue + } + unique[id] = struct{}{} + } + + if len(unique) > 0 { + creates := make([]*dbent.UserAllowedGroupCreate, 0, len(unique)) + for groupID := range unique { + creates = append(creates, client.UserAllowedGroup.Create().SetUserID(userID).SetGroupID(groupID)) + } + if err := client.UserAllowedGroup. + CreateBulk(creates...). + OnConflictColumns(userallowedgroup.FieldUserID, userallowedgroup.FieldGroupID). + DoNothing(). 
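+ // DoNothing makes duplicate (user_id, group_id) inserts idempotent if a concurrent writer already created the row.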
+ Exec(ctx); err != nil { + return err + } + } + + return nil +} + +func applyUserEntityToService(dst *service.User, src *dbent.User) { + if dst == nil || src == nil { + return + } + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt +} diff --git a/backend/internal/repository/user_repo_integration_test.go b/backend/internal/repository/user_repo_integration_test.go new file mode 100644 index 00000000..f5d0f9ff --- /dev/null +++ b/backend/internal/repository/user_repo_integration_test.go @@ -0,0 +1,537 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type UserRepoSuite struct { + suite.Suite + ctx context.Context + client *dbent.Client + repo *userRepository +} + +func (s *UserRepoSuite) SetupTest() { + s.ctx = context.Background() + s.client = testEntClient(s.T()) + s.repo = newUserRepositoryWithSQL(s.client, integrationDB) + + // Clean up test data so every test starts from a pristine state. + _, _ = integrationDB.ExecContext(s.ctx, "DELETE FROM user_subscriptions") + _, _ = integrationDB.ExecContext(s.ctx, "DELETE FROM user_allowed_groups") + _, _ = integrationDB.ExecContext(s.ctx, "DELETE FROM users") +} + +func TestUserRepoSuite(t *testing.T) { + suite.Run(t, new(UserRepoSuite)) +} + +func (s *UserRepoSuite) mustCreateUser(u *service.User) *service.User { + s.T().Helper() + + if u.Email == "" { + u.Email = "user-" + time.Now().Format(time.RFC3339Nano) + "@example.com" + } + if u.PasswordHash == "" { + u.PasswordHash = "test-password-hash" + } + if u.Role == "" { + u.Role = service.RoleUser + } + if u.Status == "" { + u.Status = service.StatusActive + } + if u.Concurrency == 0 { + u.Concurrency = 5 + } + + s.Require().NoError(s.repo.Create(s.ctx, u), "create user") + return u +} + +func (s *UserRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). + SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *UserRepoSuite) mustCreateSubscription(userID, groupID int64, mutate func(*dbent.UserSubscriptionCreate)) *dbent.UserSubscription { + s.T().Helper() + + now := time.Now() + create := s.client.UserSubscription.Create(). + SetUserID(userID). + SetGroupID(groupID). + SetStartsAt(now.Add(-1 * time.Hour)). + SetExpiresAt(now.Add(24 * time.Hour)). + SetStatus(service.SubscriptionStatusActive). + SetAssignedAt(now). 
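+ // Notes default to the empty string here, mirroring the repository's always-store-a-string behavior.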
+ SetNotes("") + + if mutate != nil { + mutate(create) + } + + sub, err := create.Save(s.ctx) + s.Require().NoError(err, "create subscription") + return sub +} + +// --- Create / GetByID / GetByEmail / Update / Delete --- + +func (s *UserRepoSuite) TestCreate() { + user := s.mustCreateUser(&service.User{ + Email: "create@test.com", + Username: "testuser", + PasswordHash: "test-password-hash", + Role: service.RoleUser, + Status: service.StatusActive, + }) + + s.Require().NotZero(user.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal("create@test.com", got.Email) +} + +func (s *UserRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") +} + +func (s *UserRepoSuite) TestGetByEmail() { + user := s.mustCreateUser(&service.User{Email: "byemail@test.com"}) + + got, err := s.repo.GetByEmail(s.ctx, user.Email) + s.Require().NoError(err, "GetByEmail") + s.Require().Equal(user.ID, got.ID) +} + +func (s *UserRepoSuite) TestGetByEmail_NotFound() { + _, err := s.repo.GetByEmail(s.ctx, "nonexistent@test.com") + s.Require().Error(err, "expected error for non-existent email") +} + +func (s *UserRepoSuite) TestUpdate() { + user := s.mustCreateUser(&service.User{Email: "update@test.com", Username: "original"}) + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + got.Username = "updated" + s.Require().NoError(s.repo.Update(s.ctx, got), "Update") + + updated, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("updated", updated.Username) +} + +func (s *UserRepoSuite) TestDelete() { + user := s.mustCreateUser(&service.User{Email: "delete@test.com"}) + + err := s.repo.Delete(s.ctx, user.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, user.ID) + s.Require().Error(err, "expected error after delete") +} + +// --- List / ListWithFilters --- + +func (s *UserRepoSuite) TestList() { + s.mustCreateUser(&service.User{Email: "list1@test.com"}) + s.mustCreateUser(&service.User{Email: "list2@test.com"}) + + users, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "List") + s.Require().Len(users, 2) + s.Require().Equal(int64(2), page.Total) +} + +func (s *UserRepoSuite) TestListWithFilters_Status() { + s.mustCreateUser(&service.User{Email: "active@test.com", Status: service.StatusActive}) + s.mustCreateUser(&service.User{Email: "disabled@test.com", Status: service.StatusDisabled}) + + users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Status: service.StatusActive}) + s.Require().NoError(err) + s.Require().Len(users, 1) + s.Require().Equal(service.StatusActive, users[0].Status) +} + +func (s *UserRepoSuite) TestListWithFilters_Role() { + s.mustCreateUser(&service.User{Email: "user@test.com", Role: service.RoleUser}) + s.mustCreateUser(&service.User{Email: "admin@test.com", Role: service.RoleAdmin}) + + users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Role: service.RoleAdmin}) + s.Require().NoError(err) + s.Require().Len(users, 1) + s.Require().Equal(service.RoleAdmin, users[0].Role) +} + +func (s *UserRepoSuite) TestListWithFilters_Search() { + s.mustCreateUser(&service.User{Email: "alice@test.com", Username: "Alice"}) + 
s.mustCreateUser(&service.User{Email: "bob@test.com", Username: "Bob"}) + + users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Search: "alice"}) + s.Require().NoError(err) + s.Require().Len(users, 1) + s.Require().Contains(users[0].Email, "alice") +} + +func (s *UserRepoSuite) TestListWithFilters_SearchByUsername() { + s.mustCreateUser(&service.User{Email: "u1@test.com", Username: "JohnDoe"}) + s.mustCreateUser(&service.User{Email: "u2@test.com", Username: "JaneSmith"}) + + users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Search: "john"}) + s.Require().NoError(err) + s.Require().Len(users, 1) + s.Require().Equal("JohnDoe", users[0].Username) +} + +func (s *UserRepoSuite) TestListWithFilters_LoadsActiveSubscriptions() { + user := s.mustCreateUser(&service.User{Email: "sub@test.com", Status: service.StatusActive}) + groupActive := s.mustCreateGroup("g-sub-active") + groupExpired := s.mustCreateGroup("g-sub-expired") + + _ = s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusActive) + c.SetExpiresAt(time.Now().Add(1 * time.Hour)) + }) + _ = s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-1 * time.Hour)) + }) + + users, _, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Search: "sub@"}) + s.Require().NoError(err, "ListWithFilters") + s.Require().Len(users, 1, "expected 1 user") + s.Require().Len(users[0].Subscriptions, 1, "expected 1 active subscription") + s.Require().NotNil(users[0].Subscriptions[0].Group, "expected subscription group preload") + s.Require().Equal(groupActive.ID, users[0].Subscriptions[0].Group.ID, "group ID mismatch") +} + +func (s *UserRepoSuite) TestListWithFilters_CombinedFilters() { + s.mustCreateUser(&service.User{ + Email: "a@example.com", + Username: "Alice", + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + }) + target := s.mustCreateUser(&service.User{ + Email: "b@example.com", + Username: "Bob", + Role: service.RoleAdmin, + Status: service.StatusActive, + Balance: 1, + }) + s.mustCreateUser(&service.User{ + Email: "c@example.com", + Role: service.RoleAdmin, + Status: service.StatusDisabled, + }) + + users, page, err := s.repo.ListWithFilters(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, service.UserListFilters{Status: service.StatusActive, Role: service.RoleAdmin, Search: "b@"}) + s.Require().NoError(err, "ListWithFilters") + s.Require().Equal(int64(1), page.Total, "ListWithFilters total mismatch") + s.Require().Len(users, 1, "ListWithFilters len mismatch") + s.Require().Equal(target.ID, users[0].ID, "ListWithFilters result mismatch") +} + +// --- Balance operations --- + +func (s *UserRepoSuite) TestUpdateBalance() { + user := s.mustCreateUser(&service.User{Email: "bal@test.com", Balance: 10}) + + err := s.repo.UpdateBalance(s.ctx, user.ID, 2.5) + s.Require().NoError(err, "UpdateBalance") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(12.5, got.Balance, 1e-6) +} + +func (s *UserRepoSuite) TestUpdateBalance_Negative() { + user := s.mustCreateUser(&service.User{Email: "balneg@test.com", Balance: 10}) + + err := s.repo.UpdateBalance(s.ctx, user.ID, -3) + 
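// A negative delta decrements the balance through the same atomic AddBalance update. +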
s.Require().NoError(err, "UpdateBalance with negative") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(7.0, got.Balance, 1e-6) +} + +func (s *UserRepoSuite) TestDeductBalance() { + user := s.mustCreateUser(&service.User{Email: "deduct@test.com", Balance: 10}) + + err := s.repo.DeductBalance(s.ctx, user.ID, 5) + s.Require().NoError(err, "DeductBalance") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(5.0, got.Balance, 1e-6) +} + +func (s *UserRepoSuite) TestDeductBalance_InsufficientFunds() { + user := s.mustCreateUser(&service.User{Email: "insuf@test.com", Balance: 5}) + + // 透支策略:允许扣除超过余额的金额 + err := s.repo.DeductBalance(s.ctx, user.ID, 999) + s.Require().NoError(err, "DeductBalance should allow overdraft") + + // 验证余额变为负数 + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(-994.0, got.Balance, 1e-6, "Balance should be negative after overdraft") +} + +func (s *UserRepoSuite) TestDeductBalance_ExactAmount() { + user := s.mustCreateUser(&service.User{Email: "exact@test.com", Balance: 10}) + + err := s.repo.DeductBalance(s.ctx, user.ID, 10) + s.Require().NoError(err, "DeductBalance exact amount") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(0.0, got.Balance, 1e-6) +} + +func (s *UserRepoSuite) TestDeductBalance_AllowsOverdraft() { + user := s.mustCreateUser(&service.User{Email: "overdraft@test.com", Balance: 5.0}) + + // 扣除超过余额的金额 - 应该成功 + err := s.repo.DeductBalance(s.ctx, user.ID, 10.0) + s.Require().NoError(err, "DeductBalance should allow overdraft") + + // 验证余额为负 + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().InDelta(-5.0, got.Balance, 1e-6, "Balance should be -5.0 after overdraft") +} + +// --- Concurrency --- + +func (s *UserRepoSuite) TestUpdateConcurrency() { + user := s.mustCreateUser(&service.User{Email: "conc@test.com", Concurrency: 5}) + + err := s.repo.UpdateConcurrency(s.ctx, user.ID, 3) + s.Require().NoError(err, "UpdateConcurrency") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().Equal(8, got.Concurrency) +} + +func (s *UserRepoSuite) TestUpdateConcurrency_Negative() { + user := s.mustCreateUser(&service.User{Email: "concneg@test.com", Concurrency: 5}) + + err := s.repo.UpdateConcurrency(s.ctx, user.ID, -2) + s.Require().NoError(err, "UpdateConcurrency negative") + + got, err := s.repo.GetByID(s.ctx, user.ID) + s.Require().NoError(err) + s.Require().Equal(3, got.Concurrency) +} + +// --- ExistsByEmail --- + +func (s *UserRepoSuite) TestExistsByEmail() { + s.mustCreateUser(&service.User{Email: "exists@test.com"}) + + exists, err := s.repo.ExistsByEmail(s.ctx, "exists@test.com") + s.Require().NoError(err, "ExistsByEmail") + s.Require().True(exists) + + notExists, err := s.repo.ExistsByEmail(s.ctx, "notexists@test.com") + s.Require().NoError(err) + s.Require().False(notExists) +} + +// --- RemoveGroupFromAllowedGroups --- + +func (s *UserRepoSuite) TestRemoveGroupFromAllowedGroups() { + target := s.mustCreateGroup("target-42") + other := s.mustCreateGroup("other-7") + + userA := s.mustCreateUser(&service.User{ + Email: "a1@example.com", + AllowedGroups: []int64{target.ID, other.ID}, + }) + s.mustCreateUser(&service.User{ + Email: "a2@example.com", + AllowedGroups: []int64{other.ID}, + }) + + affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, target.ID) + s.Require().NoError(err, 
"RemoveGroupFromAllowedGroups") + s.Require().Equal(int64(1), affected, "expected 1 affected row") + + got, err := s.repo.GetByID(s.ctx, userA.ID) + s.Require().NoError(err, "GetByID") + s.Require().NotContains(got.AllowedGroups, target.ID) + s.Require().Contains(got.AllowedGroups, other.ID) +} + +func (s *UserRepoSuite) TestRemoveGroupFromAllowedGroups_NoMatch() { + groupA := s.mustCreateGroup("nomatch-a") + groupB := s.mustCreateGroup("nomatch-b") + + s.mustCreateUser(&service.User{ + Email: "nomatch@test.com", + AllowedGroups: []int64{groupA.ID, groupB.ID}, + }) + + affected, err := s.repo.RemoveGroupFromAllowedGroups(s.ctx, 999999) + s.Require().NoError(err) + s.Require().Zero(affected, "expected no affected rows") +} + +// --- GetFirstAdmin --- + +func (s *UserRepoSuite) TestGetFirstAdmin() { + admin1 := s.mustCreateUser(&service.User{ + Email: "admin1@example.com", + Role: service.RoleAdmin, + Status: service.StatusActive, + }) + s.mustCreateUser(&service.User{ + Email: "admin2@example.com", + Role: service.RoleAdmin, + Status: service.StatusActive, + }) + + got, err := s.repo.GetFirstAdmin(s.ctx) + s.Require().NoError(err, "GetFirstAdmin") + s.Require().Equal(admin1.ID, got.ID, "GetFirstAdmin mismatch") +} + +func (s *UserRepoSuite) TestGetFirstAdmin_NoAdmin() { + s.mustCreateUser(&service.User{ + Email: "user@example.com", + Role: service.RoleUser, + Status: service.StatusActive, + }) + + _, err := s.repo.GetFirstAdmin(s.ctx) + s.Require().Error(err, "expected error when no admin exists") +} + +func (s *UserRepoSuite) TestGetFirstAdmin_DisabledAdminIgnored() { + s.mustCreateUser(&service.User{ + Email: "disabled@example.com", + Role: service.RoleAdmin, + Status: service.StatusDisabled, + }) + activeAdmin := s.mustCreateUser(&service.User{ + Email: "active@example.com", + Role: service.RoleAdmin, + Status: service.StatusActive, + }) + + got, err := s.repo.GetFirstAdmin(s.ctx) + s.Require().NoError(err, "GetFirstAdmin") + s.Require().Equal(activeAdmin.ID, got.ID, "should return only active admin") +} + +// --- Combined --- + +func (s *UserRepoSuite) TestCRUD_And_Filters_And_AtomicUpdates() { + user1 := s.mustCreateUser(&service.User{ + Email: "a@example.com", + Username: "Alice", + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + }) + user2 := s.mustCreateUser(&service.User{ + Email: "b@example.com", + Username: "Bob", + Role: service.RoleAdmin, + Status: service.StatusActive, + Balance: 1, + }) + s.mustCreateUser(&service.User{ + Email: "c@example.com", + Role: service.RoleAdmin, + Status: service.StatusDisabled, + }) + + got, err := s.repo.GetByID(s.ctx, user1.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal(user1.Email, got.Email, "GetByID email mismatch") + + gotByEmail, err := s.repo.GetByEmail(s.ctx, user2.Email) + s.Require().NoError(err, "GetByEmail") + s.Require().Equal(user2.ID, gotByEmail.ID, "GetByEmail ID mismatch") + + got.Username = "Alice2" + s.Require().NoError(s.repo.Update(s.ctx, got), "Update") + got2, err := s.repo.GetByID(s.ctx, user1.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("Alice2", got2.Username, "Update did not persist") + + s.Require().NoError(s.repo.UpdateBalance(s.ctx, user1.ID, 2.5), "UpdateBalance") + got3, err := s.repo.GetByID(s.ctx, user1.ID) + s.Require().NoError(err, "GetByID after UpdateBalance") + s.Require().InDelta(12.5, got3.Balance, 1e-6) + + s.Require().NoError(s.repo.DeductBalance(s.ctx, user1.ID, 5), "DeductBalance") + got4, err := s.repo.GetByID(s.ctx, user1.ID) + 
s.Require().NoError(err, "GetByID after DeductBalance") + s.Require().InDelta(7.5, got4.Balance, 1e-6) + + // 透支策略:允许扣除超过余额的金额 + err = s.repo.DeductBalance(s.ctx, user1.ID, 999) + s.Require().NoError(err, "DeductBalance should allow overdraft") + gotOverdraft, err := s.repo.GetByID(s.ctx, user1.ID) + s.Require().NoError(err, "GetByID after overdraft") + s.Require().Less(gotOverdraft.Balance, 0.0, "Balance should be negative after overdraft") + + s.Require().NoError(s.repo.UpdateConcurrency(s.ctx, user1.ID, 3), "UpdateConcurrency") + got5, err := s.repo.GetByID(s.ctx, user1.ID) + s.Require().NoError(err, "GetByID after UpdateConcurrency") + s.Require().Equal(user1.Concurrency+3, got5.Concurrency) + + params := pagination.PaginationParams{Page: 1, PageSize: 10} + users, page, err := s.repo.ListWithFilters(s.ctx, params, service.UserListFilters{Status: service.StatusActive, Role: service.RoleAdmin, Search: "b@"}) + s.Require().NoError(err, "ListWithFilters") + s.Require().Equal(int64(1), page.Total, "ListWithFilters total mismatch") + s.Require().Len(users, 1, "ListWithFilters len mismatch") + s.Require().Equal(user2.ID, users[0].ID, "ListWithFilters result mismatch") +} + +// --- UpdateBalance/UpdateConcurrency 影响行数校验测试 --- + +func (s *UserRepoSuite) TestUpdateBalance_NotFound() { + err := s.repo.UpdateBalance(s.ctx, 999999, 10.0) + s.Require().Error(err, "expected error for non-existent user") + s.Require().ErrorIs(err, service.ErrUserNotFound) +} + +func (s *UserRepoSuite) TestUpdateConcurrency_NotFound() { + err := s.repo.UpdateConcurrency(s.ctx, 999999, 5) + s.Require().Error(err, "expected error for non-existent user") + s.Require().ErrorIs(err, service.ErrUserNotFound) +} + +func (s *UserRepoSuite) TestDeductBalance_NotFound() { + err := s.repo.DeductBalance(s.ctx, 999999, 5) + s.Require().Error(err, "expected error for non-existent user") + // DeductBalance 在用户不存在时返回 ErrUserNotFound + s.Require().ErrorIs(err, service.ErrUserNotFound) +} diff --git a/backend/internal/repository/user_subscription_repo.go b/backend/internal/repository/user_subscription_repo.go new file mode 100644 index 00000000..cd3b9db6 --- /dev/null +++ b/backend/internal/repository/user_subscription_repo.go @@ -0,0 +1,435 @@ +package repository + +import ( + "context" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/usersubscription" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type userSubscriptionRepository struct { + client *dbent.Client +} + +func NewUserSubscriptionRepository(client *dbent.Client) service.UserSubscriptionRepository { + return &userSubscriptionRepository{client: client} +} + +func (r *userSubscriptionRepository) Create(ctx context.Context, sub *service.UserSubscription) error { + if sub == nil { + return service.ErrSubscriptionNilInput + } + + client := clientFromContext(ctx, r.client) + builder := client.UserSubscription.Create(). + SetUserID(sub.UserID). + SetGroupID(sub.GroupID). + SetExpiresAt(sub.ExpiresAt). + SetNillableDailyWindowStart(sub.DailyWindowStart). + SetNillableWeeklyWindowStart(sub.WeeklyWindowStart). + SetNillableMonthlyWindowStart(sub.MonthlyWindowStart). + SetDailyUsageUsd(sub.DailyUsageUSD). + SetWeeklyUsageUsd(sub.WeeklyUsageUSD). + SetMonthlyUsageUsd(sub.MonthlyUsageUSD). 
+ SetNillableAssignedBy(sub.AssignedBy) + + if sub.StartsAt.IsZero() { + builder.SetStartsAt(time.Now()) + } else { + builder.SetStartsAt(sub.StartsAt) + } + if sub.Status != "" { + builder.SetStatus(sub.Status) + } + if !sub.AssignedAt.IsZero() { + builder.SetAssignedAt(sub.AssignedAt) + } + // Keep compatibility with historical behavior: always store notes as a string value. + builder.SetNotes(sub.Notes) + + created, err := builder.Save(ctx) + if err == nil { + applyUserSubscriptionEntityToService(sub, created) + } + return translatePersistenceError(err, nil, service.ErrSubscriptionAlreadyExists) +} + +func (r *userSubscriptionRepository) GetByID(ctx context.Context, id int64) (*service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + m, err := client.UserSubscription.Query(). + Where(usersubscription.IDEQ(id)). + WithUser(). + WithGroup(). + WithAssignedByUser(). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) + } + return userSubscriptionEntityToService(m), nil +} + +func (r *userSubscriptionRepository) GetByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + m, err := client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID), usersubscription.GroupIDEQ(groupID)). + WithGroup(). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) + } + return userSubscriptionEntityToService(m), nil +} + +func (r *userSubscriptionRepository) GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + m, err := client.UserSubscription.Query(). + Where( + usersubscription.UserIDEQ(userID), + usersubscription.GroupIDEQ(groupID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). + WithGroup(). + Only(ctx) + if err != nil { + return nil, translatePersistenceError(err, service.ErrSubscriptionNotFound, nil) + } + return userSubscriptionEntityToService(m), nil +} + +func (r *userSubscriptionRepository) Update(ctx context.Context, sub *service.UserSubscription) error { + if sub == nil { + return service.ErrSubscriptionNilInput + } + + client := clientFromContext(ctx, r.client) + builder := client.UserSubscription.UpdateOneID(sub.ID). + SetUserID(sub.UserID). + SetGroupID(sub.GroupID). + SetStartsAt(sub.StartsAt). + SetExpiresAt(sub.ExpiresAt). + SetStatus(sub.Status). + SetNillableDailyWindowStart(sub.DailyWindowStart). + SetNillableWeeklyWindowStart(sub.WeeklyWindowStart). + SetNillableMonthlyWindowStart(sub.MonthlyWindowStart). + SetDailyUsageUsd(sub.DailyUsageUSD). + SetWeeklyUsageUsd(sub.WeeklyUsageUSD). + SetMonthlyUsageUsd(sub.MonthlyUsageUSD). + SetNillableAssignedBy(sub.AssignedBy). + SetAssignedAt(sub.AssignedAt). + SetNotes(sub.Notes) + + updated, err := builder.Save(ctx) + if err == nil { + applyUserSubscriptionEntityToService(sub, updated) + return nil + } + return translatePersistenceError(err, service.ErrSubscriptionNotFound, service.ErrSubscriptionAlreadyExists) +} + +func (r *userSubscriptionRepository) Delete(ctx context.Context, id int64) error { + // Match GORM semantics: deleting a missing row is not an error. 
+ client := clientFromContext(ctx, r.client) + _, err := client.UserSubscription.Delete().Where(usersubscription.IDEQ(id)).Exec(ctx) + return err +} + +func (r *userSubscriptionRepository) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + subs, err := client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID)). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + All(ctx) + if err != nil { + return nil, err + } + return userSubscriptionEntitiesToService(subs), nil +} + +func (r *userSubscriptionRepository) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + subs, err := client.UserSubscription.Query(). + Where( + usersubscription.UserIDEQ(userID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + All(ctx) + if err != nil { + return nil, err + } + return userSubscriptionEntitiesToService(subs), nil +} + +func (r *userSubscriptionRepository) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { + client := clientFromContext(ctx, r.client) + q := client.UserSubscription.Query().Where(usersubscription.GroupIDEQ(groupID)) + + total, err := q.Clone().Count(ctx) + if err != nil { + return nil, nil, err + } + + subs, err := q. + WithUser(). + WithGroup(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + Offset(params.Offset()). + Limit(params.Limit()). + All(ctx) + if err != nil { + return nil, nil, err + } + + return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil +} + +func (r *userSubscriptionRepository) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) { + client := clientFromContext(ctx, r.client) + q := client.UserSubscription.Query() + if userID != nil { + q = q.Where(usersubscription.UserIDEQ(*userID)) + } + if groupID != nil { + q = q.Where(usersubscription.GroupIDEQ(*groupID)) + } + if status != "" { + q = q.Where(usersubscription.StatusEQ(status)) + } + + total, err := q.Clone().Count(ctx) + if err != nil { + return nil, nil, err + } + + subs, err := q. + WithUser(). + WithGroup(). + WithAssignedByUser(). + Order(dbent.Desc(usersubscription.FieldCreatedAt)). + Offset(params.Offset()). + Limit(params.Limit()). + All(ctx) + if err != nil { + return nil, nil, err + } + + return userSubscriptionEntitiesToService(subs), paginationResultFromTotal(int64(total), params), nil +} + +func (r *userSubscriptionRepository) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { + client := clientFromContext(ctx, r.client) + return client.UserSubscription.Query(). + Where(usersubscription.UserIDEQ(userID), usersubscription.GroupIDEQ(groupID)). + Exist(ctx) +} + +func (r *userSubscriptionRepository) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error { + client := clientFromContext(ctx, r.client) + _, err := client.UserSubscription.UpdateOneID(subscriptionID). + SetExpiresAt(newExpiresAt). 
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) UpdateStatus(ctx context.Context, subscriptionID int64, status string) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(subscriptionID).
+		SetStatus(status).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(subscriptionID).
+		SetNotes(notes).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) ActivateWindows(ctx context.Context, id int64, start time.Time) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(id).
+		SetDailyWindowStart(start).
+		SetWeeklyWindowStart(start).
+		SetMonthlyWindowStart(start).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) ResetDailyUsage(ctx context.Context, id int64, newWindowStart time.Time) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(id).
+		SetDailyUsageUsd(0).
+		SetDailyWindowStart(newWindowStart).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) ResetWeeklyUsage(ctx context.Context, id int64, newWindowStart time.Time) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(id).
+		SetWeeklyUsageUsd(0).
+		SetWeeklyWindowStart(newWindowStart).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+func (r *userSubscriptionRepository) ResetMonthlyUsage(ctx context.Context, id int64, newWindowStart time.Time) error {
+	client := clientFromContext(ctx, r.client)
+	_, err := client.UserSubscription.UpdateOneID(id).
+		SetMonthlyUsageUsd(0).
+		SetMonthlyWindowStart(newWindowStart).
+		Save(ctx)
+	return translatePersistenceError(err, service.ErrSubscriptionNotFound, nil)
+}
+
+// IncrementUsage atomically accumulates subscription usage.
+// Quota checks are already completed before the request by BillingCacheService.CheckBillingEligibility;
+// this method only records actual consumption, ensuring the integrity of usage data.
+func (r *userSubscriptionRepository) IncrementUsage(ctx context.Context, id int64, costUSD float64) error {
+	const updateSQL = `
+		UPDATE user_subscriptions us
+		SET
+			daily_usage_usd = us.daily_usage_usd + $1,
+			weekly_usage_usd = us.weekly_usage_usd + $1,
+			monthly_usage_usd = us.monthly_usage_usd + $1,
+			updated_at = NOW()
+		FROM groups g
+		WHERE us.id = $2
+			AND us.deleted_at IS NULL
+			AND us.group_id = g.id
+			AND g.deleted_at IS NULL
+	`
+
+	client := clientFromContext(ctx, r.client)
+	result, err := client.ExecContext(ctx, updateSQL, costUSD, id)
+	if err != nil {
+		return err
+	}
+
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+
+	if affected > 0 {
+		return nil
+	}
+
+	// affected == 0: the subscription does not exist or has been deleted
+	return service.ErrSubscriptionNotFound
+}
+
+func (r *userSubscriptionRepository) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) {
+	client := clientFromContext(ctx, r.client)
+	n, err := client.UserSubscription.Update().
+		Where(
+			usersubscription.StatusEQ(service.SubscriptionStatusActive),
+			usersubscription.ExpiresAtLTE(time.Now()),
+		).
+ SetStatus(service.SubscriptionStatusExpired). + Save(ctx) + return int64(n), err +} + +// Extra repository helpers (currently used only by integration tests). + +func (r *userSubscriptionRepository) ListExpired(ctx context.Context) ([]service.UserSubscription, error) { + client := clientFromContext(ctx, r.client) + subs, err := client.UserSubscription.Query(). + Where( + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtLTE(time.Now()), + ). + All(ctx) + if err != nil { + return nil, err + } + return userSubscriptionEntitiesToService(subs), nil +} + +func (r *userSubscriptionRepository) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + client := clientFromContext(ctx, r.client) + count, err := client.UserSubscription.Query().Where(usersubscription.GroupIDEQ(groupID)).Count(ctx) + return int64(count), err +} + +func (r *userSubscriptionRepository) CountActiveByGroupID(ctx context.Context, groupID int64) (int64, error) { + client := clientFromContext(ctx, r.client) + count, err := client.UserSubscription.Query(). + Where( + usersubscription.GroupIDEQ(groupID), + usersubscription.StatusEQ(service.SubscriptionStatusActive), + usersubscription.ExpiresAtGT(time.Now()), + ). + Count(ctx) + return int64(count), err +} + +func (r *userSubscriptionRepository) DeleteByGroupID(ctx context.Context, groupID int64) (int64, error) { + client := clientFromContext(ctx, r.client) + n, err := client.UserSubscription.Delete().Where(usersubscription.GroupIDEQ(groupID)).Exec(ctx) + return int64(n), err +} + +func userSubscriptionEntityToService(m *dbent.UserSubscription) *service.UserSubscription { + if m == nil { + return nil + } + out := &service.UserSubscription{ + ID: m.ID, + UserID: m.UserID, + GroupID: m.GroupID, + StartsAt: m.StartsAt, + ExpiresAt: m.ExpiresAt, + Status: m.Status, + DailyWindowStart: m.DailyWindowStart, + WeeklyWindowStart: m.WeeklyWindowStart, + MonthlyWindowStart: m.MonthlyWindowStart, + DailyUsageUSD: m.DailyUsageUsd, + WeeklyUsageUSD: m.WeeklyUsageUsd, + MonthlyUsageUSD: m.MonthlyUsageUsd, + AssignedBy: m.AssignedBy, + AssignedAt: m.AssignedAt, + Notes: derefString(m.Notes), + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + if m.Edges.Group != nil { + out.Group = groupEntityToService(m.Edges.Group) + } + if m.Edges.AssignedByUser != nil { + out.AssignedByUser = userEntityToService(m.Edges.AssignedByUser) + } + return out +} + +func userSubscriptionEntitiesToService(models []*dbent.UserSubscription) []service.UserSubscription { + out := make([]service.UserSubscription, 0, len(models)) + for i := range models { + if s := userSubscriptionEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} + +func applyUserSubscriptionEntityToService(dst *service.UserSubscription, src *dbent.UserSubscription) { + if dst == nil || src == nil { + return + } + dst.ID = src.ID + dst.CreatedAt = src.CreatedAt + dst.UpdatedAt = src.UpdatedAt +} diff --git a/backend/internal/repository/user_subscription_repo_integration_test.go b/backend/internal/repository/user_subscription_repo_integration_test.go new file mode 100644 index 00000000..2099e5d8 --- /dev/null +++ b/backend/internal/repository/user_subscription_repo_integration_test.go @@ -0,0 +1,747 @@ +//go:build integration + +package repository + +import ( + "context" + "fmt" + "testing" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + 
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/suite" +) + +type UserSubscriptionRepoSuite struct { + suite.Suite + ctx context.Context + client *dbent.Client + repo *userSubscriptionRepository +} + +func (s *UserSubscriptionRepoSuite) SetupTest() { + s.ctx = context.Background() + tx := testEntTx(s.T()) + s.client = tx.Client() + s.repo = NewUserSubscriptionRepository(s.client).(*userSubscriptionRepository) +} + +func TestUserSubscriptionRepoSuite(t *testing.T) { + suite.Run(t, new(UserSubscriptionRepoSuite)) +} + +func (s *UserSubscriptionRepoSuite) mustCreateUser(email string, role string) *service.User { + s.T().Helper() + + if role == "" { + role = service.RoleUser + } + + u, err := s.client.User.Create(). + SetEmail(email). + SetPasswordHash("test-password-hash"). + SetStatus(service.StatusActive). + SetRole(role). + Save(s.ctx) + s.Require().NoError(err, "create user") + return userEntityToService(u) +} + +func (s *UserSubscriptionRepoSuite) mustCreateGroup(name string) *service.Group { + s.T().Helper() + + g, err := s.client.Group.Create(). + SetName(name). + SetStatus(service.StatusActive). + Save(s.ctx) + s.Require().NoError(err, "create group") + return groupEntityToService(g) +} + +func (s *UserSubscriptionRepoSuite) mustCreateSubscription(userID, groupID int64, mutate func(*dbent.UserSubscriptionCreate)) *dbent.UserSubscription { + s.T().Helper() + + now := time.Now() + create := s.client.UserSubscription.Create(). + SetUserID(userID). + SetGroupID(groupID). + SetStartsAt(now.Add(-1 * time.Hour)). + SetExpiresAt(now.Add(24 * time.Hour)). + SetStatus(service.SubscriptionStatusActive). + SetAssignedAt(now). + SetNotes("") + + if mutate != nil { + mutate(create) + } + + sub, err := create.Save(s.ctx) + s.Require().NoError(err, "create user subscription") + return sub +} + +// --- Create / GetByID / Update / Delete --- + +func (s *UserSubscriptionRepoSuite) TestCreate() { + user := s.mustCreateUser("sub-create@test.com", service.RoleUser) + group := s.mustCreateGroup("g-create") + + sub := &service.UserSubscription{ + UserID: user.ID, + GroupID: group.ID, + Status: service.SubscriptionStatusActive, + ExpiresAt: time.Now().Add(24 * time.Hour), + } + + err := s.repo.Create(s.ctx, sub) + s.Require().NoError(err, "Create") + s.Require().NotZero(sub.ID, "expected ID to be set") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err, "GetByID") + s.Require().Equal(sub.UserID, got.UserID) + s.Require().Equal(sub.GroupID, got.GroupID) +} + +func (s *UserSubscriptionRepoSuite) TestGetByID_WithPreloads() { + user := s.mustCreateUser("preload@test.com", service.RoleUser) + group := s.mustCreateGroup("g-preload") + admin := s.mustCreateUser("admin@test.com", service.RoleAdmin) + + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetAssignedBy(admin.ID) + }) + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err, "GetByID") + s.Require().NotNil(got.User, "expected User preload") + s.Require().NotNil(got.Group, "expected Group preload") + s.Require().NotNil(got.AssignedByUser, "expected AssignedByUser preload") + s.Require().Equal(user.ID, got.User.ID) + s.Require().Equal(group.ID, got.Group.ID) + s.Require().Equal(admin.ID, got.AssignedByUser.ID) +} + +func (s *UserSubscriptionRepoSuite) TestGetByID_NotFound() { + _, err := s.repo.GetByID(s.ctx, 999999) + s.Require().Error(err, "expected error for non-existent ID") +} + +func (s 
*UserSubscriptionRepoSuite) TestUpdate() { + user := s.mustCreateUser("update@test.com", service.RoleUser) + group := s.mustCreateGroup("g-update") + created := s.mustCreateSubscription(user.ID, group.ID, nil) + + sub, err := s.repo.GetByID(s.ctx, created.ID) + s.Require().NoError(err, "GetByID") + + sub.Notes = "updated notes" + s.Require().NoError(s.repo.Update(s.ctx, sub), "Update") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err, "GetByID after update") + s.Require().Equal("updated notes", got.Notes) +} + +func (s *UserSubscriptionRepoSuite) TestDelete() { + user := s.mustCreateUser("delete@test.com", service.RoleUser) + group := s.mustCreateGroup("g-delete") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + err := s.repo.Delete(s.ctx, sub.ID) + s.Require().NoError(err, "Delete") + + _, err = s.repo.GetByID(s.ctx, sub.ID) + s.Require().Error(err, "expected error after delete") +} + +func (s *UserSubscriptionRepoSuite) TestDelete_Idempotent() { + s.Require().NoError(s.repo.Delete(s.ctx, 42424242), "Delete should be idempotent") +} + +// --- GetByUserIDAndGroupID / GetActiveByUserIDAndGroupID --- + +func (s *UserSubscriptionRepoSuite) TestGetByUserIDAndGroupID() { + user := s.mustCreateUser("byuser@test.com", service.RoleUser) + group := s.mustCreateGroup("g-byuser") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + got, err := s.repo.GetByUserIDAndGroupID(s.ctx, user.ID, group.ID) + s.Require().NoError(err, "GetByUserIDAndGroupID") + s.Require().Equal(sub.ID, got.ID) + s.Require().NotNil(got.Group, "expected Group preload") +} + +func (s *UserSubscriptionRepoSuite) TestGetByUserIDAndGroupID_NotFound() { + _, err := s.repo.GetByUserIDAndGroupID(s.ctx, 999999, 999999) + s.Require().Error(err, "expected error for non-existent pair") +} + +func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID() { + user := s.mustCreateUser("active@test.com", service.RoleUser) + group := s.mustCreateGroup("g-active") + + active := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(2 * time.Hour)) + }) + + got, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, group.ID) + s.Require().NoError(err, "GetActiveByUserIDAndGroupID") + s.Require().Equal(active.ID, got.ID) +} + +func (s *UserSubscriptionRepoSuite) TestGetActiveByUserIDAndGroupID_ExpiredIgnored() { + user := s.mustCreateUser("expired@test.com", service.RoleUser) + group := s.mustCreateGroup("g-expired") + + s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-2 * time.Hour)) + }) + + _, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, group.ID) + s.Require().Error(err, "expected error for expired subscription") +} + +// --- ListByUserID / ListActiveByUserID --- + +func (s *UserSubscriptionRepoSuite) TestListByUserID() { + user := s.mustCreateUser("listby@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-list1") + g2 := s.mustCreateGroup("g-list2") + + s.mustCreateSubscription(user.ID, g1.ID, nil) + s.mustCreateSubscription(user.ID, g2.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + subs, err := s.repo.ListByUserID(s.ctx, user.ID) + s.Require().NoError(err, "ListByUserID") + s.Require().Len(subs, 2) + for _, sub := range subs { + s.Require().NotNil(sub.Group, "expected Group preload") + } +} + +func (s *UserSubscriptionRepoSuite) 
TestListActiveByUserID() { + user := s.mustCreateUser("listactive@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-act1") + g2 := s.mustCreateGroup("g-act2") + + s.mustCreateSubscription(user.ID, g1.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) + }) + s.mustCreateSubscription(user.ID, g2.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + subs, err := s.repo.ListActiveByUserID(s.ctx, user.ID) + s.Require().NoError(err, "ListActiveByUserID") + s.Require().Len(subs, 1) + s.Require().Equal(service.SubscriptionStatusActive, subs[0].Status) +} + +// --- ListByGroupID --- + +func (s *UserSubscriptionRepoSuite) TestListByGroupID() { + user1 := s.mustCreateUser("u1@test.com", service.RoleUser) + user2 := s.mustCreateUser("u2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-listgrp") + + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) + + subs, page, err := s.repo.ListByGroupID(s.ctx, group.ID, pagination.PaginationParams{Page: 1, PageSize: 10}) + s.Require().NoError(err, "ListByGroupID") + s.Require().Len(subs, 2) + s.Require().Equal(int64(2), page.Total) + for _, sub := range subs { + s.Require().NotNil(sub.User, "expected User preload") + s.Require().NotNil(sub.Group, "expected Group preload") + } +} + +// --- List with filters --- + +func (s *UserSubscriptionRepoSuite) TestList_NoFilters() { + user := s.mustCreateUser("list@test.com", service.RoleUser) + group := s.mustCreateGroup("g-list") + s.mustCreateSubscription(user.ID, group.ID, nil) + + subs, page, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, "") + s.Require().NoError(err, "List") + s.Require().Len(subs, 1) + s.Require().Equal(int64(1), page.Total) +} + +func (s *UserSubscriptionRepoSuite) TestList_FilterByUserID() { + user1 := s.mustCreateUser("filter1@test.com", service.RoleUser) + user2 := s.mustCreateUser("filter2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-filter") + + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) + + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, &user1.ID, nil, "") + s.Require().NoError(err) + s.Require().Len(subs, 1) + s.Require().Equal(user1.ID, subs[0].UserID) +} + +func (s *UserSubscriptionRepoSuite) TestList_FilterByGroupID() { + user := s.mustCreateUser("grpfilter@test.com", service.RoleUser) + g1 := s.mustCreateGroup("g-f1") + g2 := s.mustCreateGroup("g-f2") + + s.mustCreateSubscription(user.ID, g1.ID, nil) + s.mustCreateSubscription(user.ID, g2.ID, nil) + + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, &g1.ID, "") + s.Require().NoError(err) + s.Require().Len(subs, 1) + s.Require().Equal(g1.ID, subs[0].GroupID) +} + +func (s *UserSubscriptionRepoSuite) TestList_FilterByStatus() { + user1 := s.mustCreateUser("statfilter1@test.com", service.RoleUser) + user2 := s.mustCreateUser("statfilter2@test.com", service.RoleUser) + group1 := s.mustCreateGroup("g-stat-1") + group2 := s.mustCreateGroup("g-stat-2") + + s.mustCreateSubscription(user1.ID, group1.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusActive) + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) + }) + s.mustCreateSubscription(user2.ID, group2.ID, func(c *dbent.UserSubscriptionCreate) { + 
c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + subs, _, err := s.repo.List(s.ctx, pagination.PaginationParams{Page: 1, PageSize: 10}, nil, nil, service.SubscriptionStatusExpired) + s.Require().NoError(err) + s.Require().Len(subs, 1) + s.Require().Equal(service.SubscriptionStatusExpired, subs[0].Status) +} + +// --- Usage tracking --- + +func (s *UserSubscriptionRepoSuite) TestIncrementUsage() { + user := s.mustCreateUser("usage@test.com", service.RoleUser) + group := s.mustCreateGroup("g-usage") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + err := s.repo.IncrementUsage(s.ctx, sub.ID, 1.25) + s.Require().NoError(err, "IncrementUsage") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().InDelta(1.25, got.DailyUsageUSD, 1e-6) + s.Require().InDelta(1.25, got.WeeklyUsageUSD, 1e-6) + s.Require().InDelta(1.25, got.MonthlyUsageUSD, 1e-6) +} + +func (s *UserSubscriptionRepoSuite) TestIncrementUsage_Accumulates() { + user := s.mustCreateUser("accum@test.com", service.RoleUser) + group := s.mustCreateGroup("g-accum") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + s.Require().NoError(s.repo.IncrementUsage(s.ctx, sub.ID, 1.0)) + s.Require().NoError(s.repo.IncrementUsage(s.ctx, sub.ID, 2.5)) + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().InDelta(3.5, got.DailyUsageUSD, 1e-6) +} + +func (s *UserSubscriptionRepoSuite) TestActivateWindows() { + user := s.mustCreateUser("activate@test.com", service.RoleUser) + group := s.mustCreateGroup("g-activate") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + activateAt := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + err := s.repo.ActivateWindows(s.ctx, sub.ID, activateAt) + s.Require().NoError(err, "ActivateWindows") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().NotNil(got.DailyWindowStart) + s.Require().NotNil(got.WeeklyWindowStart) + s.Require().NotNil(got.MonthlyWindowStart) + s.Require().WithinDuration(activateAt, *got.DailyWindowStart, time.Microsecond) +} + +func (s *UserSubscriptionRepoSuite) TestResetDailyUsage() { + user := s.mustCreateUser("resetd@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetd") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetDailyUsageUsd(10.0) + c.SetWeeklyUsageUsd(20.0) + }) + + resetAt := time.Date(2025, 1, 2, 0, 0, 0, 0, time.UTC) + err := s.repo.ResetDailyUsage(s.ctx, sub.ID, resetAt) + s.Require().NoError(err, "ResetDailyUsage") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().InDelta(0.0, got.DailyUsageUSD, 1e-6) + s.Require().InDelta(20.0, got.WeeklyUsageUSD, 1e-6) + s.Require().NotNil(got.DailyWindowStart) + s.Require().WithinDuration(resetAt, *got.DailyWindowStart, time.Microsecond) +} + +func (s *UserSubscriptionRepoSuite) TestResetWeeklyUsage() { + user := s.mustCreateUser("resetw@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetw") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetWeeklyUsageUsd(15.0) + c.SetMonthlyUsageUsd(30.0) + }) + + resetAt := time.Date(2025, 1, 6, 0, 0, 0, 0, time.UTC) + err := s.repo.ResetWeeklyUsage(s.ctx, sub.ID, resetAt) + s.Require().NoError(err, "ResetWeeklyUsage") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().InDelta(0.0, got.WeeklyUsageUSD, 1e-6) + 
s.Require().InDelta(30.0, got.MonthlyUsageUSD, 1e-6) + s.Require().NotNil(got.WeeklyWindowStart) + s.Require().WithinDuration(resetAt, *got.WeeklyWindowStart, time.Microsecond) +} + +func (s *UserSubscriptionRepoSuite) TestResetMonthlyUsage() { + user := s.mustCreateUser("resetm@test.com", service.RoleUser) + group := s.mustCreateGroup("g-resetm") + sub := s.mustCreateSubscription(user.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetMonthlyUsageUsd(25.0) + }) + + resetAt := time.Date(2025, 2, 1, 0, 0, 0, 0, time.UTC) + err := s.repo.ResetMonthlyUsage(s.ctx, sub.ID, resetAt) + s.Require().NoError(err, "ResetMonthlyUsage") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().InDelta(0.0, got.MonthlyUsageUSD, 1e-6) + s.Require().NotNil(got.MonthlyWindowStart) + s.Require().WithinDuration(resetAt, *got.MonthlyWindowStart, time.Microsecond) +} + +// --- UpdateStatus / ExtendExpiry / UpdateNotes --- + +func (s *UserSubscriptionRepoSuite) TestUpdateStatus() { + user := s.mustCreateUser("status@test.com", service.RoleUser) + group := s.mustCreateGroup("g-status") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + err := s.repo.UpdateStatus(s.ctx, sub.ID, service.SubscriptionStatusExpired) + s.Require().NoError(err, "UpdateStatus") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().Equal(service.SubscriptionStatusExpired, got.Status) +} + +func (s *UserSubscriptionRepoSuite) TestExtendExpiry() { + user := s.mustCreateUser("extend@test.com", service.RoleUser) + group := s.mustCreateGroup("g-extend") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + newExpiry := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) + err := s.repo.ExtendExpiry(s.ctx, sub.ID, newExpiry) + s.Require().NoError(err, "ExtendExpiry") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().WithinDuration(newExpiry, got.ExpiresAt, time.Microsecond) +} + +func (s *UserSubscriptionRepoSuite) TestUpdateNotes() { + user := s.mustCreateUser("notes@test.com", service.RoleUser) + group := s.mustCreateGroup("g-notes") + sub := s.mustCreateSubscription(user.ID, group.ID, nil) + + err := s.repo.UpdateNotes(s.ctx, sub.ID, "VIP user") + s.Require().NoError(err, "UpdateNotes") + + got, err := s.repo.GetByID(s.ctx, sub.ID) + s.Require().NoError(err) + s.Require().Equal("VIP user", got.Notes) +} + +// --- ListExpired / BatchUpdateExpiredStatus --- + +func (s *UserSubscriptionRepoSuite) TestListExpired() { + user := s.mustCreateUser("listexp@test.com", service.RoleUser) + groupActive := s.mustCreateGroup("g-listexp-active") + groupExpired := s.mustCreateGroup("g-listexp-expired") + + s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) + }) + s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + expired, err := s.repo.ListExpired(s.ctx) + s.Require().NoError(err, "ListExpired") + s.Require().Len(expired, 1) +} + +func (s *UserSubscriptionRepoSuite) TestBatchUpdateExpiredStatus() { + user := s.mustCreateUser("batch@test.com", service.RoleUser) + groupFuture := s.mustCreateGroup("g-batch-future") + groupPast := s.mustCreateGroup("g-batch-past") + + active := s.mustCreateSubscription(user.ID, groupFuture.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) + }) + expiredActive := 
s.mustCreateSubscription(user.ID, groupPast.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + affected, err := s.repo.BatchUpdateExpiredStatus(s.ctx) + s.Require().NoError(err, "BatchUpdateExpiredStatus") + s.Require().Equal(int64(1), affected) + + gotActive, _ := s.repo.GetByID(s.ctx, active.ID) + s.Require().Equal(service.SubscriptionStatusActive, gotActive.Status) + + gotExpired, _ := s.repo.GetByID(s.ctx, expiredActive.ID) + s.Require().Equal(service.SubscriptionStatusExpired, gotExpired.Status) +} + +// --- ExistsByUserIDAndGroupID --- + +func (s *UserSubscriptionRepoSuite) TestExistsByUserIDAndGroupID() { + user := s.mustCreateUser("exists@test.com", service.RoleUser) + group := s.mustCreateGroup("g-exists") + + s.mustCreateSubscription(user.ID, group.ID, nil) + + exists, err := s.repo.ExistsByUserIDAndGroupID(s.ctx, user.ID, group.ID) + s.Require().NoError(err, "ExistsByUserIDAndGroupID") + s.Require().True(exists) + + notExists, err := s.repo.ExistsByUserIDAndGroupID(s.ctx, user.ID, 999999) + s.Require().NoError(err) + s.Require().False(notExists) +} + +// --- CountByGroupID / CountActiveByGroupID --- + +func (s *UserSubscriptionRepoSuite) TestCountByGroupID() { + user1 := s.mustCreateUser("cnt1@test.com", service.RoleUser) + user2 := s.mustCreateUser("cnt2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-count") + + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetStatus(service.SubscriptionStatusExpired) + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) + }) + + count, err := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "CountByGroupID") + s.Require().Equal(int64(2), count) +} + +func (s *UserSubscriptionRepoSuite) TestCountActiveByGroupID() { + user1 := s.mustCreateUser("cntact1@test.com", service.RoleUser) + user2 := s.mustCreateUser("cntact2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-cntact") + + s.mustCreateSubscription(user1.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(24 * time.Hour)) + }) + s.mustCreateSubscription(user2.ID, group.ID, func(c *dbent.UserSubscriptionCreate) { + c.SetExpiresAt(time.Now().Add(-24 * time.Hour)) // expired by time + }) + + count, err := s.repo.CountActiveByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "CountActiveByGroupID") + s.Require().Equal(int64(1), count, "only future expiry counts as active") +} + +// --- DeleteByGroupID --- + +func (s *UserSubscriptionRepoSuite) TestDeleteByGroupID() { + user1 := s.mustCreateUser("delgrp1@test.com", service.RoleUser) + user2 := s.mustCreateUser("delgrp2@test.com", service.RoleUser) + group := s.mustCreateGroup("g-delgrp") + + s.mustCreateSubscription(user1.ID, group.ID, nil) + s.mustCreateSubscription(user2.ID, group.ID, nil) + + affected, err := s.repo.DeleteByGroupID(s.ctx, group.ID) + s.Require().NoError(err, "DeleteByGroupID") + s.Require().Equal(int64(2), affected) + + count, _ := s.repo.CountByGroupID(s.ctx, group.ID) + s.Require().Zero(count) +} + +// --- Combined scenario --- + +func (s *UserSubscriptionRepoSuite) TestActiveExpiredBoundaries_UsageAndReset_BatchUpdateExpiredStatus() { + user := s.mustCreateUser("subr@example.com", service.RoleUser) + groupActive := s.mustCreateGroup("g-subr-active") + groupExpired := s.mustCreateGroup("g-subr-expired") + + active := s.mustCreateSubscription(user.ID, groupActive.ID, func(c *dbent.UserSubscriptionCreate) { + 
c.SetExpiresAt(time.Now().Add(2 * time.Hour))
+	})
+	expiredActive := s.mustCreateSubscription(user.ID, groupExpired.ID, func(c *dbent.UserSubscriptionCreate) {
+		c.SetExpiresAt(time.Now().Add(-2 * time.Hour))
+	})
+
+	got, err := s.repo.GetActiveByUserIDAndGroupID(s.ctx, user.ID, groupActive.ID)
+	s.Require().NoError(err, "GetActiveByUserIDAndGroupID")
+	s.Require().Equal(active.ID, got.ID, "expected active subscription")
+
+	activateAt := time.Now().Add(-25 * time.Hour)
+	s.Require().NoError(s.repo.ActivateWindows(s.ctx, active.ID, activateAt), "ActivateWindows")
+	s.Require().NoError(s.repo.IncrementUsage(s.ctx, active.ID, 1.25), "IncrementUsage")
+
+	after, err := s.repo.GetByID(s.ctx, active.ID)
+	s.Require().NoError(err, "GetByID")
+	s.Require().InDelta(1.25, after.DailyUsageUSD, 1e-6)
+	s.Require().InDelta(1.25, after.WeeklyUsageUSD, 1e-6)
+	s.Require().InDelta(1.25, after.MonthlyUsageUSD, 1e-6)
+	s.Require().NotNil(after.DailyWindowStart, "expected DailyWindowStart activated")
+	s.Require().NotNil(after.WeeklyWindowStart, "expected WeeklyWindowStart activated")
+	s.Require().NotNil(after.MonthlyWindowStart, "expected MonthlyWindowStart activated")
+
+	resetAt := time.Now().Truncate(time.Microsecond) // truncate to microsecond for DB precision
+	s.Require().NoError(s.repo.ResetDailyUsage(s.ctx, active.ID, resetAt), "ResetDailyUsage")
+	afterReset, err := s.repo.GetByID(s.ctx, active.ID)
+	s.Require().NoError(err, "GetByID after reset")
+	s.Require().InDelta(0.0, afterReset.DailyUsageUSD, 1e-6)
+	s.Require().NotNil(afterReset.DailyWindowStart)
+	s.Require().WithinDuration(resetAt, *afterReset.DailyWindowStart, time.Microsecond)
+
+	affected, err := s.repo.BatchUpdateExpiredStatus(s.ctx)
+	s.Require().NoError(err, "BatchUpdateExpiredStatus")
+	s.Require().Equal(int64(1), affected, "expected 1 affected row")
+
+	updated, err := s.repo.GetByID(s.ctx, expiredActive.ID)
+	s.Require().NoError(err, "GetByID expired")
+	s.Require().Equal(service.SubscriptionStatusExpired, updated.Status, "expected status expired")
+}
+
+// --- Soft-delete filtering tests ---
+
+func (s *UserSubscriptionRepoSuite) TestIncrementUsage_SoftDeletedGroup() {
+	user := s.mustCreateUser("softdeleted@test.com", service.RoleUser)
+	group := s.mustCreateGroup("g-softdeleted")
+	sub := s.mustCreateSubscription(user.ID, group.ID, nil)
+
+	// Soft-delete the group
+	_, err := s.client.Group.UpdateOneID(group.ID).SetDeletedAt(time.Now()).Save(s.ctx)
+	s.Require().NoError(err, "soft delete group")
+
+	// IncrementUsage should fail because the group has been soft-deleted
+	err = s.repo.IncrementUsage(s.ctx, sub.ID, 1.0)
+	s.Require().Error(err, "should fail for soft-deleted group")
+	s.Require().ErrorIs(err, service.ErrSubscriptionNotFound)
+}
+
+func (s *UserSubscriptionRepoSuite) TestIncrementUsage_NotFound() {
+	err := s.repo.IncrementUsage(s.ctx, 999999, 1.0)
+	s.Require().Error(err, "should fail for non-existent subscription")
+	s.Require().ErrorIs(err, service.ErrSubscriptionNotFound)
+}
+
+// --- Nil-input tests ---
+
+func (s *UserSubscriptionRepoSuite) TestCreate_NilInput() {
+	err := s.repo.Create(s.ctx, nil)
+	s.Require().Error(err, "Create should fail with nil input")
+	s.Require().ErrorIs(err, service.ErrSubscriptionNilInput)
+}
+
+func (s *UserSubscriptionRepoSuite) TestUpdate_NilInput() {
+	err := s.repo.Update(s.ctx, nil)
+	s.Require().Error(err, "Update should fail with nil input")
+	s.Require().ErrorIs(err, service.ErrSubscriptionNilInput)
+}
+
+// --- Concurrent usage update tests ---
+
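+// The test below hammers IncrementUsage from multiple goroutines to show why
+// the repository accumulates usage with a single relative UPDATE rather than a
+// read-modify-write cycle. A lost-update version (hypothetical sketch, not code
+// from this repo) would look like:
+//
+//	sub, _ := repo.GetByID(ctx, id)   // two goroutines both read 0.0
+//	sub.DailyUsageUSD += cost         // each computes 1.5 locally
+//	_ = repo.Update(ctx, sub)         // the second write clobbers the first
+//
+// Because IncrementUsage issues `SET daily_usage_usd = daily_usage_usd + $1`,
+// the database serializes the increments, so N goroutines adding c each always
+// end at exactly N*c.
+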
+func (s *UserSubscriptionRepoSuite) TestIncrementUsage_Concurrent() {
+	user := s.mustCreateUser("concurrent@test.com", service.RoleUser)
+	group := s.mustCreateGroup("g-concurrent")
+	sub := s.mustCreateSubscription(user.ID, group.ID, nil)
+
+	const numGoroutines = 10
+	const incrementPerGoroutine = 1.5
+
+	// Spawn multiple goroutines calling IncrementUsage concurrently
+	errCh := make(chan error, numGoroutines)
+	for i := 0; i < numGoroutines; i++ {
+		go func() {
+			errCh <- s.repo.IncrementUsage(s.ctx, sub.ID, incrementPerGoroutine)
+		}()
+	}
+
+	// Wait for all goroutines to finish
+	for i := 0; i < numGoroutines; i++ {
+		err := <-errCh
+		s.Require().NoError(err, "IncrementUsage should succeed")
+	}
+
+	// Verify the accumulated result is correct
+	got, err := s.repo.GetByID(s.ctx, sub.ID)
+	s.Require().NoError(err)
+	expectedUsage := float64(numGoroutines) * incrementPerGoroutine
+	s.Require().InDelta(expectedUsage, got.DailyUsageUSD, 1e-6, "daily usage should be correctly accumulated")
+	s.Require().InDelta(expectedUsage, got.WeeklyUsageUSD, 1e-6, "weekly usage should be correctly accumulated")
+	s.Require().InDelta(expectedUsage, got.MonthlyUsageUSD, 1e-6, "monthly usage should be correctly accumulated")
+}
+
+func (s *UserSubscriptionRepoSuite) TestTxContext_RollbackIsolation() {
+	baseClient := testEntClient(s.T())
+	tx, err := baseClient.Tx(context.Background())
+	s.Require().NoError(err, "begin tx")
+	defer func() {
+		if tx != nil {
+			_ = tx.Rollback()
+		}
+	}()
+
+	txCtx := dbent.NewTxContext(context.Background(), tx)
+	suffix := fmt.Sprintf("%d", time.Now().UnixNano())
+
+	userEnt, err := tx.Client().User.Create().
+		SetEmail("tx-user-" + suffix + "@example.com").
+		SetPasswordHash("test").
+		Save(txCtx)
+	s.Require().NoError(err, "create user in tx")
+
+	groupEnt, err := tx.Client().Group.Create().
+		SetName("tx-group-" + suffix).
+		Save(txCtx)
+	s.Require().NoError(err, "create group in tx")
+
+	repo := NewUserSubscriptionRepository(baseClient)
+	sub := &service.UserSubscription{
+		UserID:     userEnt.ID,
+		GroupID:    groupEnt.ID,
+		ExpiresAt:  time.Now().AddDate(0, 0, 30),
+		Status:     service.SubscriptionStatusActive,
+		AssignedAt: time.Now(),
+		Notes:      "tx",
+	}
+	s.Require().NoError(repo.Create(txCtx, sub), "create subscription in tx")
+	s.Require().NoError(repo.UpdateNotes(txCtx, sub.ID, "tx-note"), "update subscription in tx")
+
+	s.Require().NoError(tx.Rollback(), "rollback tx")
+	tx = nil
+
+	_, err = repo.GetByID(context.Background(), sub.ID)
+	s.Require().ErrorIs(err, service.ErrSubscriptionNotFound)
+}
diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go
new file mode 100644
index 00000000..91ef9413
--- /dev/null
+++ b/backend/internal/repository/wire.go
@@ -0,0 +1,139 @@
+package repository
+
+import (
+	"database/sql"
+	"errors"
+
+	entsql "entgo.io/ent/dialect/sql"
+	"github.com/Wei-Shaw/sub2api/ent"
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/google/wire"
+	"github.com/redis/go-redis/v9"
+)
+
+// ProvideConcurrencyCache creates the concurrency-control cache, reading TTL parameters from config.
+// Performance note: the TTL is configurable to support long-running LLM request scenarios.
+func ProvideConcurrencyCache(rdb *redis.Client, cfg *config.Config) service.ConcurrencyCache {
+	waitTTLSeconds := int(cfg.Gateway.Scheduling.StickySessionWaitTimeout.Seconds())
+	if cfg.Gateway.Scheduling.FallbackWaitTimeout > cfg.Gateway.Scheduling.StickySessionWaitTimeout {
+		waitTTLSeconds = int(cfg.Gateway.Scheduling.FallbackWaitTimeout.Seconds())
+	}
+	if waitTTLSeconds <= 0 {
+		waitTTLSeconds = cfg.Gateway.ConcurrencySlotTTLMinutes * 60
+	}
+	return NewConcurrencyCache(rdb, cfg.Gateway.ConcurrencySlotTTLMinutes, waitTTLSeconds)
+}
+
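+// Worked example of the fallback chain above (illustrative numbers, not
+// shipped defaults): with StickySessionWaitTimeout=90s and
+// FallbackWaitTimeout=120s, the wait TTL becomes 120s (the larger wait wins);
+// with both unset (0s), it falls back to ConcurrencySlotTTLMinutes*60, e.g.
+// 10 minutes -> 600s, so the cache is never constructed with a non-positive
+// wait TTL.
+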
+// ProvideGitHubReleaseClient creates the GitHub release client.
+// Proxy settings are read from config so that servers in mainland China can reach GitHub through a proxy.
+func ProvideGitHubReleaseClient(cfg *config.Config) service.GitHubReleaseClient {
+	return NewGitHubReleaseClient(cfg.Update.ProxyURL)
+}
+
+// ProvidePricingRemoteClient creates the remote client for pricing data.
+// Proxy settings are read from config so that servers in mainland China can fetch the pricing data hosted on GitHub through a proxy.
+func ProvidePricingRemoteClient(cfg *config.Config) service.PricingRemoteClient {
+	return NewPricingRemoteClient(cfg.Update.ProxyURL)
+}
+
+// ProviderSet is the Wire provider set for all repositories
+var ProviderSet = wire.NewSet(
+	NewUserRepository,
+	NewAPIKeyRepository,
+	NewGroupRepository,
+	NewAccountRepository,
+	NewProxyRepository,
+	NewRedeemCodeRepository,
+	NewPromoCodeRepository,
+	NewUsageLogRepository,
+	NewDashboardAggregationRepository,
+	NewSettingRepository,
+	NewOpsRepository,
+	NewUserSubscriptionRepository,
+	NewUserAttributeDefinitionRepository,
+	NewUserAttributeValueRepository,
+
+	// Cache implementations
+	NewGatewayCache,
+	NewBillingCache,
+	NewAPIKeyCache,
+	NewTempUnschedCache,
+	NewTimeoutCounterCache,
+	ProvideConcurrencyCache,
+	NewDashboardCache,
+	NewEmailCache,
+	NewIdentityCache,
+	NewRedeemCache,
+	NewUpdateCache,
+	NewGeminiTokenCache,
+	NewSchedulerCache,
+	NewSchedulerOutboxRepository,
+	NewProxyLatencyCache,
+
+	// HTTP service ports (DI Strategy A: return interface directly)
+	NewTurnstileVerifier,
+	ProvidePricingRemoteClient,
+	ProvideGitHubReleaseClient,
+	NewProxyExitInfoProber,
+	NewClaudeUsageFetcher,
+	NewClaudeOAuthClient,
+	NewHTTPUpstream,
+	NewOpenAIOAuthClient,
+	NewGeminiOAuthClient,
+	NewGeminiCliCodeAssistClient,
+
+	ProvideEnt,
+	ProvideSQLDB,
+	ProvideRedis,
+)
+
+// ProvideEnt provides the Ent client for dependency injection.
+//
+// It is a wrapper around InitEnt that matches Wire's provider function
+// signature. Wire analyzes the dependency graph at compile time and
+// generates the initialization code automatically.
+//
+// Depends on: config.Config
+// Provides: *ent.Client
+func ProvideEnt(cfg *config.Config) (*ent.Client, error) {
+	client, _, err := InitEnt(cfg)
+	return client, err
+}
+
+// ProvideSQLDB extracts the underlying *sql.DB connection from the Ent client.
+//
+// Some repositories need to run raw SQL directly (e.g. complex bulk updates
+// or aggregation queries) and therefore need access to the underlying sql.DB
+// rather than going through the Ent ORM.
+//
+// Design notes:
+//   - Ent is backed by sql.DB, which is reachable through the Driver interface
+//   - This design allows mixing Ent and raw SQL within the same transaction
+//
+// Depends on: *ent.Client
+// Provides: *sql.DB
+func ProvideSQLDB(client *ent.Client) (*sql.DB, error) {
+	if client == nil {
+		return nil, errors.New("nil ent client")
+	}
+	// Get the underlying driver from the Ent client
+	drv, ok := client.Driver().(*entsql.Driver)
+	if !ok {
+		return nil, errors.New("ent driver does not expose *sql.DB")
+	}
+	// Return the sql.DB instance held by the driver
+	return drv.DB(), nil
+}
+
+// ProvideRedis provides the Redis client for dependency injection.
+//
+// Redis is used for:
+//   - distributed locks (e.g. concurrency control)
+//   - caching (e.g. user sessions, API response caches)
+//   - rate limiting
+//   - real-time statistics
+//
+// Depends on: config.Config
+// Provides: *redis.Client
+func ProvideRedis(cfg *config.Config) *redis.Client {
+	return InitRedis(cfg)
+}
diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go
new file mode 100644
index 00000000..7d2b789f
--- /dev/null
+++ b/backend/internal/server/api_contract_test.go
@@ -0,0 +1,1484 @@
+//go:build unit
+
+package server_test
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"io"
+	"math"
+	"net/http"
+	"net/http/httptest"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	adminhandler "github.com/Wei-Shaw/sub2api/internal/handler/admin"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+
"github.com/stretchr/testify/require" +) + +func TestAPIContracts(t *testing.T) { + gin.SetMode(gin.TestMode) + + tests := []struct { + name string + setup func(t *testing.T, deps *contractDeps) + method string + path string + body string + headers map[string]string + wantStatus int + wantJSON string + }{ + { + name: "GET /api/v1/auth/me", + method: http.MethodGet, + path: "/api/v1/auth/me", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "id": 1, + "email": "alice@example.com", + "username": "alice", + "notes": "hello", + "role": "user", + "balance": 12.5, + "concurrency": 5, + "status": "active", + "allowed_groups": null, + "created_at": "2025-01-02T03:04:05Z", + "updated_at": "2025-01-02T03:04:05Z", + "run_mode": "standard" + } + }`, + }, + { + name: "POST /api/v1/keys", + method: http.MethodPost, + path: "/api/v1/keys", + body: `{"name":"Key One","custom_key":"sk_custom_1234567890"}`, + headers: map[string]string{ + "Content-Type": "application/json", + }, + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "id": 100, + "user_id": 1, + "key": "sk_custom_1234567890", + "name": "Key One", + "group_id": null, + "status": "active", + "ip_whitelist": null, + "ip_blacklist": null, + "created_at": "2025-01-02T03:04:05Z", + "updated_at": "2025-01-02T03:04:05Z" + } + }`, + }, + { + name: "GET /api/v1/keys (paginated)", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + deps.apiKeyRepo.MustSeed(&service.APIKey{ + ID: 100, + UserID: 1, + Key: "sk_custom_1234567890", + Name: "Key One", + Status: service.StatusActive, + CreatedAt: deps.now, + UpdatedAt: deps.now, + }) + }, + method: http.MethodGet, + path: "/api/v1/keys?page=1&page_size=10", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "items": [ + { + "id": 100, + "user_id": 1, + "key": "sk_custom_1234567890", + "name": "Key One", + "group_id": null, + "status": "active", + "ip_whitelist": null, + "ip_blacklist": null, + "created_at": "2025-01-02T03:04:05Z", + "updated_at": "2025-01-02T03:04:05Z" + } + ], + "total": 1, + "page": 1, + "page_size": 10, + "pages": 1 + } + }`, + }, + { + name: "GET /api/v1/usage/stats", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + deps.usageRepo.SetUserLogs(1, []service.UsageLog{ + { + ID: 1, + UserID: 1, + APIKeyID: 100, + AccountID: 200, + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + CacheCreationTokens: 1, + CacheReadTokens: 2, + TotalCost: 0.5, + ActualCost: 0.5, + DurationMs: ptr(100), + CreatedAt: deps.now, + }, + { + ID: 2, + UserID: 1, + APIKeyID: 100, + AccountID: 200, + Model: "claude-3", + InputTokens: 5, + OutputTokens: 15, + TotalCost: 0.25, + ActualCost: 0.25, + DurationMs: ptr(300), + CreatedAt: deps.now, + }, + }) + }, + method: http.MethodGet, + path: "/api/v1/usage/stats?start_date=2025-01-01&end_date=2025-01-02", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "total_requests": 2, + "total_input_tokens": 15, + "total_output_tokens": 35, + "total_cache_tokens": 3, + "total_tokens": 53, + "total_cost": 0.75, + "total_actual_cost": 0.75, + "average_duration_ms": 200 + } + }`, + }, + { + name: "GET /api/v1/usage (paginated)", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + deps.usageRepo.SetUserLogs(1, []service.UsageLog{ + { + ID: 1, + UserID: 1, + APIKeyID: 100, + AccountID: 200, + RequestID: "req_123", + Model: "claude-3", + InputTokens: 10, + 
OutputTokens: 20, + CacheCreationTokens: 1, + CacheReadTokens: 2, + TotalCost: 0.5, + ActualCost: 0.5, + RateMultiplier: 1, + BillingType: service.BillingTypeBalance, + Stream: true, + DurationMs: ptr(100), + FirstTokenMs: ptr(50), + CreatedAt: deps.now, + }, + }) + }, + method: http.MethodGet, + path: "/api/v1/usage?page=1&page_size=10", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "items": [ + { + "id": 1, + "user_id": 1, + "api_key_id": 100, + "account_id": 200, + "request_id": "req_123", + "model": "claude-3", + "group_id": null, + "subscription_id": null, + "input_tokens": 10, + "output_tokens": 20, + "cache_creation_tokens": 1, + "cache_read_tokens": 2, + "cache_creation_5m_tokens": 0, + "cache_creation_1h_tokens": 0, + "input_cost": 0, + "output_cost": 0, + "cache_creation_cost": 0, + "cache_read_cost": 0, + "total_cost": 0.5, + "actual_cost": 0.5, + "rate_multiplier": 1, + "account_rate_multiplier": null, + "billing_type": 0, + "stream": true, + "duration_ms": 100, + "first_token_ms": 50, + "image_count": 0, + "image_size": null, + "created_at": "2025-01-02T03:04:05Z", + "user_agent": null + } + ], + "total": 1, + "page": 1, + "page_size": 10, + "pages": 1 + } + }`, + }, + { + name: "GET /api/v1/admin/settings", + setup: func(t *testing.T, deps *contractDeps) { + t.Helper() + deps.settingRepo.SetAll(map[string]string{ + service.SettingKeyRegistrationEnabled: "true", + service.SettingKeyEmailVerifyEnabled: "false", + + service.SettingKeySMTPHost: "smtp.example.com", + service.SettingKeySMTPPort: "587", + service.SettingKeySMTPUsername: "user", + service.SettingKeySMTPPassword: "secret", + service.SettingKeySMTPFrom: "no-reply@example.com", + service.SettingKeySMTPFromName: "Sub2API", + service.SettingKeySMTPUseTLS: "true", + + service.SettingKeyTurnstileEnabled: "true", + service.SettingKeyTurnstileSiteKey: "site-key", + service.SettingKeyTurnstileSecretKey: "secret-key", + + service.SettingKeySiteName: "Sub2API", + service.SettingKeySiteLogo: "", + service.SettingKeySiteSubtitle: "Subtitle", + service.SettingKeyAPIBaseURL: "https://api.example.com", + service.SettingKeyContactInfo: "support", + service.SettingKeyDocURL: "https://docs.example.com", + + service.SettingKeyDefaultConcurrency: "5", + service.SettingKeyDefaultBalance: "1.25", + + service.SettingKeyOpsMonitoringEnabled: "false", + service.SettingKeyOpsRealtimeMonitoringEnabled: "true", + service.SettingKeyOpsQueryModeDefault: "auto", + service.SettingKeyOpsMetricsIntervalSeconds: "60", + }) + }, + method: http.MethodGet, + path: "/api/v1/admin/settings", + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "registration_enabled": true, + "email_verify_enabled": false, + "smtp_host": "smtp.example.com", + "smtp_port": 587, + "smtp_username": "user", + "smtp_password_configured": true, + "smtp_from_email": "no-reply@example.com", + "smtp_from_name": "Sub2API", + "smtp_use_tls": true, + "turnstile_enabled": true, + "turnstile_site_key": "site-key", + "turnstile_secret_key_configured": true, + "linuxdo_connect_enabled": false, + "linuxdo_connect_client_id": "", + "linuxdo_connect_client_secret_configured": false, + "linuxdo_connect_redirect_url": "", + "ops_monitoring_enabled": false, + "ops_realtime_monitoring_enabled": true, + "ops_query_mode_default": "auto", + "ops_metrics_interval_seconds": 60, + "site_name": "Sub2API", + "site_logo": "", + "site_subtitle": "Subtitle", + "api_base_url": "https://api.example.com", + "contact_info": 
"support", + "doc_url": "https://docs.example.com", + "default_concurrency": 5, + "default_balance": 1.25, + "enable_model_fallback": false, + "fallback_model_anthropic": "claude-3-5-sonnet-20241022", + "fallback_model_antigravity": "gemini-2.5-pro", + "fallback_model_gemini": "gemini-2.5-pro", + "fallback_model_openai": "gpt-4o", + "enable_identity_patch": true, + "identity_patch_prompt": "", + "home_content": "" + } + }`, + }, + { + name: "POST /api/v1/admin/accounts/bulk-update", + method: http.MethodPost, + path: "/api/v1/admin/accounts/bulk-update", + body: `{"account_ids":[101,102],"schedulable":false}`, + headers: map[string]string{ + "Content-Type": "application/json", + }, + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "success": 2, + "failed": 0, + "success_ids": [101, 102], + "failed_ids": [], + "results": [ + {"account_id": 101, "success": true}, + {"account_id": 102, "success": true} + ] + } + }`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + deps := newContractDeps(t) + if tt.setup != nil { + tt.setup(t, deps) + } + + status, body := doRequest(t, deps.router, tt.method, tt.path, tt.body, tt.headers) + require.Equal(t, tt.wantStatus, status) + require.JSONEq(t, tt.wantJSON, body) + }) + } +} + +type contractDeps struct { + now time.Time + router http.Handler + apiKeyRepo *stubApiKeyRepo + usageRepo *stubUsageLogRepo + settingRepo *stubSettingRepo +} + +func newContractDeps(t *testing.T) *contractDeps { + t.Helper() + + now := time.Date(2025, 1, 2, 3, 4, 5, 0, time.UTC) + + userRepo := &stubUserRepo{ + users: map[int64]*service.User{ + 1: { + ID: 1, + Email: "alice@example.com", + Username: "alice", + Notes: "hello", + Role: service.RoleUser, + Balance: 12.5, + Concurrency: 5, + Status: service.StatusActive, + AllowedGroups: nil, + CreatedAt: now, + UpdatedAt: now, + }, + }, + } + + apiKeyRepo := newStubApiKeyRepo(now) + apiKeyCache := stubApiKeyCache{} + groupRepo := stubGroupRepo{} + userSubRepo := stubUserSubscriptionRepo{} + accountRepo := stubAccountRepo{} + proxyRepo := stubProxyRepo{} + redeemRepo := stubRedeemCodeRepo{} + + cfg := &config.Config{ + Default: config.DefaultConfig{ + APIKeyPrefix: "sk-", + }, + RunMode: config.RunModeStandard, + } + + userService := service.NewUserService(userRepo, nil) + apiKeyService := service.NewAPIKeyService(apiKeyRepo, userRepo, groupRepo, userSubRepo, apiKeyCache, cfg) + + usageRepo := newStubUsageLogRepo() + usageService := service.NewUsageService(usageRepo, userRepo, nil, nil) + + settingRepo := newStubSettingRepo() + settingService := service.NewSettingService(settingRepo, cfg) + + adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil, nil) + authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil) + apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) + usageHandler := handler.NewUsageHandler(usageService, apiKeyService) + adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil, nil) + adminAccountHandler := adminhandler.NewAccountHandler(adminService, nil, nil, nil, nil, nil, nil, nil, nil, nil) + + jwtAuth := func(c *gin.Context) { + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ + UserID: 1, + Concurrency: 5, + }) + c.Set(string(middleware.ContextKeyUserRole), service.RoleUser) + c.Next() + } + adminAuth := func(c *gin.Context) { + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ + UserID: 1, 
+ Concurrency: 5, + }) + c.Set(string(middleware.ContextKeyUserRole), service.RoleAdmin) + c.Next() + } + + r := gin.New() + + v1 := r.Group("/api/v1") + + v1Auth := v1.Group("") + v1Auth.Use(jwtAuth) + v1Auth.GET("/auth/me", authHandler.GetCurrentUser) + + v1Keys := v1.Group("") + v1Keys.Use(jwtAuth) + v1Keys.GET("/keys", apiKeyHandler.List) + v1Keys.POST("/keys", apiKeyHandler.Create) + + v1Usage := v1.Group("") + v1Usage.Use(jwtAuth) + v1Usage.GET("/usage", usageHandler.List) + v1Usage.GET("/usage/stats", usageHandler.Stats) + + v1Admin := v1.Group("/admin") + v1Admin.Use(adminAuth) + v1Admin.GET("/settings", adminSettingHandler.GetSettings) + v1Admin.POST("/accounts/bulk-update", adminAccountHandler.BulkUpdate) + + return &contractDeps{ + now: now, + router: r, + apiKeyRepo: apiKeyRepo, + usageRepo: usageRepo, + settingRepo: settingRepo, + } +} + +func doRequest(t *testing.T, router http.Handler, method, path, body string, headers map[string]string) (int, string) { + t.Helper() + + req := httptest.NewRequest(method, path, bytes.NewBufferString(body)) + for k, v := range headers { + req.Header.Set(k, v) + } + + w := httptest.NewRecorder() + router.ServeHTTP(w, req) + + respBody, err := io.ReadAll(w.Result().Body) + require.NoError(t, err) + + return w.Result().StatusCode, string(respBody) +} + +func ptr[T any](v T) *T { return &v } + +type stubUserRepo struct { + users map[int64]*service.User +} + +func (r *stubUserRepo) Create(ctx context.Context, user *service.User) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) GetByID(ctx context.Context, id int64) (*service.User, error) { + user, ok := r.users[id] + if !ok { + return nil, service.ErrUserNotFound + } + clone := *user + return &clone, nil +} + +func (r *stubUserRepo) GetByEmail(ctx context.Context, email string) (*service.User, error) { + for _, user := range r.users { + if user.Email == email { + clone := *user + return &clone, nil + } + } + return nil, service.ErrUserNotFound +} + +func (r *stubUserRepo) GetFirstAdmin(ctx context.Context) (*service.User, error) { + for _, user := range r.users { + if user.Role == service.RoleAdmin && user.Status == service.StatusActive { + clone := *user + return &clone, nil + } + } + return nil, service.ErrUserNotFound +} + +func (r *stubUserRepo) Update(ctx context.Context, user *service.User) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.User, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUserRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters service.UserListFilters) ([]service.User, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUserRepo) UpdateBalance(ctx context.Context, id int64, amount float64) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) DeductBalance(ctx context.Context, id int64, amount float64) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) UpdateConcurrency(ctx context.Context, id int64, amount int) error { + return errors.New("not implemented") +} + +func (r *stubUserRepo) ExistsByEmail(ctx context.Context, email string) (bool, error) { + return false, errors.New("not implemented") +} + +func (r *stubUserRepo) 
RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +type stubApiKeyCache struct{} + +func (stubApiKeyCache) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) { + return 0, nil +} + +func (stubApiKeyCache) IncrementCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (stubApiKeyCache) DeleteCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (stubApiKeyCache) IncrementDailyUsage(ctx context.Context, apiKey string) error { + return nil +} + +func (stubApiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error { + return nil +} + +func (stubApiKeyCache) GetAuthCache(ctx context.Context, key string) (*service.APIKeyAuthCacheEntry, error) { + return nil, nil +} + +func (stubApiKeyCache) SetAuthCache(ctx context.Context, key string, entry *service.APIKeyAuthCacheEntry, ttl time.Duration) error { + return nil +} + +func (stubApiKeyCache) DeleteAuthCache(ctx context.Context, key string) error { + return nil +} + +type stubGroupRepo struct{} + +func (stubGroupRepo) Create(ctx context.Context, group *service.Group) error { + return errors.New("not implemented") +} + +func (stubGroupRepo) GetByID(ctx context.Context, id int64) (*service.Group, error) { + return nil, service.ErrGroupNotFound +} + +func (stubGroupRepo) GetByIDLite(ctx context.Context, id int64) (*service.Group, error) { + return nil, service.ErrGroupNotFound +} + +func (stubGroupRepo) Update(ctx context.Context, group *service.Group) error { + return errors.New("not implemented") +} + +func (stubGroupRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (stubGroupRepo) DeleteCascade(ctx context.Context, id int64) ([]int64, error) { + return nil, errors.New("not implemented") +} + +func (stubGroupRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.Group, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubGroupRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]service.Group, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubGroupRepo) ListActive(ctx context.Context) ([]service.Group, error) { + return nil, errors.New("not implemented") +} + +func (stubGroupRepo) ListActiveByPlatform(ctx context.Context, platform string) ([]service.Group, error) { + return nil, errors.New("not implemented") +} + +func (stubGroupRepo) ExistsByName(ctx context.Context, name string) (bool, error) { + return false, errors.New("not implemented") +} + +func (stubGroupRepo) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +type stubAccountRepo struct { + bulkUpdateIDs []int64 +} + +func (s *stubAccountRepo) Create(ctx context.Context, account *service.Account) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) GetByID(ctx context.Context, id int64) (*service.Account, error) { + return nil, service.ErrAccountNotFound +} + +func (s *stubAccountRepo) GetByIDs(ctx context.Context, ids []int64) ([]*service.Account, error) { + return nil, errors.New("not implemented") +} 
+ +func (s *stubAccountRepo) ExistsByID(ctx context.Context, id int64) (bool, error) { + return false, errors.New("not implemented") +} + +func (s *stubAccountRepo) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) Update(ctx context.Context, account *service.Account) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.Account, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]service.Account, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListByGroup(ctx context.Context, groupID int64) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListActive(ctx context.Context) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListByPlatform(ctx context.Context, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) UpdateLastUsed(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetError(ctx context.Context, id int64, errorMsg string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) { + return 0, errors.New("not implemented") +} + +func (s *stubAccountRepo) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulable(ctx context.Context) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByPlatform(ctx context.Context, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) 
SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetOverloaded(ctx context.Context, id int64, until time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearTempUnschedulable(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearRateLimit(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) BulkUpdate(ctx context.Context, ids []int64, updates service.AccountBulkUpdate) (int64, error) { + s.bulkUpdateIDs = append([]int64{}, ids...) + return int64(len(ids)), nil +} + +type stubProxyRepo struct{} + +func (stubProxyRepo) Create(ctx context.Context, proxy *service.Proxy) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) GetByID(ctx context.Context, id int64) (*service.Proxy, error) { + return nil, service.ErrProxyNotFound +} + +func (stubProxyRepo) Update(ctx context.Context, proxy *service.Proxy) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.Proxy, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.Proxy, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListWithFiltersAndAccountCount(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.ProxyWithAccountCount, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListActive(ctx context.Context) ([]service.Proxy, error) { + return nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListActiveWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { + return nil, errors.New("not implemented") +} + +func (stubProxyRepo) ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) { + return false, errors.New("not implemented") +} + +func (stubProxyRepo) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (stubProxyRepo) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) { + return nil, errors.New("not implemented") +} + +type stubRedeemCodeRepo struct{} + +func (stubRedeemCodeRepo) Create(ctx context.Context, code *service.RedeemCode) error { + return 
errors.New("not implemented") +} + +func (stubRedeemCodeRepo) CreateBatch(ctx context.Context, codes []service.RedeemCode) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) GetByID(ctx context.Context, id int64) (*service.RedeemCode, error) { + return nil, service.ErrRedeemCodeNotFound +} + +func (stubRedeemCodeRepo) GetByCode(ctx context.Context, code string) (*service.RedeemCode, error) { + return nil, service.ErrRedeemCodeNotFound +} + +func (stubRedeemCodeRepo) Update(ctx context.Context, code *service.RedeemCode) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) Use(ctx context.Context, id, userID int64) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.RedeemCode, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubRedeemCodeRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]service.RedeemCode, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubRedeemCodeRepo) ListByUser(ctx context.Context, userID int64, limit int) ([]service.RedeemCode, error) { + return nil, errors.New("not implemented") +} + +type stubUserSubscriptionRepo struct{} + +func (stubUserSubscriptionRepo) Create(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) GetByID(ctx context.Context, id int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) GetByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) Update(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { + return false, errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error 
{ + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) UpdateStatus(ctx context.Context, subscriptionID int64, status string) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ActivateWindows(ctx context.Context, id int64, start time.Time) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ResetDailyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ResetWeeklyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) ResetMonthlyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) IncrementUsage(ctx context.Context, id int64, costUSD float64) error { + return errors.New("not implemented") +} +func (stubUserSubscriptionRepo) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) { + return 0, errors.New("not implemented") +} + +type stubApiKeyRepo struct { + now time.Time + + nextID int64 + byID map[int64]*service.APIKey + byKey map[string]*service.APIKey +} + +func newStubApiKeyRepo(now time.Time) *stubApiKeyRepo { + return &stubApiKeyRepo{ + now: now, + nextID: 100, + byID: make(map[int64]*service.APIKey), + byKey: make(map[string]*service.APIKey), + } +} + +func (r *stubApiKeyRepo) MustSeed(key *service.APIKey) { + if key == nil { + return + } + clone := *key + r.byID[clone.ID] = &clone + r.byKey[clone.Key] = &clone +} + +func (r *stubApiKeyRepo) Create(ctx context.Context, key *service.APIKey) error { + if key == nil { + return errors.New("nil key") + } + if key.ID == 0 { + key.ID = r.nextID + r.nextID++ + } + if key.CreatedAt.IsZero() { + key.CreatedAt = r.now + } + if key.UpdatedAt.IsZero() { + key.UpdatedAt = r.now + } + clone := *key + r.byID[clone.ID] = &clone + r.byKey[clone.Key] = &clone + return nil +} + +func (r *stubApiKeyRepo) GetByID(ctx context.Context, id int64) (*service.APIKey, error) { + key, ok := r.byID[id] + if !ok { + return nil, service.ErrAPIKeyNotFound + } + clone := *key + return &clone, nil +} + +func (r *stubApiKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + key, ok := r.byID[id] + if !ok { + return "", 0, service.ErrAPIKeyNotFound + } + return key.Key, key.UserID, nil +} + +func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { + found, ok := r.byKey[key] + if !ok { + return nil, service.ErrAPIKeyNotFound + } + clone := *found + return &clone, nil +} + +func (r *stubApiKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return r.GetByKey(ctx, key) +} + +func (r *stubApiKeyRepo) Update(ctx context.Context, key *service.APIKey) error { + if key == nil { + return errors.New("nil key") + } + if _, ok := r.byID[key.ID]; !ok { + return service.ErrAPIKeyNotFound + } + if key.UpdatedAt.IsZero() { + key.UpdatedAt = r.now + } + clone := *key + r.byID[clone.ID] = &clone + r.byKey[clone.Key] = &clone + return nil +} + +func (r *stubApiKeyRepo) Delete(ctx context.Context, id int64) error { + key, ok := r.byID[id] + if !ok { + return service.ErrAPIKeyNotFound + } + delete(r.byID, id) + delete(r.byKey, key.Key) + return nil +} + +func (r *stubApiKeyRepo) 
ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + ids := make([]int64, 0, len(r.byID)) + for id := range r.byID { + if r.byID[id].UserID == userID { + ids = append(ids, id) + } + } + sort.Slice(ids, func(i, j int) bool { return ids[i] > ids[j] }) + + start := params.Offset() + if start > len(ids) { + start = len(ids) + } + end := start + params.Limit() + if end > len(ids) { + end = len(ids) + } + + out := make([]service.APIKey, 0, end-start) + for _, id := range ids[start:end] { + clone := *r.byID[id] + out = append(out, clone) + } + + total := int64(len(ids)) + pageSize := params.Limit() + pages := int(math.Ceil(float64(total) / float64(pageSize))) + if pages < 1 { + pages = 1 + } + return out, &pagination.PaginationResult{ + Total: total, + Page: params.Page, + PageSize: pageSize, + Pages: pages, + }, nil +} + +func (r *stubApiKeyRepo) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + if len(apiKeyIDs) == 0 { + return []int64{}, nil + } + seen := make(map[int64]struct{}, len(apiKeyIDs)) + out := make([]int64, 0, len(apiKeyIDs)) + for _, id := range apiKeyIDs { + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + key, ok := r.byID[id] + if ok && key.UserID == userID { + out = append(out, id) + } + } + return out, nil +} + +func (r *stubApiKeyRepo) CountByUserID(ctx context.Context, userID int64) (int64, error) { + var count int64 + for _, key := range r.byID { + if key.UserID == userID { + count++ + } + } + return count, nil +} + +func (r *stubApiKeyRepo) ExistsByKey(ctx context.Context, key string) (bool, error) { + _, ok := r.byKey[key] + return ok, nil +} + +func (r *stubApiKeyRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.APIKey, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +type stubUsageLogRepo struct { + userLogs map[int64][]service.UsageLog +} + +func newStubUsageLogRepo() *stubUsageLogRepo { + return &stubUsageLogRepo{userLogs: make(map[int64][]service.UsageLog)} +} + +func (r *stubUsageLogRepo) SetUserLogs(userID int64, logs []service.UsageLog) { + r.userLogs[userID] = logs +} + +func (r *stubUsageLogRepo) Create(ctx context.Context, log *service.UsageLog) (bool, error) { + return false, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetByID(ctx context.Context, id int64) (*service.UsageLog, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListByUser(ctx context.Context, userID int64, params 
pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { + logs := r.userLogs[userID] + total := int64(len(logs)) + out := paginateLogs(logs, params) + return out, paginationResult(total, params), nil +} + +func (r *stubUsageLogRepo) ListByAPIKey(ctx context.Context, apiKeyID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListByUserAndTimeRange(ctx context.Context, userID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { + logs := r.userLogs[userID] + return logs, paginationResult(int64(len(logs)), pagination.PaginationParams{Page: 1, PageSize: 100}), nil +} + +func (r *stubUsageLogRepo) ListByAPIKeyAndTimeRange(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListByAccountAndTimeRange(ctx context.Context, accountID int64, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListByModelAndTimeRange(ctx context.Context, modelName string, startTime, endTime time.Time) ([]service.UsageLog, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetAccountWindowStats(ctx context.Context, accountID int64, startTime time.Time) (*usagestats.AccountStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetAccountTodayStats(ctx context.Context, accountID int64) (*usagestats.AccountStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUserStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + logs := r.userLogs[userID] + if len(logs) == 0 { + return &usagestats.UsageStats{}, nil + } + + var totalRequests int64 + var totalInputTokens int64 + var totalOutputTokens int64 + var 
totalCacheTokens int64 + var totalCost float64 + var totalActualCost float64 + var totalDuration int64 + var durationCount int64 + + for _, log := range logs { + totalRequests++ + totalInputTokens += int64(log.InputTokens) + totalOutputTokens += int64(log.OutputTokens) + totalCacheTokens += int64(log.CacheCreationTokens + log.CacheReadTokens) + totalCost += log.TotalCost + totalActualCost += log.ActualCost + if log.DurationMs != nil { + totalDuration += int64(*log.DurationMs) + durationCount++ + } + } + + var avgDuration float64 + if durationCount > 0 { + avgDuration = float64(totalDuration) / float64(durationCount) + } + + return &usagestats.UsageStats{ + TotalRequests: totalRequests, + TotalInputTokens: totalInputTokens, + TotalOutputTokens: totalOutputTokens, + TotalCacheTokens: totalCacheTokens, + TotalTokens: totalInputTokens + totalOutputTokens + totalCacheTokens, + TotalCost: totalCost, + TotalActualCost: totalActualCost, + AverageDurationMs: avgDuration, + }, nil +} + +func (r *stubUsageLogRepo) GetAPIKeyStatsAggregated(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetAccountStatsAggregated(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetModelStatsAggregated(ctx context.Context, modelName string, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetDailyStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) ([]map[string]any, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*usagestats.BatchAPIKeyUsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUserDashboardStats(ctx context.Context, userID int64) (*usagestats.UserDashboardStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUserUsageTrendByUserID(ctx context.Context, userID int64, startTime, endTime time.Time, granularity string) ([]usagestats.TrendDataPoint, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetUserModelStats(ctx context.Context, userID int64, startTime, endTime time.Time) ([]usagestats.ModelStat, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) { + logs := r.userLogs[filters.UserID] + + // Apply filters + var filtered []service.UsageLog + for _, log := range logs { + // Apply APIKeyID filter + if filters.APIKeyID > 0 && log.APIKeyID != filters.APIKeyID { + continue + } + // Apply Model filter + if filters.Model != "" && log.Model != filters.Model { + continue + } + // Apply Stream filter + if filters.Stream != nil && log.Stream != *filters.Stream { + continue + } + // Apply BillingType filter + if filters.BillingType != nil && log.BillingType != *filters.BillingType { + continue + } + // Apply time range 
filters + if filters.StartTime != nil && log.CreatedAt.Before(*filters.StartTime) { + continue + } + if filters.EndTime != nil && log.CreatedAt.After(*filters.EndTime) { + continue + } + filtered = append(filtered, log) + } + + total := int64(len(filtered)) + out := paginateLogs(filtered, params) + return out, paginationResult(total, params), nil +} + +func (r *stubUsageLogRepo) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.AccountUsageStatsResponse, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUsageLogRepo) GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) { + return nil, errors.New("not implemented") +} + +type stubSettingRepo struct { + all map[string]string +} + +func newStubSettingRepo() *stubSettingRepo { + return &stubSettingRepo{all: make(map[string]string)} +} + +func (r *stubSettingRepo) SetAll(values map[string]string) { + r.all = make(map[string]string, len(values)) + for k, v := range values { + r.all[k] = v + } +} + +func (r *stubSettingRepo) Get(ctx context.Context, key string) (*service.Setting, error) { + value, ok := r.all[key] + if !ok { + return nil, service.ErrSettingNotFound + } + return &service.Setting{Key: key, Value: value}, nil +} + +func (r *stubSettingRepo) GetValue(ctx context.Context, key string) (string, error) { + value, ok := r.all[key] + if !ok { + return "", service.ErrSettingNotFound + } + return value, nil +} + +func (r *stubSettingRepo) Set(ctx context.Context, key, value string) error { + r.all[key] = value + return nil +} + +func (r *stubSettingRepo) GetMultiple(ctx context.Context, keys []string) (map[string]string, error) { + out := make(map[string]string, len(keys)) + for _, key := range keys { + out[key] = r.all[key] + } + return out, nil +} + +func (r *stubSettingRepo) SetMultiple(ctx context.Context, settings map[string]string) error { + for k, v := range settings { + r.all[k] = v + } + return nil +} + +func (r *stubSettingRepo) GetAll(ctx context.Context) (map[string]string, error) { + out := make(map[string]string, len(r.all)) + for k, v := range r.all { + out[k] = v + } + return out, nil +} + +func (r *stubSettingRepo) Delete(ctx context.Context, key string) error { + delete(r.all, key) + return nil +} + +func paginateLogs(logs []service.UsageLog, params pagination.PaginationParams) []service.UsageLog { + start := params.Offset() + if start > len(logs) { + start = len(logs) + } + end := start + params.Limit() + if end > len(logs) { + end = len(logs) + } + out := make([]service.UsageLog, 0, end-start) + out = append(out, logs[start:end]...) + return out +} + +func paginationResult(total int64, params pagination.PaginationParams) *pagination.PaginationResult { + pageSize := params.Limit() + pages := int(math.Ceil(float64(total) / float64(pageSize))) + if pages < 1 { + pages = 1 + } + return &pagination.PaginationResult{ + Total: total, + Page: params.Page, + PageSize: pageSize, + Pages: pages, + } +} + +// Ensure compile-time interface compliance. 
+var (
+	_ service.UserRepository             = (*stubUserRepo)(nil)
+	_ service.APIKeyRepository           = (*stubApiKeyRepo)(nil)
+	_ service.APIKeyCache                = (*stubApiKeyCache)(nil)
+	_ service.GroupRepository            = (*stubGroupRepo)(nil)
+	_ service.UserSubscriptionRepository = (*stubUserSubscriptionRepo)(nil)
+	_ service.UsageLogRepository         = (*stubUsageLogRepo)(nil)
+	_ service.SettingRepository          = (*stubSettingRepo)(nil)
+)
diff --git a/backend/internal/server/http.go b/backend/internal/server/http.go
new file mode 100644
index 00000000..52d5c926
--- /dev/null
+++ b/backend/internal/server/http.go
@@ -0,0 +1,69 @@
+// Package server provides HTTP server initialization and configuration.
+package server
+
+import (
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+	"github.com/google/wire"
+	"github.com/redis/go-redis/v9"
+)
+
+// ProviderSet provides the server-layer dependencies.
+var ProviderSet = wire.NewSet(
+	ProvideRouter,
+	ProvideHTTPServer,
+)
+
+// ProvideRouter provides the configured gin router.
+func ProvideRouter(
+	cfg *config.Config,
+	handlers *handler.Handlers,
+	jwtAuth middleware2.JWTAuthMiddleware,
+	adminAuth middleware2.AdminAuthMiddleware,
+	apiKeyAuth middleware2.APIKeyAuthMiddleware,
+	apiKeyService *service.APIKeyService,
+	subscriptionService *service.SubscriptionService,
+	opsService *service.OpsService,
+	settingService *service.SettingService,
+	redisClient *redis.Client,
+) *gin.Engine {
+	if cfg.Server.Mode == "release" {
+		gin.SetMode(gin.ReleaseMode)
+	}
+
+	r := gin.New()
+	r.Use(middleware2.Recovery())
+	if len(cfg.Server.TrustedProxies) > 0 {
+		if err := r.SetTrustedProxies(cfg.Server.TrustedProxies); err != nil {
+			log.Printf("Failed to set trusted proxies: %v", err)
+		}
+	} else {
+		if err := r.SetTrustedProxies(nil); err != nil {
+			log.Printf("Failed to disable trusted proxies: %v", err)
+		}
+	}
+
+	return SetupRouter(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, opsService, settingService, cfg, redisClient)
+}
+
+// ProvideHTTPServer provides the HTTP server.
+func ProvideHTTPServer(cfg *config.Config, router *gin.Engine) *http.Server {
+	return &http.Server{
+		Addr:    cfg.Server.Address(),
+		Handler: router,
+		// ReadHeaderTimeout: limits how long reading the request headers may take, guarding against slow-header (Slowloris-style) attacks.
+		ReadHeaderTimeout: time.Duration(cfg.Server.ReadHeaderTimeout) * time.Second,
+		// IdleTimeout: how long idle keep-alive connections are retained before their resources are released.
+		IdleTimeout: time.Duration(cfg.Server.IdleTimeout) * time.Second,
+		// Note: WriteTimeout is deliberately left unset because streaming responses can run for ten-plus minutes,
+		// and ReadTimeout is left unset because large request bodies can take a long time to read.
+	}
+}
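Since WriteTimeout stays unset server-wide, nothing bounds writes on ordinary handlers either. Handlers that are not streaming can still set a per-request deadline through http.ResponseController (Go 1.20+); the sketch below is illustrative only and not part of this patch, with a hypothetical handler and an arbitrary 30-second deadline.

package main

import (
	"net/http"
	"time"
)

// shortHandler bounds its own writes instead of relying on a global
// WriteTimeout, which the server above omits to keep streaming alive.
func shortHandler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	// Ignore the error: some wrapped ResponseWriters do not support deadlines.
	_ = rc.SetWriteDeadline(time.Now().Add(30 * time.Second))
	_, _ = w.Write([]byte("ok"))
}

func main() {
	http.HandleFunc("/healthz", shortHandler)
	_ = http.ListenAndServe(":8080", nil)
}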
diff --git a/backend/internal/server/middleware/admin_auth.go b/backend/internal/server/middleware/admin_auth.go
new file mode 100644
index 00000000..8f30107c
--- /dev/null
+++ b/backend/internal/server/middleware/admin_auth.go
@@ -0,0 +1,193 @@
+// Package middleware provides HTTP middleware for authentication, authorization, and request processing.
+package middleware
+
+import (
+	"crypto/subtle"
+	"errors"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// NewAdminAuthMiddleware creates the admin authentication middleware.
+func NewAdminAuthMiddleware(
+	authService *service.AuthService,
+	userService *service.UserService,
+	settingService *service.SettingService,
+) AdminAuthMiddleware {
+	return AdminAuthMiddleware(adminAuth(authService, userService, settingService))
+}
+
+// adminAuth implements the admin authentication middleware.
+// It supports two authentication methods, distinguished by header:
+// 1. Admin API Key: x-api-key: <key>
+// 2. JWT Token: Authorization: Bearer <token> (admin role required)
+func adminAuth(
+	authService *service.AuthService,
+	userService *service.UserService,
+	settingService *service.SettingService,
+) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		// WebSocket upgrade requests cannot set Authorization headers in browsers.
+		// For admin WebSocket endpoints (e.g. Ops realtime), allow passing the JWT via
+		// Sec-WebSocket-Protocol (subprotocol list) using a prefixed token item:
+		// Sec-WebSocket-Protocol: sub2api-admin, jwt.<token>
+		if isWebSocketUpgradeRequest(c) {
+			if token := extractJWTFromWebSocketSubprotocol(c); token != "" {
+				if !validateJWTForAdmin(c, token, authService, userService) {
+					return
+				}
+				c.Next()
+				return
+			}
+		}
+
+		// Check the x-api-key header (admin API key authentication).
+		apiKey := c.GetHeader("x-api-key")
+		if apiKey != "" {
+			if !validateAdminAPIKey(c, apiKey, settingService, userService) {
+				return
+			}
+			c.Next()
+			return
+		}
+
+		// Check the Authorization header (JWT authentication).
+		authHeader := c.GetHeader("Authorization")
+		if authHeader != "" {
+			parts := strings.SplitN(authHeader, " ", 2)
+			if len(parts) == 2 && parts[0] == "Bearer" {
+				if !validateJWTForAdmin(c, parts[1], authService, userService) {
+					return
+				}
+				c.Next()
+				return
+			}
+		}
+
+		// No valid credentials supplied.
+		AbortWithError(c, 401, "UNAUTHORIZED", "Authorization required")
+	}
+}
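For reference, the handshake the comment above describes looks like this from the client side; a minimal sketch with an assumed endpoint path and a placeholder token, not anything defined in this patch (a real dialer would also generate a random Sec-WebSocket-Key):

package main

import "net/http"

// adminWSRequest builds the upgrade request an admin browser client would
// send, carrying the JWT in the subprotocol list as adminAuth expects.
// The URL is illustrative only.
func adminWSRequest(token string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/admin/ops/ws", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", "websocket")
	req.Header.Set("Sec-WebSocket-Version", "13")
	// adminAuth scans this comma-separated list for an item prefixed "jwt.".
	req.Header.Set("Sec-WebSocket-Protocol", "sub2api-admin, jwt."+token)
	return req, nil
}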
+func isWebSocketUpgradeRequest(c *gin.Context) bool {
+	if c == nil || c.Request == nil {
+		return false
+	}
+	// The RFC 6455 handshake uses:
+	// Connection: Upgrade
+	// Upgrade: websocket
+	upgrade := strings.ToLower(strings.TrimSpace(c.GetHeader("Upgrade")))
+	if upgrade != "websocket" {
+		return false
+	}
+	connection := strings.ToLower(c.GetHeader("Connection"))
+	return strings.Contains(connection, "upgrade")
+}
+
+func extractJWTFromWebSocketSubprotocol(c *gin.Context) string {
+	if c == nil {
+		return ""
+	}
+	raw := strings.TrimSpace(c.GetHeader("Sec-WebSocket-Protocol"))
+	if raw == "" {
+		return ""
+	}
+
+	// The header is a comma-separated list of tokens. We reserve the prefix "jwt."
+	// for carrying the admin JWT.
+	for _, part := range strings.Split(raw, ",") {
+		p := strings.TrimSpace(part)
+		if strings.HasPrefix(p, "jwt.") {
+			token := strings.TrimSpace(strings.TrimPrefix(p, "jwt."))
+			if token != "" {
+				return token
+			}
+		}
+	}
+	return ""
+}
+
+// validateAdminAPIKey validates the admin API key.
+func validateAdminAPIKey(
+	c *gin.Context,
+	key string,
+	settingService *service.SettingService,
+	userService *service.UserService,
+) bool {
+	storedKey, err := settingService.GetAdminAPIKey(c.Request.Context())
+	if err != nil {
+		AbortWithError(c, 500, "INTERNAL_ERROR", "Internal server error")
+		return false
+	}
+
+	// Whether the key is unconfigured or simply wrong, return the same error
+	// so no information leaks about which case occurred.
+	if storedKey == "" || subtle.ConstantTimeCompare([]byte(key), []byte(storedKey)) != 1 {
+		AbortWithError(c, 401, "INVALID_ADMIN_KEY", "Invalid admin API key")
+		return false
+	}
+
+	// Resolve the real admin user.
+	admin, err := userService.GetFirstAdmin(c.Request.Context())
+	if err != nil {
+		AbortWithError(c, 500, "INTERNAL_ERROR", "No admin user found")
+		return false
+	}
+
+	c.Set(string(ContextKeyUser), AuthSubject{
+		UserID:      admin.ID,
+		Concurrency: admin.Concurrency,
+	})
+	c.Set(string(ContextKeyUserRole), admin.Role)
+	c.Set("auth_method", "admin_api_key")
+	return true
+}
+
+// validateJWTForAdmin validates the JWT and checks for admin privileges.
+func validateJWTForAdmin(
+	c *gin.Context,
+	token string,
+	authService *service.AuthService,
+	userService *service.UserService,
+) bool {
+	// Validate the JWT token.
+	claims, err := authService.ValidateToken(token)
+	if err != nil {
+		if errors.Is(err, service.ErrTokenExpired) {
+			AbortWithError(c, 401, "TOKEN_EXPIRED", "Token has expired")
+			return false
+		}
+		AbortWithError(c, 401, "INVALID_TOKEN", "Invalid token")
+		return false
+	}
+
+	// Load the user from the database.
+	user, err := userService.GetByID(c.Request.Context(), claims.UserID)
+	if err != nil {
+		AbortWithError(c, 401, "USER_NOT_FOUND", "User not found")
+		return false
+	}
+
+	// Check the user's status.
+	if !user.IsActive() {
+		AbortWithError(c, 401, "USER_INACTIVE", "User account is not active")
+		return false
+	}
+
+	// Check for admin privileges.
+	if !user.IsAdmin() {
+		AbortWithError(c, 403, "FORBIDDEN", "Admin access required")
+		return false
+	}
+
+	c.Set(string(ContextKeyUser), AuthSubject{
+		UserID:      user.ID,
+		Concurrency: user.Concurrency,
+	})
+	c.Set(string(ContextKeyUserRole), user.Role)
+	c.Set("auth_method", "jwt")
+
+	return true
+}
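Since extractJWTFromWebSocketSubprotocol is a pure function of a single header, it pins down cheaply; a rough table-test sketch, assuming the usual testing, net/http, net/http/httptest, and gin imports in package middleware (not part of this patch):

func TestExtractJWTFromWebSocketSubprotocol_Sketch(t *testing.T) {
	gin.SetMode(gin.TestMode)
	cases := map[string]string{
		"sub2api-admin, jwt.abc123": "abc123", // prefixed item found anywhere in the list
		"jwt.abc123":                "abc123",
		"sub2api-admin":             "", // no jwt.-prefixed item
		"jwt.":                      "", // empty token is rejected
	}
	for header, want := range cases {
		c, _ := gin.CreateTestContext(httptest.NewRecorder())
		c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
		c.Request.Header.Set("Sec-WebSocket-Protocol", header)
		if got := extractJWTFromWebSocketSubprotocol(c); got != want {
			t.Errorf("header %q: got %q, want %q", header, got, want)
		}
	}
}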
"github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// NewAPIKeyAuthMiddleware 创建 API Key 认证中间件 +func NewAPIKeyAuthMiddleware(apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, cfg *config.Config) APIKeyAuthMiddleware { + return APIKeyAuthMiddleware(apiKeyAuthWithSubscription(apiKeyService, subscriptionService, cfg)) +} + +// apiKeyAuthWithSubscription API Key认证中间件(支持订阅验证) +func apiKeyAuthWithSubscription(apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, cfg *config.Config) gin.HandlerFunc { + return func(c *gin.Context) { + queryKey := strings.TrimSpace(c.Query("key")) + queryApiKey := strings.TrimSpace(c.Query("api_key")) + if queryKey != "" || queryApiKey != "" { + AbortWithError(c, 400, "api_key_in_query_deprecated", "API key in query parameter is deprecated. Please use Authorization header instead.") + return + } + + // 尝试从Authorization header中提取API key (Bearer scheme) + authHeader := c.GetHeader("Authorization") + var apiKeyString string + + if authHeader != "" { + // 验证Bearer scheme + parts := strings.SplitN(authHeader, " ", 2) + if len(parts) == 2 && parts[0] == "Bearer" { + apiKeyString = parts[1] + } + } + + // 如果Authorization header中没有,尝试从x-api-key header中提取 + if apiKeyString == "" { + apiKeyString = c.GetHeader("x-api-key") + } + + // 如果x-api-key header中没有,尝试从x-goog-api-key header中提取(Gemini CLI兼容) + if apiKeyString == "" { + apiKeyString = c.GetHeader("x-goog-api-key") + } + + // 如果所有header都没有API key + if apiKeyString == "" { + AbortWithError(c, 401, "API_KEY_REQUIRED", "API key is required in Authorization header (Bearer scheme), x-api-key header, or x-goog-api-key header") + return + } + + // 从数据库验证API key + apiKey, err := apiKeyService.GetByKey(c.Request.Context(), apiKeyString) + if err != nil { + if errors.Is(err, service.ErrAPIKeyNotFound) { + AbortWithError(c, 401, "INVALID_API_KEY", "Invalid API key") + return + } + AbortWithError(c, 500, "INTERNAL_ERROR", "Failed to validate API key") + return + } + + // 检查API key是否激活 + if !apiKey.IsActive() { + AbortWithError(c, 401, "API_KEY_DISABLED", "API key is disabled") + return + } + + // 检查 IP 限制(白名单/黑名单) + // 注意:错误信息故意模糊,避免暴露具体的 IP 限制机制 + if len(apiKey.IPWhitelist) > 0 || len(apiKey.IPBlacklist) > 0 { + clientIP := ip.GetClientIP(c) + allowed, _ := ip.CheckIPRestriction(clientIP, apiKey.IPWhitelist, apiKey.IPBlacklist) + if !allowed { + AbortWithError(c, 403, "ACCESS_DENIED", "Access denied") + return + } + } + + // 检查关联的用户 + if apiKey.User == nil { + AbortWithError(c, 401, "USER_NOT_FOUND", "User associated with API key not found") + return + } + + // 检查用户状态 + if !apiKey.User.IsActive() { + AbortWithError(c, 401, "USER_INACTIVE", "User account is not active") + return + } + + if cfg.RunMode == config.RunModeSimple { + // 简易模式:跳过余额和订阅检查,但仍需设置必要的上下文 + c.Set(string(ContextKeyAPIKey), apiKey) + c.Set(string(ContextKeyUser), AuthSubject{ + UserID: apiKey.User.ID, + Concurrency: apiKey.User.Concurrency, + }) + c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) + c.Next() + return + } + + // 判断计费方式:订阅模式 vs 余额模式 + isSubscriptionType := apiKey.Group != nil && apiKey.Group.IsSubscriptionType() + + if isSubscriptionType && subscriptionService != nil { + // 订阅模式:验证订阅 + subscription, err := subscriptionService.GetActiveSubscription( + c.Request.Context(), + apiKey.User.ID, + apiKey.Group.ID, + ) + if err != nil { + AbortWithError(c, 403, "SUBSCRIPTION_NOT_FOUND", "No active subscription found for 
this group") + return + } + + // 验证订阅状态(是否过期、暂停等) + if err := subscriptionService.ValidateSubscription(c.Request.Context(), subscription); err != nil { + AbortWithError(c, 403, "SUBSCRIPTION_INVALID", err.Error()) + return + } + + // 激活滑动窗口(首次使用时) + if err := subscriptionService.CheckAndActivateWindow(c.Request.Context(), subscription); err != nil { + log.Printf("Failed to activate subscription windows: %v", err) + } + + // 检查并重置过期窗口 + if err := subscriptionService.CheckAndResetWindows(c.Request.Context(), subscription); err != nil { + log.Printf("Failed to reset subscription windows: %v", err) + } + + // 预检查用量限制(使用0作为额外费用进行预检查) + if err := subscriptionService.CheckUsageLimits(c.Request.Context(), subscription, apiKey.Group, 0); err != nil { + AbortWithError(c, 429, "USAGE_LIMIT_EXCEEDED", err.Error()) + return + } + + // 将订阅信息存入上下文 + c.Set(string(ContextKeySubscription), subscription) + } else { + // 余额模式:检查用户余额 + if apiKey.User.Balance <= 0 { + AbortWithError(c, 403, "INSUFFICIENT_BALANCE", "Insufficient account balance") + return + } + } + + // 将API key和用户信息存入上下文 + c.Set(string(ContextKeyAPIKey), apiKey) + c.Set(string(ContextKeyUser), AuthSubject{ + UserID: apiKey.User.ID, + Concurrency: apiKey.User.Concurrency, + }) + c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) + + c.Next() + } +} + +// GetAPIKeyFromContext 从上下文中获取API key +func GetAPIKeyFromContext(c *gin.Context) (*service.APIKey, bool) { + value, exists := c.Get(string(ContextKeyAPIKey)) + if !exists { + return nil, false + } + apiKey, ok := value.(*service.APIKey) + return apiKey, ok +} + +// GetSubscriptionFromContext 从上下文中获取订阅信息 +func GetSubscriptionFromContext(c *gin.Context) (*service.UserSubscription, bool) { + value, exists := c.Get(string(ContextKeySubscription)) + if !exists { + return nil, false + } + subscription, ok := value.(*service.UserSubscription) + return subscription, ok +} + +func setGroupContext(c *gin.Context, group *service.Group) { + if !service.IsGroupContextValid(group) { + return + } + if existing, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group); ok && existing != nil && existing.ID == group.ID && service.IsGroupContextValid(existing) { + return + } + ctx := context.WithValue(c.Request.Context(), ctxkey.Group, group) + c.Request = c.Request.WithContext(ctx) +} diff --git a/backend/internal/server/middleware/api_key_auth_google.go b/backend/internal/server/middleware/api_key_auth_google.go new file mode 100644 index 00000000..1a0b0dd5 --- /dev/null +++ b/backend/internal/server/middleware/api_key_auth_google.go @@ -0,0 +1,146 @@ +package middleware + +import ( + "errors" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/googleapi" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// APIKeyAuthGoogle is a Google-style error wrapper for API key auth. +func APIKeyAuthGoogle(apiKeyService *service.APIKeyService, cfg *config.Config) gin.HandlerFunc { + return APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, cfg) +} + +// APIKeyAuthWithSubscriptionGoogle behaves like ApiKeyAuthWithSubscription but returns Google-style errors: +// {"error":{"code":401,"message":"...","status":"UNAUTHENTICATED"}} +// +// It is intended for Gemini native endpoints (/v1beta) to match Gemini SDK expectations. 
+// APIKeyAuthWithSubscriptionGoogle behaves like apiKeyAuthWithSubscription but returns Google-style errors:
+// {"error":{"code":401,"message":"...","status":"UNAUTHENTICATED"}}
+//
+// It is intended for Gemini native endpoints (/v1beta) to match Gemini SDK expectations.
+func APIKeyAuthWithSubscriptionGoogle(apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, cfg *config.Config) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		if v := strings.TrimSpace(c.Query("api_key")); v != "" {
+			abortWithGoogleError(c, 400, "Query parameter api_key is deprecated. Use Authorization header or key instead.")
+			return
+		}
+		apiKeyString := extractAPIKeyFromRequest(c)
+		if apiKeyString == "" {
+			abortWithGoogleError(c, 401, "API key is required")
+			return
+		}
+
+		apiKey, err := apiKeyService.GetByKey(c.Request.Context(), apiKeyString)
+		if err != nil {
+			if errors.Is(err, service.ErrAPIKeyNotFound) {
+				abortWithGoogleError(c, 401, "Invalid API key")
+				return
+			}
+			abortWithGoogleError(c, 500, "Failed to validate API key")
+			return
+		}
+
+		if !apiKey.IsActive() {
+			abortWithGoogleError(c, 401, "API key is disabled")
+			return
+		}
+		if apiKey.User == nil {
+			abortWithGoogleError(c, 401, "User associated with API key not found")
+			return
+		}
+		if !apiKey.User.IsActive() {
+			abortWithGoogleError(c, 401, "User account is not active")
+			return
+		}
+
+		// Simple mode: skip balance and subscription checks.
+		if cfg.RunMode == config.RunModeSimple {
+			c.Set(string(ContextKeyAPIKey), apiKey)
+			c.Set(string(ContextKeyUser), AuthSubject{
+				UserID:      apiKey.User.ID,
+				Concurrency: apiKey.User.Concurrency,
+			})
+			c.Set(string(ContextKeyUserRole), apiKey.User.Role)
+			setGroupContext(c, apiKey.Group)
+			c.Next()
+			return
+		}
+
+		isSubscriptionType := apiKey.Group != nil && apiKey.Group.IsSubscriptionType()
+		if isSubscriptionType && subscriptionService != nil {
+			subscription, err := subscriptionService.GetActiveSubscription(
+				c.Request.Context(),
+				apiKey.User.ID,
+				apiKey.Group.ID,
+			)
+			if err != nil {
+				abortWithGoogleError(c, 403, "No active subscription found for this group")
+				return
+			}
+			if err := subscriptionService.ValidateSubscription(c.Request.Context(), subscription); err != nil {
+				abortWithGoogleError(c, 403, err.Error())
+				return
+			}
+			_ = subscriptionService.CheckAndActivateWindow(c.Request.Context(), subscription)
+			_ = subscriptionService.CheckAndResetWindows(c.Request.Context(), subscription)
+			if err := subscriptionService.CheckUsageLimits(c.Request.Context(), subscription, apiKey.Group, 0); err != nil {
+				abortWithGoogleError(c, 429, err.Error())
+				return
+			}
+			c.Set(string(ContextKeySubscription), subscription)
+		} else {
+			if apiKey.User.Balance <= 0 {
+				abortWithGoogleError(c, 403, "Insufficient account balance")
+				return
+			}
+		}
+
+		c.Set(string(ContextKeyAPIKey), apiKey)
+		c.Set(string(ContextKeyUser), AuthSubject{
+			UserID:      apiKey.User.ID,
+			Concurrency: apiKey.User.Concurrency,
+		})
+		c.Set(string(ContextKeyUserRole), apiKey.User.Role)
+		setGroupContext(c, apiKey.Group)
+		c.Next()
+	}
+}
+
+func extractAPIKeyFromRequest(c *gin.Context) string {
+	authHeader := c.GetHeader("Authorization")
+	if authHeader != "" {
+		parts := strings.SplitN(authHeader, " ", 2)
+		if len(parts) == 2 && parts[0] == "Bearer" && strings.TrimSpace(parts[1]) != "" {
+			return strings.TrimSpace(parts[1])
+		}
+	}
+	if v := strings.TrimSpace(c.GetHeader("x-api-key")); v != "" {
+		return v
+	}
+	if v := strings.TrimSpace(c.GetHeader("x-goog-api-key")); v != "" {
+		return v
+	}
+	if allowGoogleQueryKey(c.Request.URL.Path) {
+		if v := strings.TrimSpace(c.Query("key")); v != "" {
+			return v
+		}
+	}
+	return ""
+}
+
+func allowGoogleQueryKey(path string) bool {
+	return strings.HasPrefix(path, "/v1beta") || strings.HasPrefix(path, "/antigravity/v1beta")
+}
+
+func abortWithGoogleError(c
*gin.Context, status int, message string) { + c.JSON(status, gin.H{ + "error": gin.H{ + "code": status, + "message": message, + "status": googleapi.HTTPStatusToGoogleStatus(status), + }, + }) + c.Abort() +} diff --git a/backend/internal/server/middleware/api_key_auth_google_test.go b/backend/internal/server/middleware/api_key_auth_google_test.go new file mode 100644 index 00000000..6f09469b --- /dev/null +++ b/backend/internal/server/middleware/api_key_auth_google_test.go @@ -0,0 +1,353 @@ +package middleware + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type fakeAPIKeyRepo struct { + getByKey func(ctx context.Context, key string) (*service.APIKey, error) +} + +func (f fakeAPIKeyRepo) Create(ctx context.Context, key *service.APIKey) error { + return errors.New("not implemented") +} +func (f fakeAPIKeyRepo) GetByID(ctx context.Context, id int64) (*service.APIKey, error) { + return nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + return "", 0, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { + if f.getByKey == nil { + return nil, errors.New("unexpected call") + } + return f.getByKey(ctx, key) +} +func (f fakeAPIKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return f.GetByKey(ctx, key) +} +func (f fakeAPIKeyRepo) Update(ctx context.Context, key *service.APIKey) error { + return errors.New("not implemented") +} +func (f fakeAPIKeyRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + return nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) CountByUserID(ctx context.Context, userID int64) (int64, error) { + return 0, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ExistsByKey(ctx context.Context, key string) (bool, error) { + return false, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.APIKey, error) { + return nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, 
error) { + return nil, errors.New("not implemented") +} + +type googleErrorResponse struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + Status string `json:"status"` + } `json:"error"` +} + +func newTestAPIKeyService(repo service.APIKeyRepository) *service.APIKeyService { + return service.NewAPIKeyService( + repo, + nil, // userRepo (unused in GetByKey) + nil, // groupRepo + nil, // userSubRepo + nil, // cache + &config.Config{}, + ) +} + +func TestApiKeyAuthWithSubscriptionGoogle_MissingKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return nil, errors.New("should not be called") + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "API key is required", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_QueryApiKeyRejected(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return nil, errors.New("should not be called") + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test?api_key=legacy", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusBadRequest, resp.Error.Code) + require.Equal(t, "Query parameter api_key is deprecated. 
Use Authorization header or key instead.", resp.Error.Message) + require.Equal(t, "INVALID_ARGUMENT", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogleSetsGroupContext(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 99, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformGemini, + Hydrated: true, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyService := service.NewAPIKeyService( + fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + }, + nil, + nil, + nil, + nil, + &config.Config{RunMode: config.RunModeSimple}, + ) + + cfg := &config.Config{RunMode: config.RunModeSimple} + r := gin.New() + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, cfg)) + r.GET("/v1beta/test", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("x-api-key", apiKey.Key) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestApiKeyAuthWithSubscriptionGoogle_QueryKeyAllowedOnV1Beta(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return &service.APIKey{ + ID: 1, + Key: key, + Status: service.StatusActive, + User: &service.User{ + ID: 123, + Status: service.StatusActive, + }, + }, nil + }, + }) + cfg := &config.Config{RunMode: config.RunModeSimple} + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, cfg)) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test?key=valid", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) +} + +func TestApiKeyAuthWithSubscriptionGoogle_InvalidKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return nil, service.ErrAPIKeyNotFound + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer invalid") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "Invalid API key", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_RepoError(t *testing.T) { + 
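+	// Repository failures other than ErrAPIKeyNotFound must surface as HTTP 500
+	// with Google status "INTERNAL", so callers can tell transient backend
+	// faults apart from a genuinely invalid key (asserted below).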
gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return nil, errors.New("db down") + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer any") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusInternalServerError, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusInternalServerError, resp.Error.Code) + require.Equal(t, "Failed to validate API key", resp.Error.Message) + require.Equal(t, "INTERNAL", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_DisabledKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return &service.APIKey{ + ID: 1, + Key: key, + Status: service.StatusDisabled, + User: &service.User{ + ID: 123, + Status: service.StatusActive, + }, + }, nil + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer disabled") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusUnauthorized, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusUnauthorized, resp.Error.Code) + require.Equal(t, "API key is disabled", resp.Error.Message) + require.Equal(t, "UNAUTHENTICATED", resp.Error.Status) +} + +func TestApiKeyAuthWithSubscriptionGoogle_InsufficientBalance(t *testing.T) { + gin.SetMode(gin.TestMode) + + r := gin.New() + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + return &service.APIKey{ + ID: 1, + Key: key, + Status: service.StatusActive, + User: &service.User{ + ID: 123, + Status: service.StatusActive, + Balance: 0, + }, + }, nil + }, + }) + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, &config.Config{})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("Authorization", "Bearer ok") + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusForbidden, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusForbidden, resp.Error.Code) + require.Equal(t, "Insufficient account balance", resp.Error.Message) + require.Equal(t, "PERMISSION_DENIED", resp.Error.Status) +} diff --git a/backend/internal/server/middleware/api_key_auth_test.go b/backend/internal/server/middleware/api_key_auth_test.go new file mode 100644 index 00000000..84398093 --- /dev/null +++ b/backend/internal/server/middleware/api_key_auth_test.go @@ -0,0 +1,427 @@ +//go:build unit + +package middleware + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + 
"github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestSimpleModeBypassesQuotaCheck(t *testing.T) { + gin.SetMode(gin.TestMode) + + limit := 1.0 + group := &service.Group{ + ID: 42, + Name: "sub", + Status: service.StatusActive, + Hydrated: true, + SubscriptionType: service.SubscriptionTypeSubscription, + DailyLimitUSD: &limit, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyRepo := &stubApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + } + + t.Run("simple_mode_bypasses_quota_check", func(t *testing.T) { + cfg := &config.Config{RunMode: config.RunModeSimple} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + subscriptionService := service.NewSubscriptionService(nil, &stubUserSubscriptionRepo{}, nil) + router := newAuthTestRouter(apiKeyService, subscriptionService, cfg) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) + }) + + t.Run("standard_mode_enforces_quota_check", func(t *testing.T) { + cfg := &config.Config{RunMode: config.RunModeStandard} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + + now := time.Now() + sub := &service.UserSubscription{ + ID: 55, + UserID: user.ID, + GroupID: group.ID, + Status: service.SubscriptionStatusActive, + ExpiresAt: now.Add(24 * time.Hour), + DailyWindowStart: &now, + DailyUsageUSD: 10, + } + subscriptionRepo := &stubUserSubscriptionRepo{ + getActive: func(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + if userID != sub.UserID || groupID != sub.GroupID { + return nil, service.ErrSubscriptionNotFound + } + clone := *sub + return &clone, nil + }, + updateStatus: func(ctx context.Context, subscriptionID int64, status string) error { return nil }, + activateWindow: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetDaily: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetWeekly: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetMonthly: func(ctx context.Context, id int64, start time.Time) error { return nil }, + } + subscriptionService := service.NewSubscriptionService(nil, subscriptionRepo, nil) + router := newAuthTestRouter(apiKeyService, subscriptionService, cfg) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusTooManyRequests, w.Code) + require.Contains(t, w.Body.String(), "USAGE_LIMIT_EXCEEDED") + }) +} + +func TestAPIKeyAuthSetsGroupContext(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 101, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformAnthropic, + Hydrated: true, + } + 
user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyRepo := &stubApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + } + + cfg := &config.Config{RunMode: config.RunModeSimple} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + router := gin.New() + router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, nil, cfg))) + router.GET("/t", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) +} + +func TestAPIKeyAuthOverwritesInvalidContextGroup(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 101, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformAnthropic, + Hydrated: true, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyRepo := &stubApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + } + + cfg := &config.Config{RunMode: config.RunModeSimple} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + router := gin.New() + router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, nil, cfg))) + + invalidGroup := &service.Group{ + ID: group.ID, + Platform: group.Platform, + Status: group.Status, + } + router.GET("/t", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID || !groupFromCtx.Hydrated || groupFromCtx == invalidGroup { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + req = req.WithContext(context.WithValue(req.Context(), ctxkey.Group, invalidGroup)) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) +} + +func newAuthTestRouter(apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, cfg *config.Config) *gin.Engine { + router := gin.New() + router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, cfg))) + router.GET("/t", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + return router +} + +type stubApiKeyRepo struct { + getByKey func(ctx context.Context, key string) (*service.APIKey, error) +} + +func 
(r *stubApiKeyRepo) Create(ctx context.Context, key *service.APIKey) error { + return errors.New("not implemented") +} + +func (r *stubApiKeyRepo) GetByID(ctx context.Context, id int64) (*service.APIKey, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + return "", 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { + if r.getByKey != nil { + return r.getByKey(ctx, key) + } + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return r.GetByKey(ctx, key) +} + +func (r *stubApiKeyRepo) Update(ctx context.Context, key *service.APIKey) error { + return errors.New("not implemented") +} + +func (r *stubApiKeyRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) CountByUserID(ctx context.Context, userID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ExistsByKey(ctx context.Context, key string) (bool, error) { + return false, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.APIKey, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]service.APIKey, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +type stubUserSubscriptionRepo struct { + getActive func(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) + updateStatus func(ctx context.Context, subscriptionID int64, status string) error + activateWindow func(ctx context.Context, id int64, start time.Time) error + resetDaily func(ctx context.Context, id int64, start time.Time) error + resetWeekly func(ctx context.Context, id int64, start time.Time) error + resetMonthly func(ctx context.Context, id int64, start time.Time) error +} + +func (r *stubUserSubscriptionRepo) Create(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) GetByID(ctx context.Context, id int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) GetByUserIDAndGroupID(ctx context.Context, 
userID, groupID int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + if r.getActive != nil { + return r.getActive(ctx, userID, groupID) + } + return nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) Update(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { + return false, errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) UpdateStatus(ctx context.Context, subscriptionID int64, status string) error { + if r.updateStatus != nil { + return r.updateStatus(ctx, subscriptionID, status) + } + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ActivateWindows(ctx context.Context, id int64, start time.Time) error { + if r.activateWindow != nil { + return r.activateWindow(ctx, id, start) + } + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ResetDailyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + if r.resetDaily != nil { + return r.resetDaily(ctx, id, newWindowStart) + } + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ResetWeeklyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + if r.resetWeekly != nil { + return r.resetWeekly(ctx, id, newWindowStart) + } + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) ResetMonthlyUsage(ctx context.Context, id int64, newWindowStart time.Time) error { + if r.resetMonthly != nil { + return r.resetMonthly(ctx, id, newWindowStart) + } + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) IncrementUsage(ctx context.Context, id int64, costUSD float64) error { + return errors.New("not implemented") +} + +func (r *stubUserSubscriptionRepo) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) { + return 0, errors.New("not implemented") +} diff --git 
a/backend/internal/server/middleware/auth_subject.go b/backend/internal/server/middleware/auth_subject.go
new file mode 100644
index 00000000..200c7b77
--- /dev/null
+++ b/backend/internal/server/middleware/auth_subject.go
@@ -0,0 +1,30 @@
+package middleware
+
+import "github.com/gin-gonic/gin"
+
+// AuthSubject is the minimal authenticated identity stored in gin context.
+// It deliberately carries only {UserID int64, Concurrency int}.
+type AuthSubject struct {
+	UserID      int64
+	Concurrency int
+}
+
+// GetAuthSubjectFromContext returns the AuthSubject set by the auth middleware, if any.
+func GetAuthSubjectFromContext(c *gin.Context) (AuthSubject, bool) {
+	value, exists := c.Get(string(ContextKeyUser))
+	if !exists {
+		return AuthSubject{}, false
+	}
+	subject, ok := value.(AuthSubject)
+	return subject, ok
+}
+
+// GetUserRoleFromContext returns the role string set by the auth middleware, if any.
+func GetUserRoleFromContext(c *gin.Context) (string, bool) {
+	value, exists := c.Get(string(ContextKeyUserRole))
+	if !exists {
+		return "", false
+	}
+	role, ok := value.(string)
+	return role, ok
+}
diff --git a/backend/internal/server/middleware/client_request_id.go b/backend/internal/server/middleware/client_request_id.go
new file mode 100644
index 00000000..d22b6cc5
--- /dev/null
+++ b/backend/internal/server/middleware/client_request_id.go
@@ -0,0 +1,30 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
+	"github.com/gin-gonic/gin"
+	"github.com/google/uuid"
+)
+
+// ClientRequestID ensures every request has a unique client_request_id in request.Context().
+//
+// This is used by the Ops monitoring module for end-to-end request correlation.
+func ClientRequestID() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		if c.Request == nil {
+			c.Next()
+			return
+		}
+
+		if v := c.Request.Context().Value(ctxkey.ClientRequestID); v != nil {
+			c.Next()
+			return
+		}
+
+		id := uuid.New().String()
+		c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ClientRequestID, id))
+		c.Next()
+	}
+}
diff --git a/backend/internal/server/middleware/cors.go b/backend/internal/server/middleware/cors.go
new file mode 100644
index 00000000..7d82f183
--- /dev/null
+++ b/backend/internal/server/middleware/cors.go
@@ -0,0 +1,103 @@
+package middleware
+
+import (
+	"log"
+	"net/http"
+	"strings"
+	"sync"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/gin-gonic/gin"
+)
+
+var corsWarningOnce sync.Once
+
+// CORS returns middleware that enforces the configured cross-origin policy.
+func CORS(cfg config.CORSConfig) gin.HandlerFunc {
+	allowedOrigins := normalizeOrigins(cfg.AllowedOrigins)
+	allowAll := false
+	for _, origin := range allowedOrigins {
+		if origin == "*" {
+			allowAll = true
+			break
+		}
+	}
+	wildcardWithSpecific := allowAll && len(allowedOrigins) > 1
+	if wildcardWithSpecific {
+		allowedOrigins = []string{"*"}
+	}
+	allowCredentials := cfg.AllowCredentials
+
+	corsWarningOnce.Do(func() {
+		if len(allowedOrigins) == 0 {
+			log.Println("Warning: CORS allowed_origins not configured; cross-origin requests will be rejected.")
+		}
+		if wildcardWithSpecific {
+			log.Println("Warning: CORS allowed_origins includes '*'; wildcard will take precedence over explicit origins.")
+		}
+		if allowAll && allowCredentials {
+			log.Println("Warning: CORS allowed_origins set to '*', disabling allow_credentials.")
+		}
+	})
+	if allowAll && allowCredentials {
+		allowCredentials = false
+	}
+
+	allowedSet := make(map[string]struct{}, len(allowedOrigins))
+	for _, origin := range allowedOrigins {
+		if origin == "" || origin == "*" {
+			continue
+		}
+		allowedSet[origin] = struct{}{}
+	}
+
+	return func(c *gin.Context) {
+		origin := strings.TrimSpace(c.GetHeader("Origin"))
+		originAllowed := allowAll
+		if origin != "" && !allowAll {
+			_, originAllowed = allowedSet[origin]
+		}
+
+		if originAllowed {
+			if allowAll {
+				c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
+			} else if origin != "" {
+				c.Writer.Header().Set("Access-Control-Allow-Origin", origin)
+				c.Writer.Header().Add("Vary", "Origin")
+			}
+			if allowCredentials {
+				c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
+			}
+		}
+
+		c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, X-API-Key")
+		c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE, PATCH")
+
+		// Handle preflight requests.
+		if c.Request.Method == http.MethodOptions {
+			if originAllowed {
+				c.AbortWithStatus(http.StatusNoContent)
+			} else {
+				c.AbortWithStatus(http.StatusForbidden)
+			}
+			return
+		}
+
+		c.Next()
+	}
+}
+
+func normalizeOrigins(values []string) []string {
+	if len(values) == 0 {
+		return nil
+	}
+	normalized := make([]string, 0, len(values))
+	for _, value := range values {
+		trimmed := strings.TrimSpace(value)
+		if trimmed == "" {
+			continue
+		}
+		normalized = append(normalized, trimmed)
+	}
+	return normalized
+}
diff --git a/backend/internal/server/middleware/jwt_auth.go b/backend/internal/server/middleware/jwt_auth.go
new file mode 100644
index 00000000..9a89aab7
--- /dev/null
+++ b/backend/internal/server/middleware/jwt_auth.go
@@ -0,0 +1,81 @@
+package middleware
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// NewJWTAuthMiddleware builds the JWT authentication middleware.
+func NewJWTAuthMiddleware(authService *service.AuthService, userService *service.UserService) JWTAuthMiddleware {
+	return JWTAuthMiddleware(jwtAuth(authService, userService))
+}
+
+// jwtAuth implements the JWT authentication middleware.
+func jwtAuth(authService *service.AuthService, userService *service.UserService) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		// Extract the token from the Authorization header.
+		authHeader := c.GetHeader("Authorization")
+		if authHeader == "" {
+			AbortWithError(c, 401, "UNAUTHORIZED", "Authorization header is required")
+			return
+		}
+
+		// Validate the Bearer scheme.
+		parts := strings.SplitN(authHeader, " ", 2)
+		if len(parts) != 2 || parts[0] != "Bearer" {
+			AbortWithError(c, 401, "INVALID_AUTH_HEADER", "Authorization header format must be 'Bearer {token}'")
+			return
+		}
+
+		tokenString := parts[1]
+		if tokenString == "" {
+			AbortWithError(c, 401, "EMPTY_TOKEN", "Token cannot be empty")
+			return
+		}
+
+		// Validate the token.
+		claims, err := authService.ValidateToken(tokenString)
+		if err != nil {
+			if errors.Is(err, service.ErrTokenExpired) {
+				AbortWithError(c, 401, "TOKEN_EXPIRED", "Token has expired")
+				return
+			}
+			AbortWithError(c, 401, "INVALID_TOKEN", "Invalid token")
+			return
+		}
+
+		// Load the latest user record from the database.
+		user, err := userService.GetByID(c.Request.Context(), claims.UserID)
+		if err != nil {
+			AbortWithError(c, 401, "USER_NOT_FOUND", "User not found")
+			return
+		}
+
+		// Check the user's status.
+		if !user.IsActive() {
+			AbortWithError(c, 401, "USER_INACTIVE", "User account is not active")
+			return
+		}
+
+		// Security: Validate TokenVersion to ensure token hasn't been invalidated
+		// This check ensures tokens issued before a password change are rejected
+		if claims.TokenVersion != user.TokenVersion {
+			AbortWithError(c, 401, "TOKEN_REVOKED", "Token has been revoked (password changed)")
+			return
+		}
+
+		c.Set(string(ContextKeyUser), AuthSubject{
+			UserID:      user.ID,
+			Concurrency: user.Concurrency,
+		})
+		c.Set(string(ContextKeyUserRole), user.Role)
+
+		c.Next()
+	}
+}
+
+// Deprecated: prefer GetAuthSubjectFromContext in auth_subject.go.
diff --git a/backend/internal/server/middleware/logger.go b/backend/internal/server/middleware/logger.go
new file mode 100644
index 00000000..a9beeb40
--- /dev/null
+++ b/backend/internal/server/middleware/logger.go
@@ -0,0 +1,52 @@
+package middleware
+
+import (
+	"log"
+	"time"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Logger is the request-logging middleware.
+func Logger() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		// Start time.
+		startTime := time.Now()
+
+		// Process the request.
+		c.Next()
+
+		// End time.
+		endTime := time.Now()
+
+		// Elapsed time.
+		latency := endTime.Sub(startTime)
+
+		// Request method.
+		method := c.Request.Method
+
+		// Request path.
+		path := c.Request.URL.Path
+
+		// Status code.
+		statusCode := c.Writer.Status()
+
+		// Client IP.
+		clientIP := c.ClientIP()
+
+		// Log format: [time] status | latency | IP | method path
+		log.Printf("[GIN] %v | %3d | %13v | %15s | %-7s %s",
+			endTime.Format("2006/01/02 - 15:04:05"),
+			statusCode,
+			latency,
+			clientIP,
+			method,
+			path,
+		)
+
+		// If any errors were collected during handling, log them as well.
+		if len(c.Errors) > 0 {
+			log.Printf("[GIN] Errors: %v", c.Errors.String())
+		}
+	}
+}
diff --git a/backend/internal/server/middleware/middleware.go b/backend/internal/server/middleware/middleware.go
new file mode 100644
index 00000000..26572019
--- /dev/null
+++ b/backend/internal/server/middleware/middleware.go
@@ -0,0 +1,73 @@
+package middleware
+
+import (
+	"context"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
+	"github.com/gin-gonic/gin"
+)
+
+// ContextKey is the type used for gin context keys.
+type ContextKey string
+
+const (
+	// ContextKeyUser is the key for the authenticated user.
+	ContextKeyUser ContextKey = "user"
+	// ContextKeyUserRole is the key for the current user's role (string).
+	ContextKeyUserRole ContextKey = "user_role"
+	// ContextKeyAPIKey is the key for the API key.
+	ContextKeyAPIKey ContextKey = "api_key"
+	// ContextKeySubscription is the key for the subscription.
+	ContextKeySubscription ContextKey = "subscription"
+	// ContextKeyForcePlatform is the key for a forced platform (used by the /antigravity routes).
+	ContextKeyForcePlatform ContextKey = "force_platform"
+)
+
+// ForcePlatform returns middleware that pins the request to a single platform.
+// It sets both request.Context() (read by the service layer) and gin.Context (for quick handler checks).
+func ForcePlatform(platform string) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		// Store in request.Context under ctxkey.ForcePlatform for the service layer.
+		ctx := context.WithValue(c.Request.Context(), ctxkey.ForcePlatform, platform)
+		c.Request = c.Request.WithContext(ctx)
+		// Also store in gin.Context for quick handler checks.
+		c.Set(string(ContextKeyForcePlatform), platform)
+		c.Next()
+	}
+}
+
+// HasForcePlatform reports whether a forced platform is set (handlers use it to skip group checks).
+func HasForcePlatform(c *gin.Context) bool {
+	_, exists := c.Get(string(ContextKeyForcePlatform))
+	return exists
+}
+
+// GetForcePlatformFromContext reads the forced platform from gin.Context.
+func GetForcePlatformFromContext(c *gin.Context) (string, bool) {
+	value, exists := c.Get(string(ContextKeyForcePlatform))
+	if !exists {
+		return "", false
+	}
+	platform, ok := value.(string)
+	return platform, ok
+}
+
+// ErrorResponse is the standard error response payload.
+type ErrorResponse struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+}
+
+// NewErrorResponse builds an ErrorResponse.
+func NewErrorResponse(code, message string) ErrorResponse {
+	return ErrorResponse{
+		Code:    code,
+		Message: message,
+	}
+}
+
+// AbortWithError aborts the request and returns a JSON error body.
+func AbortWithError(c *gin.Context, statusCode int, code, message string) {
+	c.JSON(statusCode, NewErrorResponse(code, message))
+	c.Abort()
+}
diff --git a/backend/internal/server/middleware/recovery.go b/backend/internal/server/middleware/recovery.go
new file mode 100644
index
00000000..f05154d3 --- /dev/null +++ b/backend/internal/server/middleware/recovery.go @@ -0,0 +1,64 @@ +package middleware + +import ( + "errors" + "net" + "net/http" + "os" + "strings" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/gin-gonic/gin" +) + +// Recovery converts panics into the project's standard JSON error envelope. +// +// It preserves Gin's broken-pipe handling by not attempting to write a response +// when the client connection is already gone. +func Recovery() gin.HandlerFunc { + return gin.CustomRecoveryWithWriter(gin.DefaultErrorWriter, func(c *gin.Context, recovered any) { + recoveredErr, _ := recovered.(error) + + if isBrokenPipe(recoveredErr) { + if recoveredErr != nil { + _ = c.Error(recoveredErr) + } + c.Abort() + return + } + + if c.Writer.Written() { + c.Abort() + return + } + + response.ErrorWithDetails( + c, + http.StatusInternalServerError, + infraerrors.UnknownMessage, + infraerrors.UnknownReason, + nil, + ) + c.Abort() + }) +} + +func isBrokenPipe(err error) bool { + if err == nil { + return false + } + + var opErr *net.OpError + if !errors.As(err, &opErr) { + return false + } + + var syscallErr *os.SyscallError + if !errors.As(opErr.Err, &syscallErr) { + return false + } + + msg := strings.ToLower(syscallErr.Error()) + return strings.Contains(msg, "broken pipe") || strings.Contains(msg, "connection reset by peer") +} diff --git a/backend/internal/server/middleware/recovery_test.go b/backend/internal/server/middleware/recovery_test.go new file mode 100644 index 00000000..439f44cb --- /dev/null +++ b/backend/internal/server/middleware/recovery_test.go @@ -0,0 +1,81 @@ +//go:build unit + +package middleware + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +func TestRecovery(t *testing.T) { + gin.SetMode(gin.TestMode) + + tests := []struct { + name string + handler gin.HandlerFunc + wantHTTPCode int + wantBody response.Response + }{ + { + name: "panic_returns_standard_json_500", + handler: func(c *gin.Context) { + panic("boom") + }, + wantHTTPCode: http.StatusInternalServerError, + wantBody: response.Response{ + Code: http.StatusInternalServerError, + Message: infraerrors.UnknownMessage, + }, + }, + { + name: "no_panic_passthrough", + handler: func(c *gin.Context) { + response.Success(c, gin.H{"ok": true}) + }, + wantHTTPCode: http.StatusOK, + wantBody: response.Response{ + Code: 0, + Message: "success", + Data: map[string]any{"ok": true}, + }, + }, + { + name: "panic_after_write_does_not_override_body", + handler: func(c *gin.Context) { + response.Success(c, gin.H{"ok": true}) + panic("boom") + }, + wantHTTPCode: http.StatusOK, + wantBody: response.Response{ + Code: 0, + Message: "success", + Data: map[string]any{"ok": true}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + r := gin.New() + r.Use(Recovery()) + r.GET("/t", tt.handler) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + r.ServeHTTP(w, req) + + require.Equal(t, tt.wantHTTPCode, w.Code) + + var got response.Response + require.NoError(t, json.Unmarshal(w.Body.Bytes(), &got)) + require.Equal(t, tt.wantBody, got) + }) + } +} diff --git a/backend/internal/server/middleware/request_body_limit.go 
b/backend/internal/server/middleware/request_body_limit.go
new file mode 100644
index 00000000..fce13eea
--- /dev/null
+++ b/backend/internal/server/middleware/request_body_limit.go
@@ -0,0 +1,15 @@
+package middleware
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RequestBodyLimit caps the request body size with http.MaxBytesReader.
+func RequestBodyLimit(maxBytes int64) gin.HandlerFunc {
+	return func(c *gin.Context) {
+		c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, maxBytes)
+		c.Next()
+	}
+}
diff --git a/backend/internal/server/middleware/security_headers.go b/backend/internal/server/middleware/security_headers.go
new file mode 100644
index 00000000..9fca0cd3
--- /dev/null
+++ b/backend/internal/server/middleware/security_headers.go
@@ -0,0 +1,26 @@
+package middleware
+
+import (
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/gin-gonic/gin"
+)
+
+// SecurityHeaders sets baseline security headers for all responses.
+func SecurityHeaders(cfg config.CSPConfig) gin.HandlerFunc {
+	policy := strings.TrimSpace(cfg.Policy)
+	if policy == "" {
+		policy = config.DefaultCSPPolicy
+	}
+
+	return func(c *gin.Context) {
+		c.Header("X-Content-Type-Options", "nosniff")
+		c.Header("X-Frame-Options", "DENY")
+		c.Header("Referrer-Policy", "strict-origin-when-cross-origin")
+		if cfg.Enabled {
+			c.Header("Content-Security-Policy", policy)
+		}
+		c.Next()
+	}
+}
diff --git a/backend/internal/server/middleware/wire.go b/backend/internal/server/middleware/wire.go
new file mode 100644
index 00000000..dc01b743
--- /dev/null
+++ b/backend/internal/server/middleware/wire.go
@@ -0,0 +1,22 @@
+package middleware
+
+import (
+	"github.com/gin-gonic/gin"
+	"github.com/google/wire"
+)
+
+// JWTAuthMiddleware is the JWT authentication middleware type.
+type JWTAuthMiddleware gin.HandlerFunc
+
+// AdminAuthMiddleware is the admin authentication middleware type.
+type AdminAuthMiddleware gin.HandlerFunc
+
+// APIKeyAuthMiddleware is the API key authentication middleware type.
+type APIKeyAuthMiddleware gin.HandlerFunc
+
+// ProviderSet is the middleware layer's dependency-injection provider set.
+var ProviderSet = wire.NewSet(
+	NewJWTAuthMiddleware,
+	NewAdminAuthMiddleware,
+	NewAPIKeyAuthMiddleware,
+)
diff --git a/backend/internal/server/router.go b/backend/internal/server/router.go
new file mode 100644
index 00000000..cf9015e4
--- /dev/null
+++ b/backend/internal/server/router.go
@@ -0,0 +1,79 @@
+package server
+
+import (
+	"log"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/server/routes"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+	"github.com/Wei-Shaw/sub2api/internal/web"
+
+	"github.com/gin-gonic/gin"
+	"github.com/redis/go-redis/v9"
+)
+
+// SetupRouter installs global middleware and registers all routes.
+func SetupRouter(
+	r *gin.Engine,
+	handlers *handler.Handlers,
+	jwtAuth middleware2.JWTAuthMiddleware,
+	adminAuth middleware2.AdminAuthMiddleware,
+	apiKeyAuth middleware2.APIKeyAuthMiddleware,
+	apiKeyService *service.APIKeyService,
+	subscriptionService *service.SubscriptionService,
+	opsService *service.OpsService,
+	settingService *service.SettingService,
+	cfg *config.Config,
+	redisClient *redis.Client,
+) *gin.Engine {
+	// Apply global middleware.
+	r.Use(middleware2.Logger())
+	r.Use(middleware2.CORS(cfg.CORS))
+	r.Use(middleware2.SecurityHeaders(cfg.Security.CSP))
+
+	// Serve embedded frontend with settings injection if available
+	if web.HasEmbeddedFrontend() {
+		frontendServer, err := web.NewFrontendServer(settingService)
+		if err != nil {
+			log.Printf("Warning: Failed to create frontend
server with settings injection: %v, using legacy mode", err) + r.Use(web.ServeEmbeddedFrontend()) + } else { + // Register cache invalidation callback + settingService.SetOnUpdateCallback(frontendServer.InvalidateCache) + r.Use(frontendServer.Middleware()) + } + } + + // 注册路由 + registerRoutes(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, opsService, cfg, redisClient) + + return r +} + +// registerRoutes 注册所有 HTTP 路由 +func registerRoutes( + r *gin.Engine, + h *handler.Handlers, + jwtAuth middleware2.JWTAuthMiddleware, + adminAuth middleware2.AdminAuthMiddleware, + apiKeyAuth middleware2.APIKeyAuthMiddleware, + apiKeyService *service.APIKeyService, + subscriptionService *service.SubscriptionService, + opsService *service.OpsService, + cfg *config.Config, + redisClient *redis.Client, +) { + // 通用路由(健康检查、状态等) + routes.RegisterCommonRoutes(r) + + // API v1 + v1 := r.Group("/api/v1") + + // 注册各模块路由 + routes.RegisterAuthRoutes(v1, h, jwtAuth, redisClient) + routes.RegisterUserRoutes(v1, h, jwtAuth) + routes.RegisterAdminRoutes(v1, h, adminAuth) + routes.RegisterGatewayRoutes(r, h, apiKeyAuth, apiKeyService, subscriptionService, opsService, cfg) +} diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go new file mode 100644 index 00000000..ff05b32a --- /dev/null +++ b/backend/internal/server/routes/admin.go @@ -0,0 +1,370 @@ +// Package routes provides HTTP route registration and handlers. +package routes + +import ( + "github.com/Wei-Shaw/sub2api/internal/handler" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + + "github.com/gin-gonic/gin" +) + +// RegisterAdminRoutes 注册管理员路由 +func RegisterAdminRoutes( + v1 *gin.RouterGroup, + h *handler.Handlers, + adminAuth middleware.AdminAuthMiddleware, +) { + admin := v1.Group("/admin") + admin.Use(gin.HandlerFunc(adminAuth)) + { + // 仪表盘 + registerDashboardRoutes(admin, h) + + // 用户管理 + registerUserManagementRoutes(admin, h) + + // 分组管理 + registerGroupRoutes(admin, h) + + // 账号管理 + registerAccountRoutes(admin, h) + + // OpenAI OAuth + registerOpenAIOAuthRoutes(admin, h) + + // Gemini OAuth + registerGeminiOAuthRoutes(admin, h) + + // Antigravity OAuth + registerAntigravityOAuthRoutes(admin, h) + + // 代理管理 + registerProxyRoutes(admin, h) + + // 卡密管理 + registerRedeemCodeRoutes(admin, h) + + // 优惠码管理 + registerPromoCodeRoutes(admin, h) + + // 系统设置 + registerSettingsRoutes(admin, h) + + // 运维监控(Ops) + registerOpsRoutes(admin, h) + + // 系统管理 + registerSystemRoutes(admin, h) + + // 订阅管理 + registerSubscriptionRoutes(admin, h) + + // 使用记录管理 + registerUsageRoutes(admin, h) + + // 用户属性管理 + registerUserAttributeRoutes(admin, h) + } +} + +func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + ops := admin.Group("/ops") + { + // Realtime ops signals + ops.GET("/concurrency", h.Admin.Ops.GetConcurrencyStats) + ops.GET("/account-availability", h.Admin.Ops.GetAccountAvailability) + ops.GET("/realtime-traffic", h.Admin.Ops.GetRealtimeTrafficSummary) + + // Alerts (rules + events) + ops.GET("/alert-rules", h.Admin.Ops.ListAlertRules) + ops.POST("/alert-rules", h.Admin.Ops.CreateAlertRule) + ops.PUT("/alert-rules/:id", h.Admin.Ops.UpdateAlertRule) + ops.DELETE("/alert-rules/:id", h.Admin.Ops.DeleteAlertRule) + ops.GET("/alert-events", h.Admin.Ops.ListAlertEvents) + ops.GET("/alert-events/:id", h.Admin.Ops.GetAlertEvent) + ops.PUT("/alert-events/:id/status", h.Admin.Ops.UpdateAlertEventStatus) + ops.POST("/alert-silences", h.Admin.Ops.CreateAlertSilence) + + // Email 
notification config (DB-backed) + ops.GET("/email-notification/config", h.Admin.Ops.GetEmailNotificationConfig) + ops.PUT("/email-notification/config", h.Admin.Ops.UpdateEmailNotificationConfig) + + // Runtime settings (DB-backed) + runtime := ops.Group("/runtime") + { + runtime.GET("/alert", h.Admin.Ops.GetAlertRuntimeSettings) + runtime.PUT("/alert", h.Admin.Ops.UpdateAlertRuntimeSettings) + } + + // Advanced settings (DB-backed) + ops.GET("/advanced-settings", h.Admin.Ops.GetAdvancedSettings) + ops.PUT("/advanced-settings", h.Admin.Ops.UpdateAdvancedSettings) + + // Settings group (DB-backed) + settings := ops.Group("/settings") + { + settings.GET("/metric-thresholds", h.Admin.Ops.GetMetricThresholds) + settings.PUT("/metric-thresholds", h.Admin.Ops.UpdateMetricThresholds) + } + + // WebSocket realtime (QPS/TPS) + ws := ops.Group("/ws") + { + ws.GET("/qps", h.Admin.Ops.QPSWSHandler) + } + + // Error logs (legacy) + ops.GET("/errors", h.Admin.Ops.GetErrorLogs) + ops.GET("/errors/:id", h.Admin.Ops.GetErrorLogByID) + ops.GET("/errors/:id/retries", h.Admin.Ops.ListRetryAttempts) + ops.POST("/errors/:id/retry", h.Admin.Ops.RetryErrorRequest) + ops.PUT("/errors/:id/resolve", h.Admin.Ops.UpdateErrorResolution) + + // Request errors (client-visible failures) + ops.GET("/request-errors", h.Admin.Ops.ListRequestErrors) + ops.GET("/request-errors/:id", h.Admin.Ops.GetRequestError) + ops.GET("/request-errors/:id/upstream-errors", h.Admin.Ops.ListRequestErrorUpstreamErrors) + ops.POST("/request-errors/:id/retry-client", h.Admin.Ops.RetryRequestErrorClient) + ops.POST("/request-errors/:id/upstream-errors/:idx/retry", h.Admin.Ops.RetryRequestErrorUpstreamEvent) + ops.PUT("/request-errors/:id/resolve", h.Admin.Ops.ResolveRequestError) + + // Upstream errors (independent upstream failures) + ops.GET("/upstream-errors", h.Admin.Ops.ListUpstreamErrors) + ops.GET("/upstream-errors/:id", h.Admin.Ops.GetUpstreamError) + ops.POST("/upstream-errors/:id/retry", h.Admin.Ops.RetryUpstreamError) + ops.PUT("/upstream-errors/:id/resolve", h.Admin.Ops.ResolveUpstreamError) + + // Request drilldown (success + error) + ops.GET("/requests", h.Admin.Ops.ListRequestDetails) + + // Dashboard (vNext - raw path for MVP) + ops.GET("/dashboard/overview", h.Admin.Ops.GetDashboardOverview) + ops.GET("/dashboard/throughput-trend", h.Admin.Ops.GetDashboardThroughputTrend) + ops.GET("/dashboard/latency-histogram", h.Admin.Ops.GetDashboardLatencyHistogram) + ops.GET("/dashboard/error-trend", h.Admin.Ops.GetDashboardErrorTrend) + ops.GET("/dashboard/error-distribution", h.Admin.Ops.GetDashboardErrorDistribution) + } +} + +func registerDashboardRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + dashboard := admin.Group("/dashboard") + { + dashboard.GET("/stats", h.Admin.Dashboard.GetStats) + dashboard.GET("/realtime", h.Admin.Dashboard.GetRealtimeMetrics) + dashboard.GET("/trend", h.Admin.Dashboard.GetUsageTrend) + dashboard.GET("/models", h.Admin.Dashboard.GetModelStats) + dashboard.GET("/api-keys-trend", h.Admin.Dashboard.GetAPIKeyUsageTrend) + dashboard.GET("/users-trend", h.Admin.Dashboard.GetUserUsageTrend) + dashboard.POST("/users-usage", h.Admin.Dashboard.GetBatchUsersUsage) + dashboard.POST("/api-keys-usage", h.Admin.Dashboard.GetBatchAPIKeysUsage) + dashboard.POST("/aggregation/backfill", h.Admin.Dashboard.BackfillAggregation) + } +} + +func registerUserManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + users := admin.Group("/users") + { + users.GET("", h.Admin.User.List) + users.GET("/:id", 
h.Admin.User.GetByID) + users.POST("", h.Admin.User.Create) + users.PUT("/:id", h.Admin.User.Update) + users.DELETE("/:id", h.Admin.User.Delete) + users.POST("/:id/balance", h.Admin.User.UpdateBalance) + users.GET("/:id/api-keys", h.Admin.User.GetUserAPIKeys) + users.GET("/:id/usage", h.Admin.User.GetUserUsage) + + // User attribute values + users.GET("/:id/attributes", h.Admin.UserAttribute.GetUserAttributes) + users.PUT("/:id/attributes", h.Admin.UserAttribute.UpdateUserAttributes) + } +} + +func registerGroupRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + groups := admin.Group("/groups") + { + groups.GET("", h.Admin.Group.List) + groups.GET("/all", h.Admin.Group.GetAll) + groups.GET("/:id", h.Admin.Group.GetByID) + groups.POST("", h.Admin.Group.Create) + groups.PUT("/:id", h.Admin.Group.Update) + groups.DELETE("/:id", h.Admin.Group.Delete) + groups.GET("/:id/stats", h.Admin.Group.GetStats) + groups.GET("/:id/api-keys", h.Admin.Group.GetGroupAPIKeys) + } +} + +func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + accounts := admin.Group("/accounts") + { + accounts.GET("", h.Admin.Account.List) + accounts.GET("/:id", h.Admin.Account.GetByID) + accounts.POST("", h.Admin.Account.Create) + accounts.POST("/sync/crs", h.Admin.Account.SyncFromCRS) + accounts.PUT("/:id", h.Admin.Account.Update) + accounts.DELETE("/:id", h.Admin.Account.Delete) + accounts.POST("/:id/test", h.Admin.Account.Test) + accounts.POST("/:id/refresh", h.Admin.Account.Refresh) + accounts.POST("/:id/refresh-tier", h.Admin.Account.RefreshTier) + accounts.GET("/:id/stats", h.Admin.Account.GetStats) + accounts.POST("/:id/clear-error", h.Admin.Account.ClearError) + accounts.GET("/:id/usage", h.Admin.Account.GetUsage) + accounts.GET("/:id/today-stats", h.Admin.Account.GetTodayStats) + accounts.POST("/:id/clear-rate-limit", h.Admin.Account.ClearRateLimit) + accounts.GET("/:id/temp-unschedulable", h.Admin.Account.GetTempUnschedulable) + accounts.DELETE("/:id/temp-unschedulable", h.Admin.Account.ClearTempUnschedulable) + accounts.POST("/:id/schedulable", h.Admin.Account.SetSchedulable) + accounts.GET("/:id/models", h.Admin.Account.GetAvailableModels) + accounts.POST("/batch", h.Admin.Account.BatchCreate) + accounts.POST("/batch-update-credentials", h.Admin.Account.BatchUpdateCredentials) + accounts.POST("/batch-refresh-tier", h.Admin.Account.BatchRefreshTier) + accounts.POST("/bulk-update", h.Admin.Account.BulkUpdate) + + // Claude OAuth routes + accounts.POST("/generate-auth-url", h.Admin.OAuth.GenerateAuthURL) + accounts.POST("/generate-setup-token-url", h.Admin.OAuth.GenerateSetupTokenURL) + accounts.POST("/exchange-code", h.Admin.OAuth.ExchangeCode) + accounts.POST("/exchange-setup-token-code", h.Admin.OAuth.ExchangeSetupTokenCode) + accounts.POST("/cookie-auth", h.Admin.OAuth.CookieAuth) + accounts.POST("/setup-token-cookie-auth", h.Admin.OAuth.SetupTokenCookieAuth) + } +} + +func registerOpenAIOAuthRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + openai := admin.Group("/openai") + { + openai.POST("/generate-auth-url", h.Admin.OpenAIOAuth.GenerateAuthURL) + openai.POST("/exchange-code", h.Admin.OpenAIOAuth.ExchangeCode) + openai.POST("/refresh-token", h.Admin.OpenAIOAuth.RefreshToken) + openai.POST("/accounts/:id/refresh", h.Admin.OpenAIOAuth.RefreshAccountToken) + openai.POST("/create-from-oauth", h.Admin.OpenAIOAuth.CreateAccountFromOAuth) + } +} + +func registerGeminiOAuthRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + gemini := admin.Group("/gemini") + { + 
gemini.POST("/oauth/auth-url", h.Admin.GeminiOAuth.GenerateAuthURL) + gemini.POST("/oauth/exchange-code", h.Admin.GeminiOAuth.ExchangeCode) + gemini.GET("/oauth/capabilities", h.Admin.GeminiOAuth.GetCapabilities) + } +} + +func registerAntigravityOAuthRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + antigravity := admin.Group("/antigravity") + { + antigravity.POST("/oauth/auth-url", h.Admin.AntigravityOAuth.GenerateAuthURL) + antigravity.POST("/oauth/exchange-code", h.Admin.AntigravityOAuth.ExchangeCode) + } +} + +func registerProxyRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + proxies := admin.Group("/proxies") + { + proxies.GET("", h.Admin.Proxy.List) + proxies.GET("/all", h.Admin.Proxy.GetAll) + proxies.GET("/:id", h.Admin.Proxy.GetByID) + proxies.POST("", h.Admin.Proxy.Create) + proxies.PUT("/:id", h.Admin.Proxy.Update) + proxies.DELETE("/:id", h.Admin.Proxy.Delete) + proxies.POST("/:id/test", h.Admin.Proxy.Test) + proxies.GET("/:id/stats", h.Admin.Proxy.GetStats) + proxies.GET("/:id/accounts", h.Admin.Proxy.GetProxyAccounts) + proxies.POST("/batch-delete", h.Admin.Proxy.BatchDelete) + proxies.POST("/batch", h.Admin.Proxy.BatchCreate) + } +} + +func registerRedeemCodeRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + codes := admin.Group("/redeem-codes") + { + codes.GET("", h.Admin.Redeem.List) + codes.GET("/stats", h.Admin.Redeem.GetStats) + codes.GET("/export", h.Admin.Redeem.Export) + codes.GET("/:id", h.Admin.Redeem.GetByID) + codes.POST("/generate", h.Admin.Redeem.Generate) + codes.DELETE("/:id", h.Admin.Redeem.Delete) + codes.POST("/batch-delete", h.Admin.Redeem.BatchDelete) + codes.POST("/:id/expire", h.Admin.Redeem.Expire) + } +} + +func registerPromoCodeRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + promoCodes := admin.Group("/promo-codes") + { + promoCodes.GET("", h.Admin.Promo.List) + promoCodes.GET("/:id", h.Admin.Promo.GetByID) + promoCodes.POST("", h.Admin.Promo.Create) + promoCodes.PUT("/:id", h.Admin.Promo.Update) + promoCodes.DELETE("/:id", h.Admin.Promo.Delete) + promoCodes.GET("/:id/usages", h.Admin.Promo.GetUsages) + } +} + +func registerSettingsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + adminSettings := admin.Group("/settings") + { + adminSettings.GET("", h.Admin.Setting.GetSettings) + adminSettings.PUT("", h.Admin.Setting.UpdateSettings) + adminSettings.POST("/test-smtp", h.Admin.Setting.TestSMTPConnection) + adminSettings.POST("/send-test-email", h.Admin.Setting.SendTestEmail) + // Admin API Key 管理 + adminSettings.GET("/admin-api-key", h.Admin.Setting.GetAdminAPIKey) + adminSettings.POST("/admin-api-key/regenerate", h.Admin.Setting.RegenerateAdminAPIKey) + adminSettings.DELETE("/admin-api-key", h.Admin.Setting.DeleteAdminAPIKey) + // 流超时处理配置 + adminSettings.GET("/stream-timeout", h.Admin.Setting.GetStreamTimeoutSettings) + adminSettings.PUT("/stream-timeout", h.Admin.Setting.UpdateStreamTimeoutSettings) + } +} + +func registerSystemRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + system := admin.Group("/system") + { + system.GET("/version", h.Admin.System.GetVersion) + system.GET("/check-updates", h.Admin.System.CheckUpdates) + system.POST("/update", h.Admin.System.PerformUpdate) + system.POST("/rollback", h.Admin.System.Rollback) + system.POST("/restart", h.Admin.System.RestartService) + } +} + +func registerSubscriptionRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + subscriptions := admin.Group("/subscriptions") + { + subscriptions.GET("", h.Admin.Subscription.List) + subscriptions.GET("/:id", 
h.Admin.Subscription.GetByID)
+		subscriptions.GET("/:id/progress", h.Admin.Subscription.GetProgress)
+		subscriptions.POST("/assign", h.Admin.Subscription.Assign)
+		subscriptions.POST("/bulk-assign", h.Admin.Subscription.BulkAssign)
+		subscriptions.POST("/:id/extend", h.Admin.Subscription.Extend)
+		subscriptions.DELETE("/:id", h.Admin.Subscription.Revoke)
+	}
+
+	// Subscriptions under a group.
+	admin.GET("/groups/:id/subscriptions", h.Admin.Subscription.ListByGroup)
+
+	// Subscriptions under a user.
+	admin.GET("/users/:id/subscriptions", h.Admin.Subscription.ListByUser)
+}
+
+func registerUsageRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
+	usage := admin.Group("/usage")
+	{
+		usage.GET("", h.Admin.Usage.List)
+		usage.GET("/stats", h.Admin.Usage.Stats)
+		usage.GET("/search-users", h.Admin.Usage.SearchUsers)
+		usage.GET("/search-api-keys", h.Admin.Usage.SearchAPIKeys)
+	}
+}
+
+func registerUserAttributeRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
+	attrs := admin.Group("/user-attributes")
+	{
+		attrs.GET("", h.Admin.UserAttribute.ListDefinitions)
+		attrs.POST("", h.Admin.UserAttribute.CreateDefinition)
+		attrs.POST("/batch", h.Admin.UserAttribute.GetBatchUserAttributes)
+		attrs.PUT("/reorder", h.Admin.UserAttribute.ReorderDefinitions)
+		attrs.PUT("/:id", h.Admin.UserAttribute.UpdateDefinition)
+		attrs.DELETE("/:id", h.Admin.UserAttribute.DeleteDefinition)
+	}
+}
diff --git a/backend/internal/server/routes/auth.go b/backend/internal/server/routes/auth.go
new file mode 100644
index 00000000..aa691eba
--- /dev/null
+++ b/backend/internal/server/routes/auth.go
@@ -0,0 +1,50 @@
+package routes
+
+import (
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	"github.com/Wei-Shaw/sub2api/internal/middleware"
+	servermiddleware "github.com/Wei-Shaw/sub2api/internal/server/middleware"
+
+	"github.com/gin-gonic/gin"
+	"github.com/redis/go-redis/v9"
+)
+
+// RegisterAuthRoutes registers authentication-related routes.
+func RegisterAuthRoutes(
+	v1 *gin.RouterGroup,
+	h *handler.Handlers,
+	jwtAuth servermiddleware.JWTAuthMiddleware,
+	redisClient *redis.Client,
+) {
+	// Create the rate limiter.
+	rateLimiter := middleware.NewRateLimiter(redisClient)
+
+	// Public endpoints.
+	auth := v1.Group("/auth")
+	{
+		auth.POST("/register", h.Auth.Register)
+		auth.POST("/login", h.Auth.Login)
+		auth.POST("/send-verify-code", h.Auth.SendVerifyCode)
+		// Rate-limit promo code validation: at most 10 requests per minute (fail-close if Redis is down).
+		auth.POST("/validate-promo-code", rateLimiter.LimitWithOptions("validate-promo", 10, time.Minute, middleware.RateLimitOptions{
+			FailureMode: middleware.RateLimitFailClose,
+		}), h.Auth.ValidatePromoCode)
+		auth.GET("/oauth/linuxdo/start", h.Auth.LinuxDoOAuthStart)
+		auth.GET("/oauth/linuxdo/callback", h.Auth.LinuxDoOAuthCallback)
+	}
+
+	// Public settings (no authentication required).
+	settings := v1.Group("/settings")
+	{
+		settings.GET("/public", h.Setting.GetPublicSettings)
+	}
+
+	// Current-user info (authentication required).
+	authenticated := v1.Group("")
+	authenticated.Use(gin.HandlerFunc(jwtAuth))
+	{
+		authenticated.GET("/auth/me", h.Auth.GetCurrentUser)
+	}
+}
diff --git a/backend/internal/server/routes/common.go b/backend/internal/server/routes/common.go
new file mode 100644
index 00000000..4989358d
--- /dev/null
+++ b/backend/internal/server/routes/common.go
@@ -0,0 +1,34 @@
+package routes
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterCommonRoutes registers common routes (health check, status, etc.).
+func RegisterCommonRoutes(r *gin.Engine) {
+	// Health check.
+	r.GET("/health", func(c *gin.Context) {
+		c.JSON(http.StatusOK, gin.H{"status": "ok"})
+	})
+
+	// Claude Code telemetry logging (ignored; respond 200 immediately).
+	r.POST("/api/event_logging/batch", func(c *gin.Context) {
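+		// Note: the body is intentionally not read or validated; a bare 2xx is
+		// all the client needs (presumably so the CLI does not retry or surface errors).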
+		c.Status(http.StatusOK)
+	})
+
+	// Setup status endpoint (always returns needs_setup: false in normal mode)
+	// This is used by the frontend to detect when the service has restarted after setup
+	r.GET("/setup/status", func(c *gin.Context) {
+		c.JSON(http.StatusOK, gin.H{
+			"code": 0,
+			"data": gin.H{
+				"needs_setup": false,
+				"step":        "completed",
+			},
+		})
+	})
+}
diff --git a/backend/internal/server/routes/gateway.go b/backend/internal/server/routes/gateway.go
new file mode 100644
index 00000000..bf019ce3
--- /dev/null
+++ b/backend/internal/server/routes/gateway.go
@@ -0,0 +1,85 @@
+package routes
+
+import (
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+	"github.com/Wei-Shaw/sub2api/internal/service"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterGatewayRoutes registers the API gateway routes (Claude/OpenAI/Gemini compatible).
+func RegisterGatewayRoutes(
+	r *gin.Engine,
+	h *handler.Handlers,
+	apiKeyAuth middleware.APIKeyAuthMiddleware,
+	apiKeyService *service.APIKeyService,
+	subscriptionService *service.SubscriptionService,
+	opsService *service.OpsService,
+	cfg *config.Config,
+) {
+	bodyLimit := middleware.RequestBodyLimit(cfg.Gateway.MaxBodySize)
+	clientRequestID := middleware.ClientRequestID()
+	opsErrorLogger := handler.OpsErrorLoggerMiddleware(opsService)
+
+	// API gateway (Claude API compatible)
+	gateway := r.Group("/v1")
+	gateway.Use(bodyLimit)
+	gateway.Use(clientRequestID)
+	gateway.Use(opsErrorLogger)
+	gateway.Use(gin.HandlerFunc(apiKeyAuth))
+	{
+		gateway.POST("/messages", h.Gateway.Messages)
+		gateway.POST("/messages/count_tokens", h.Gateway.CountTokens)
+		gateway.GET("/models", h.Gateway.Models)
+		gateway.GET("/usage", h.Gateway.Usage)
+		// OpenAI Responses API
+		gateway.POST("/responses", h.OpenAIGateway.Responses)
+	}
+
+	// Gemini native API compatibility layer (direct Gemini SDK/CLI access)
+	gemini := r.Group("/v1beta")
+	gemini.Use(bodyLimit)
+	gemini.Use(clientRequestID)
+	gemini.Use(opsErrorLogger)
+	gemini.Use(middleware.APIKeyAuthWithSubscriptionGoogle(apiKeyService, subscriptionService, cfg))
+	{
+		gemini.GET("/models", h.Gateway.GeminiV1BetaListModels)
+		gemini.GET("/models/:model", h.Gateway.GeminiV1BetaGetModel)
+		// Gin treats ":" as a param marker, but Gemini uses "{model}:{action}" in the same segment.
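+		// A catch-all wildcard captures the whole segment instead, e.g. a call to
+		// POST /v1beta/models/gemini-2.0-flash:streamGenerateContent (model name
+		// illustrative) arrives as modelAction = "/gemini-2.0-flash:streamGenerateContent",
+		// which the handler is expected to split on the ":" separator.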
+		gemini.POST("/models/*modelAction", h.Gateway.GeminiV1BetaModels)
+	}
+
+	// OpenAI Responses API (alias without the /v1 prefix)
+	r.POST("/responses", bodyLimit, clientRequestID, opsErrorLogger, gin.HandlerFunc(apiKeyAuth), h.OpenAIGateway.Responses)
+
+	// Antigravity model list
+	r.GET("/antigravity/models", gin.HandlerFunc(apiKeyAuth), h.Gateway.AntigravityModels)
+
+	// Antigravity-only routes (use antigravity accounts exclusively; no mixed scheduling)
+	antigravityV1 := r.Group("/antigravity/v1")
+	antigravityV1.Use(bodyLimit)
+	antigravityV1.Use(clientRequestID)
+	antigravityV1.Use(opsErrorLogger)
+	antigravityV1.Use(middleware.ForcePlatform(service.PlatformAntigravity))
+	antigravityV1.Use(gin.HandlerFunc(apiKeyAuth))
+	{
+		antigravityV1.POST("/messages", h.Gateway.Messages)
+		antigravityV1.POST("/messages/count_tokens", h.Gateway.CountTokens)
+		antigravityV1.GET("/models", h.Gateway.AntigravityModels)
+		antigravityV1.GET("/usage", h.Gateway.Usage)
+	}
+
+	antigravityV1Beta := r.Group("/antigravity/v1beta")
+	antigravityV1Beta.Use(bodyLimit)
+	antigravityV1Beta.Use(clientRequestID)
+	antigravityV1Beta.Use(opsErrorLogger)
+	antigravityV1Beta.Use(middleware.ForcePlatform(service.PlatformAntigravity))
+	antigravityV1Beta.Use(middleware.APIKeyAuthWithSubscriptionGoogle(apiKeyService, subscriptionService, cfg))
+	{
+		antigravityV1Beta.GET("/models", h.Gateway.GeminiV1BetaListModels)
+		antigravityV1Beta.GET("/models/:model", h.Gateway.GeminiV1BetaGetModel)
+		antigravityV1Beta.POST("/models/*modelAction", h.Gateway.GeminiV1BetaModels)
+	}
+}
diff --git a/backend/internal/server/routes/user.go b/backend/internal/server/routes/user.go
new file mode 100644
index 00000000..ad2166fe
--- /dev/null
+++ b/backend/internal/server/routes/user.go
@@ -0,0 +1,72 @@
+package routes
+
+import (
+	"github.com/Wei-Shaw/sub2api/internal/handler"
+	"github.com/Wei-Shaw/sub2api/internal/server/middleware"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterUserRoutes registers user-facing routes (authentication required).
+func RegisterUserRoutes(
+	v1 *gin.RouterGroup,
+	h *handler.Handlers,
+	jwtAuth middleware.JWTAuthMiddleware,
+) {
+	authenticated := v1.Group("")
+	authenticated.Use(gin.HandlerFunc(jwtAuth))
+	{
+		// User endpoints
+		user := authenticated.Group("/user")
+		{
+			user.GET("/profile", h.User.GetProfile)
+			user.PUT("/password", h.User.ChangePassword)
+			user.PUT("", h.User.UpdateProfile)
+		}
+
+		// API key management
+		keys := authenticated.Group("/keys")
+		{
+			keys.GET("", h.APIKey.List)
+			keys.GET("/:id", h.APIKey.GetByID)
+			keys.POST("", h.APIKey.Create)
+			keys.PUT("/:id", h.APIKey.Update)
+			keys.DELETE("/:id", h.APIKey.Delete)
+		}
+
+		// Groups available to the user (non-admin endpoint)
+		groups := authenticated.Group("/groups")
+		{
+			groups.GET("/available", h.APIKey.GetAvailableGroups)
+		}
+
+		// Usage records
+		usage := authenticated.Group("/usage")
+		{
+			usage.GET("", h.Usage.List)
+			usage.GET("/:id", h.Usage.GetByID)
+			usage.GET("/stats", h.Usage.Stats)
+			// User dashboard endpoints
+			usage.GET("/dashboard/stats", h.Usage.DashboardStats)
+			usage.GET("/dashboard/trend", h.Usage.DashboardTrend)
+			usage.GET("/dashboard/models", h.Usage.DashboardModels)
+			usage.POST("/dashboard/api-keys-usage", h.Usage.DashboardAPIKeysUsage)
+		}
+
+		// Redeem-code redemption
+		redeem := authenticated.Group("/redeem")
+		{
+			redeem.POST("", h.Redeem.Redeem)
+			redeem.GET("/history", h.Redeem.GetHistory)
+		}
+
+		// User subscriptions
+		subscriptions := authenticated.Group("/subscriptions")
+		{
+			subscriptions.GET("", h.Subscription.List)
+			subscriptions.GET("/active", h.Subscription.GetActive)
+			subscriptions.GET("/progress", h.Subscription.GetProgress)
+			subscriptions.GET("/summary",
h.Subscription.GetSummary) + } + } +} diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go new file mode 100644 index 00000000..0d7a9cf9 --- /dev/null +++ b/backend/internal/service/account.go @@ -0,0 +1,559 @@ +// Package service provides business logic and domain services for the application. +package service + +import ( + "encoding/json" + "strconv" + "strings" + "time" +) + +type Account struct { + ID int64 + Name string + Notes *string + Platform string + Type string + Credentials map[string]any + Extra map[string]any + ProxyID *int64 + Concurrency int + Priority int + // RateMultiplier 账号计费倍率(>=0,允许 0 表示该账号计费为 0)。 + // 使用指针用于兼容旧版本调度缓存(Redis)中缺字段的情况:nil 表示按 1.0 处理。 + RateMultiplier *float64 + Status string + ErrorMessage string + LastUsedAt *time.Time + ExpiresAt *time.Time + AutoPauseOnExpired bool + CreatedAt time.Time + UpdatedAt time.Time + + Schedulable bool + + RateLimitedAt *time.Time + RateLimitResetAt *time.Time + OverloadUntil *time.Time + + TempUnschedulableUntil *time.Time + TempUnschedulableReason string + + SessionWindowStart *time.Time + SessionWindowEnd *time.Time + SessionWindowStatus string + + Proxy *Proxy + AccountGroups []AccountGroup + GroupIDs []int64 + Groups []*Group +} + +type TempUnschedulableRule struct { + ErrorCode int `json:"error_code"` + Keywords []string `json:"keywords"` + DurationMinutes int `json:"duration_minutes"` + Description string `json:"description"` +} + +func (a *Account) IsActive() bool { + return a.Status == StatusActive +} + +// BillingRateMultiplier 返回账号计费倍率。 +// - nil 表示未配置/旧缓存缺字段,按 1.0 处理 +// - 允许 0,表示该账号计费为 0 +// - 负数属于非法数据,出于安全考虑按 1.0 处理 +func (a *Account) BillingRateMultiplier() float64 { + if a == nil || a.RateMultiplier == nil { + return 1.0 + } + if *a.RateMultiplier < 0 { + return 1.0 + } + return *a.RateMultiplier +} + +func (a *Account) IsSchedulable() bool { + if !a.IsActive() || !a.Schedulable { + return false + } + now := time.Now() + if a.AutoPauseOnExpired && a.ExpiresAt != nil && !now.Before(*a.ExpiresAt) { + return false + } + if a.OverloadUntil != nil && now.Before(*a.OverloadUntil) { + return false + } + if a.RateLimitResetAt != nil && now.Before(*a.RateLimitResetAt) { + return false + } + if a.TempUnschedulableUntil != nil && now.Before(*a.TempUnschedulableUntil) { + return false + } + return true +} + +func (a *Account) IsRateLimited() bool { + if a.RateLimitResetAt == nil { + return false + } + return time.Now().Before(*a.RateLimitResetAt) +} + +func (a *Account) IsOverloaded() bool { + if a.OverloadUntil == nil { + return false + } + return time.Now().Before(*a.OverloadUntil) +} + +func (a *Account) IsOAuth() bool { + return a.Type == AccountTypeOAuth || a.Type == AccountTypeSetupToken +} + +func (a *Account) IsGemini() bool { + return a.Platform == PlatformGemini +} + +func (a *Account) GeminiOAuthType() string { + if a.Platform != PlatformGemini || a.Type != AccountTypeOAuth { + return "" + } + oauthType := strings.TrimSpace(a.GetCredential("oauth_type")) + if oauthType == "" && strings.TrimSpace(a.GetCredential("project_id")) != "" { + return "code_assist" + } + return oauthType +} + +func (a *Account) GeminiTierID() string { + tierID := strings.TrimSpace(a.GetCredential("tier_id")) + return tierID +} + +func (a *Account) IsGeminiCodeAssist() bool { + if a.Platform != PlatformGemini || a.Type != AccountTypeOAuth { + return false + } + oauthType := a.GeminiOAuthType() + if oauthType == "" { + return strings.TrimSpace(a.GetCredential("project_id")) != "" + } + return oauthType 
== "code_assist" +} + +func (a *Account) CanGetUsage() bool { + return a.Type == AccountTypeOAuth +} + +func (a *Account) GetCredential(key string) string { + if a.Credentials == nil { + return "" + } + v, ok := a.Credentials[key] + if !ok || v == nil { + return "" + } + + // 支持多种类型(兼容历史数据中 expires_at 等字段可能是数字或字符串) + switch val := v.(type) { + case string: + return val + case json.Number: + // GORM datatypes.JSONMap 使用 UseNumber() 解析,数字类型为 json.Number + return val.String() + case float64: + // JSON 解析后数字默认为 float64 + return strconv.FormatInt(int64(val), 10) + case int64: + return strconv.FormatInt(val, 10) + case int: + return strconv.Itoa(val) + default: + return "" + } +} + +// GetCredentialAsTime 解析凭证中的时间戳字段,支持多种格式 +// 兼容以下格式: +// - RFC3339 字符串: "2025-01-01T00:00:00Z" +// - Unix 时间戳字符串: "1735689600" +// - Unix 时间戳数字: 1735689600 (float64/int64/json.Number) +func (a *Account) GetCredentialAsTime(key string) *time.Time { + s := a.GetCredential(key) + if s == "" { + return nil + } + // 尝试 RFC3339 格式 + if t, err := time.Parse(time.RFC3339, s); err == nil { + return &t + } + // 尝试 Unix 时间戳(纯数字字符串) + if ts, err := strconv.ParseInt(s, 10, 64); err == nil { + t := time.Unix(ts, 0) + return &t + } + return nil +} + +func (a *Account) IsTempUnschedulableEnabled() bool { + if a.Credentials == nil { + return false + } + raw, ok := a.Credentials["temp_unschedulable_enabled"] + if !ok || raw == nil { + return false + } + enabled, ok := raw.(bool) + return ok && enabled +} + +func (a *Account) GetTempUnschedulableRules() []TempUnschedulableRule { + if a.Credentials == nil { + return nil + } + raw, ok := a.Credentials["temp_unschedulable_rules"] + if !ok || raw == nil { + return nil + } + + arr, ok := raw.([]any) + if !ok { + return nil + } + + rules := make([]TempUnschedulableRule, 0, len(arr)) + for _, item := range arr { + entry, ok := item.(map[string]any) + if !ok || entry == nil { + continue + } + + rule := TempUnschedulableRule{ + ErrorCode: parseTempUnschedInt(entry["error_code"]), + Keywords: parseTempUnschedStrings(entry["keywords"]), + DurationMinutes: parseTempUnschedInt(entry["duration_minutes"]), + Description: parseTempUnschedString(entry["description"]), + } + + if rule.ErrorCode <= 0 || rule.DurationMinutes <= 0 || len(rule.Keywords) == 0 { + continue + } + + rules = append(rules, rule) + } + + return rules +} + +func parseTempUnschedString(value any) string { + s, ok := value.(string) + if !ok { + return "" + } + return strings.TrimSpace(s) +} + +func parseTempUnschedStrings(value any) []string { + if value == nil { + return nil + } + + var raw []string + switch v := value.(type) { + case []string: + raw = v + case []any: + raw = make([]string, 0, len(v)) + for _, item := range v { + if s, ok := item.(string); ok { + raw = append(raw, s) + } + } + default: + return nil + } + + out := make([]string, 0, len(raw)) + for _, item := range raw { + s := strings.TrimSpace(item) + if s != "" { + out = append(out, s) + } + } + return out +} + +func normalizeAccountNotes(value *string) *string { + if value == nil { + return nil + } + trimmed := strings.TrimSpace(*value) + if trimmed == "" { + return nil + } + return &trimmed +} + +func parseTempUnschedInt(value any) int { + switch v := value.(type) { + case int: + return v + case int64: + return int(v) + case float64: + return int(v) + case json.Number: + if i, err := v.Int64(); err == nil { + return int(i) + } + case string: + if i, err := strconv.Atoi(strings.TrimSpace(v)); err == nil { + return i + } + } + return 0 +} + +func (a *Account) 
GetModelMapping() map[string]string { + if a.Credentials == nil { + return nil + } + raw, ok := a.Credentials["model_mapping"] + if !ok || raw == nil { + return nil + } + if m, ok := raw.(map[string]any); ok { + result := make(map[string]string) + for k, v := range m { + if s, ok := v.(string); ok { + result[k] = s + } + } + if len(result) > 0 { + return result + } + } + return nil +} + +func (a *Account) IsModelSupported(requestedModel string) bool { + mapping := a.GetModelMapping() + if len(mapping) == 0 { + return true + } + _, exists := mapping[requestedModel] + return exists +} + +func (a *Account) GetMappedModel(requestedModel string) string { + mapping := a.GetModelMapping() + if len(mapping) == 0 { + return requestedModel + } + if mappedModel, exists := mapping[requestedModel]; exists { + return mappedModel + } + return requestedModel +} + +func (a *Account) GetBaseURL() string { + if a.Type != AccountTypeAPIKey { + return "" + } + baseURL := a.GetCredential("base_url") + if baseURL == "" { + return "https://api.anthropic.com" + } + return baseURL +} + +func (a *Account) GetExtraString(key string) string { + if a.Extra == nil { + return "" + } + if v, ok := a.Extra[key]; ok { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func (a *Account) IsCustomErrorCodesEnabled() bool { + if a.Type != AccountTypeAPIKey || a.Credentials == nil { + return false + } + if v, ok := a.Credentials["custom_error_codes_enabled"]; ok { + if enabled, ok := v.(bool); ok { + return enabled + } + } + return false +} + +func (a *Account) GetCustomErrorCodes() []int { + if a.Credentials == nil { + return nil + } + raw, ok := a.Credentials["custom_error_codes"] + if !ok || raw == nil { + return nil + } + if arr, ok := raw.([]any); ok { + result := make([]int, 0, len(arr)) + for _, v := range arr { + if f, ok := v.(float64); ok { + result = append(result, int(f)) + } + } + return result + } + return nil +} + +func (a *Account) ShouldHandleErrorCode(statusCode int) bool { + if !a.IsCustomErrorCodesEnabled() { + return true + } + codes := a.GetCustomErrorCodes() + if len(codes) == 0 { + return true + } + for _, code := range codes { + if code == statusCode { + return true + } + } + return false +} + +func (a *Account) IsInterceptWarmupEnabled() bool { + if a.Credentials == nil { + return false + } + if v, ok := a.Credentials["intercept_warmup_requests"]; ok { + if enabled, ok := v.(bool); ok { + return enabled + } + } + return false +} + +func (a *Account) IsOpenAI() bool { + return a.Platform == PlatformOpenAI +} + +func (a *Account) IsAnthropic() bool { + return a.Platform == PlatformAnthropic +} + +func (a *Account) IsOpenAIOAuth() bool { + return a.IsOpenAI() && a.Type == AccountTypeOAuth +} + +func (a *Account) IsOpenAIApiKey() bool { + return a.IsOpenAI() && a.Type == AccountTypeAPIKey +} + +func (a *Account) GetOpenAIBaseURL() string { + if !a.IsOpenAI() { + return "" + } + if a.Type == AccountTypeAPIKey { + baseURL := a.GetCredential("base_url") + if baseURL != "" { + return baseURL + } + } + return "https://api.openai.com" +} + +func (a *Account) GetOpenAIAccessToken() string { + if !a.IsOpenAI() { + return "" + } + return a.GetCredential("access_token") +} + +func (a *Account) GetOpenAIRefreshToken() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("refresh_token") +} + +func (a *Account) GetOpenAIIDToken() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("id_token") +} + +func (a *Account) GetOpenAIApiKey() string { + if 
!a.IsOpenAIApiKey() { + return "" + } + return a.GetCredential("api_key") +} + +func (a *Account) GetOpenAIUserAgent() string { + if !a.IsOpenAI() { + return "" + } + return a.GetCredential("user_agent") +} + +func (a *Account) GetChatGPTAccountID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("chatgpt_account_id") +} + +func (a *Account) GetChatGPTUserID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("chatgpt_user_id") +} + +func (a *Account) GetOpenAIOrganizationID() string { + if !a.IsOpenAIOAuth() { + return "" + } + return a.GetCredential("organization_id") +} + +func (a *Account) GetOpenAITokenExpiresAt() *time.Time { + if !a.IsOpenAIOAuth() { + return nil + } + return a.GetCredentialAsTime("expires_at") +} + +func (a *Account) IsOpenAITokenExpired() bool { + expiresAt := a.GetOpenAITokenExpiresAt() + if expiresAt == nil { + return false + } + return time.Now().Add(60 * time.Second).After(*expiresAt) +} + +// IsMixedSchedulingEnabled 检查 antigravity 账户是否启用混合调度 +// 启用后可参与 anthropic/gemini 分组的账户调度 +func (a *Account) IsMixedSchedulingEnabled() bool { + if a.Platform != PlatformAntigravity { + return false + } + if a.Extra == nil { + return false + } + if v, ok := a.Extra["mixed_scheduling"]; ok { + if enabled, ok := v.(bool); ok { + return enabled + } + } + return false +} diff --git a/backend/internal/service/account_billing_rate_multiplier_test.go b/backend/internal/service/account_billing_rate_multiplier_test.go new file mode 100644 index 00000000..731cfa7a --- /dev/null +++ b/backend/internal/service/account_billing_rate_multiplier_test.go @@ -0,0 +1,27 @@ +package service + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAccount_BillingRateMultiplier_DefaultsToOneWhenNil(t *testing.T) { + var a Account + require.NoError(t, json.Unmarshal([]byte(`{"id":1,"name":"acc","status":"active"}`), &a)) + require.Nil(t, a.RateMultiplier) + require.Equal(t, 1.0, a.BillingRateMultiplier()) +} + +func TestAccount_BillingRateMultiplier_AllowsZero(t *testing.T) { + v := 0.0 + a := Account{RateMultiplier: &v} + require.Equal(t, 0.0, a.BillingRateMultiplier()) +} + +func TestAccount_BillingRateMultiplier_NegativeFallsBackToOne(t *testing.T) { + v := -1.0 + a := Account{RateMultiplier: &v} + require.Equal(t, 1.0, a.BillingRateMultiplier()) +} diff --git a/backend/internal/service/account_expiry_service.go b/backend/internal/service/account_expiry_service.go new file mode 100644 index 00000000..eaada11c --- /dev/null +++ b/backend/internal/service/account_expiry_service.go @@ -0,0 +1,71 @@ +package service + +import ( + "context" + "log" + "sync" + "time" +) + +// AccountExpiryService periodically pauses expired accounts when auto-pause is enabled. 
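+//
+// A minimal usage sketch (the one-minute interval is purely illustrative):
+//
+//	svc := NewAccountExpiryService(repo, time.Minute)
+//	svc.Start()
+//	defer svc.Stop()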
+type AccountExpiryService struct { + accountRepo AccountRepository + interval time.Duration + stopCh chan struct{} + stopOnce sync.Once + wg sync.WaitGroup +} + +func NewAccountExpiryService(accountRepo AccountRepository, interval time.Duration) *AccountExpiryService { + return &AccountExpiryService{ + accountRepo: accountRepo, + interval: interval, + stopCh: make(chan struct{}), + } +} + +func (s *AccountExpiryService) Start() { + if s == nil || s.accountRepo == nil || s.interval <= 0 { + return + } + s.wg.Add(1) + go func() { + defer s.wg.Done() + ticker := time.NewTicker(s.interval) + defer ticker.Stop() + + s.runOnce() + for { + select { + case <-ticker.C: + s.runOnce() + case <-s.stopCh: + return + } + } + }() +} + +func (s *AccountExpiryService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + close(s.stopCh) + }) + s.wg.Wait() +} + +func (s *AccountExpiryService) runOnce() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + updated, err := s.accountRepo.AutoPauseExpiredAccounts(ctx, time.Now()) + if err != nil { + log.Printf("[AccountExpiry] Auto pause expired accounts failed: %v", err) + return + } + if updated > 0 { + log.Printf("[AccountExpiry] Auto paused %d expired accounts", updated) + } +} diff --git a/backend/internal/service/account_group.go b/backend/internal/service/account_group.go new file mode 100644 index 00000000..ab702a08 --- /dev/null +++ b/backend/internal/service/account_group.go @@ -0,0 +1,13 @@ +package service + +import "time" + +type AccountGroup struct { + AccountID int64 + GroupID int64 + Priority int + CreatedAt time.Time + + Account *Account + Group *Group +} diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go new file mode 100644 index 00000000..2badc760 --- /dev/null +++ b/backend/internal/service/account_service.go @@ -0,0 +1,351 @@ +package service + +import ( + "context" + "fmt" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +var ( + ErrAccountNotFound = infraerrors.NotFound("ACCOUNT_NOT_FOUND", "account not found") + ErrAccountNilInput = infraerrors.BadRequest("ACCOUNT_NIL_INPUT", "account input cannot be nil") +) + +type AccountRepository interface { + Create(ctx context.Context, account *Account) error + GetByID(ctx context.Context, id int64) (*Account, error) + // GetByIDs fetches accounts by IDs in a single query. + // It should return all accounts found (missing IDs are ignored). + GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) + // ExistsByID 检查账号是否存在,仅返回布尔值,用于删除前的轻量级存在性检查 + ExistsByID(ctx context.Context, id int64) (bool, error) + // GetByCRSAccountID finds an account previously synced from CRS. + // Returns (nil, nil) if not found. 
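+	// Callers should therefore check both the error and whether the returned
+	// account is nil before using it.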
+ GetByCRSAccountID(ctx context.Context, crsAccountID string) (*Account, error) + Update(ctx context.Context, account *Account) error + Delete(ctx context.Context, id int64) error + + List(ctx context.Context, params pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) + ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]Account, *pagination.PaginationResult, error) + ListByGroup(ctx context.Context, groupID int64) ([]Account, error) + ListActive(ctx context.Context) ([]Account, error) + ListByPlatform(ctx context.Context, platform string) ([]Account, error) + + UpdateLastUsed(ctx context.Context, id int64) error + BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error + SetError(ctx context.Context, id int64, errorMsg string) error + SetSchedulable(ctx context.Context, id int64, schedulable bool) error + AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) + BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error + + ListSchedulable(ctx context.Context) ([]Account, error) + ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]Account, error) + ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) + ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) + ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]Account, error) + ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) + + SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error + SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error + SetOverloaded(ctx context.Context, id int64, until time.Time) error + SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error + ClearTempUnschedulable(ctx context.Context, id int64) error + ClearRateLimit(ctx context.Context, id int64) error + ClearAntigravityQuotaScopes(ctx context.Context, id int64) error + UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error + UpdateExtra(ctx context.Context, id int64, updates map[string]any) error + BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) +} + +// AccountBulkUpdate describes the fields that can be updated in a bulk operation. +// Nil pointers mean "do not change". 
+type AccountBulkUpdate struct { + Name *string + ProxyID *int64 + Concurrency *int + Priority *int + RateMultiplier *float64 + Status *string + Schedulable *bool + Credentials map[string]any + Extra map[string]any +} + +// CreateAccountRequest 创建账号请求 +type CreateAccountRequest struct { + Name string `json:"name"` + Notes *string `json:"notes"` + Platform string `json:"platform"` + Type string `json:"type"` + Credentials map[string]any `json:"credentials"` + Extra map[string]any `json:"extra"` + ProxyID *int64 `json:"proxy_id"` + Concurrency int `json:"concurrency"` + Priority int `json:"priority"` + GroupIDs []int64 `json:"group_ids"` + ExpiresAt *time.Time `json:"expires_at"` + AutoPauseOnExpired *bool `json:"auto_pause_on_expired"` +} + +// UpdateAccountRequest 更新账号请求 +type UpdateAccountRequest struct { + Name *string `json:"name"` + Notes *string `json:"notes"` + Credentials *map[string]any `json:"credentials"` + Extra *map[string]any `json:"extra"` + ProxyID *int64 `json:"proxy_id"` + Concurrency *int `json:"concurrency"` + Priority *int `json:"priority"` + Status *string `json:"status"` + GroupIDs *[]int64 `json:"group_ids"` + ExpiresAt *time.Time `json:"expires_at"` + AutoPauseOnExpired *bool `json:"auto_pause_on_expired"` +} + +// AccountService 账号管理服务 +type AccountService struct { + accountRepo AccountRepository + groupRepo GroupRepository +} + +// NewAccountService 创建账号服务实例 +func NewAccountService(accountRepo AccountRepository, groupRepo GroupRepository) *AccountService { + return &AccountService{ + accountRepo: accountRepo, + groupRepo: groupRepo, + } +} + +// Create 创建账号 +func (s *AccountService) Create(ctx context.Context, req CreateAccountRequest) (*Account, error) { + // 验证分组是否存在(如果指定了分组) + if len(req.GroupIDs) > 0 { + for _, groupID := range req.GroupIDs { + _, err := s.groupRepo.GetByID(ctx, groupID) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + } + } + + // 创建账号 + account := &Account{ + Name: req.Name, + Notes: normalizeAccountNotes(req.Notes), + Platform: req.Platform, + Type: req.Type, + Credentials: req.Credentials, + Extra: req.Extra, + ProxyID: req.ProxyID, + Concurrency: req.Concurrency, + Priority: req.Priority, + Status: StatusActive, + ExpiresAt: req.ExpiresAt, + } + if req.AutoPauseOnExpired != nil { + account.AutoPauseOnExpired = *req.AutoPauseOnExpired + } else { + account.AutoPauseOnExpired = true + } + + if err := s.accountRepo.Create(ctx, account); err != nil { + return nil, fmt.Errorf("create account: %w", err) + } + + // 绑定分组 + if len(req.GroupIDs) > 0 { + if err := s.accountRepo.BindGroups(ctx, account.ID, req.GroupIDs); err != nil { + return nil, fmt.Errorf("bind groups: %w", err) + } + } + + return account, nil +} + +// GetByID 根据ID获取账号 +func (s *AccountService) GetByID(ctx context.Context, id int64) (*Account, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get account: %w", err) + } + return account, nil +} + +// List 获取账号列表 +func (s *AccountService) List(ctx context.Context, params pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) { + accounts, pagination, err := s.accountRepo.List(ctx, params) + if err != nil { + return nil, nil, fmt.Errorf("list accounts: %w", err) + } + return accounts, pagination, nil +} + +// ListByPlatform 根据平台获取账号列表 +func (s *AccountService) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { + accounts, err := s.accountRepo.ListByPlatform(ctx, platform) + if err != nil { + return nil, 
fmt.Errorf("list accounts by platform: %w", err) + } + return accounts, nil +} + +// ListByGroup 根据分组获取账号列表 +func (s *AccountService) ListByGroup(ctx context.Context, groupID int64) ([]Account, error) { + accounts, err := s.accountRepo.ListByGroup(ctx, groupID) + if err != nil { + return nil, fmt.Errorf("list accounts by group: %w", err) + } + return accounts, nil +} + +// Update 更新账号 +func (s *AccountService) Update(ctx context.Context, id int64, req UpdateAccountRequest) (*Account, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get account: %w", err) + } + + // 更新字段 + if req.Name != nil { + account.Name = *req.Name + } + if req.Notes != nil { + account.Notes = normalizeAccountNotes(req.Notes) + } + + if req.Credentials != nil { + account.Credentials = *req.Credentials + } + + if req.Extra != nil { + account.Extra = *req.Extra + } + + if req.ProxyID != nil { + account.ProxyID = req.ProxyID + } + + if req.Concurrency != nil { + account.Concurrency = *req.Concurrency + } + + if req.Priority != nil { + account.Priority = *req.Priority + } + + if req.Status != nil { + account.Status = *req.Status + } + if req.ExpiresAt != nil { + account.ExpiresAt = req.ExpiresAt + } + if req.AutoPauseOnExpired != nil { + account.AutoPauseOnExpired = *req.AutoPauseOnExpired + } + + // 先验证分组是否存在(在任何写操作之前) + if req.GroupIDs != nil { + for _, groupID := range *req.GroupIDs { + _, err := s.groupRepo.GetByID(ctx, groupID) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + } + } + + // 执行更新 + if err := s.accountRepo.Update(ctx, account); err != nil { + return nil, fmt.Errorf("update account: %w", err) + } + + // 绑定分组 + if req.GroupIDs != nil { + if err := s.accountRepo.BindGroups(ctx, account.ID, *req.GroupIDs); err != nil { + return nil, fmt.Errorf("bind groups: %w", err) + } + } + + return account, nil +} + +// Delete 删除账号 +// 优化:使用 ExistsByID 替代 GetByID 进行存在性检查, +// 避免加载完整账号对象及其关联数据,提升删除操作的性能 +func (s *AccountService) Delete(ctx context.Context, id int64) error { + // 使用轻量级的存在性检查,而非加载完整账号对象 + exists, err := s.accountRepo.ExistsByID(ctx, id) + if err != nil { + return fmt.Errorf("check account: %w", err) + } + // 明确返回账号不存在错误,便于调用方区分错误类型 + if !exists { + return ErrAccountNotFound + } + + if err := s.accountRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete account: %w", err) + } + + return nil +} + +// UpdateStatus 更新账号状态 +func (s *AccountService) UpdateStatus(ctx context.Context, id int64, status string, errorMessage string) error { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return fmt.Errorf("get account: %w", err) + } + + account.Status = status + account.ErrorMessage = errorMessage + + if err := s.accountRepo.Update(ctx, account); err != nil { + return fmt.Errorf("update account: %w", err) + } + + return nil +} + +// UpdateLastUsed 更新最后使用时间 +func (s *AccountService) UpdateLastUsed(ctx context.Context, id int64) error { + if err := s.accountRepo.UpdateLastUsed(ctx, id); err != nil { + return fmt.Errorf("update last used: %w", err) + } + return nil +} + +// GetCredential 获取账号凭证(安全访问) +func (s *AccountService) GetCredential(ctx context.Context, id int64, key string) (string, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return "", fmt.Errorf("get account: %w", err) + } + + return account.GetCredential(key), nil +} + +// TestCredentials 测试账号凭证是否有效(需要实现具体平台的测试逻辑) +func (s *AccountService) TestCredentials(ctx context.Context, id int64) error { + account, err := 
s.accountRepo.GetByID(ctx, id) + if err != nil { + return fmt.Errorf("get account: %w", err) + } + + // 根据平台执行不同的测试逻辑 + switch account.Platform { + case PlatformAnthropic: + // TODO: 测试Anthropic API凭证 + return nil + case PlatformOpenAI: + // TODO: 测试OpenAI API凭证 + return nil + case PlatformGemini: + // TODO: 测试Gemini API凭证 + return nil + default: + return fmt.Errorf("unsupported platform: %s", account.Platform) + } +} diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go new file mode 100644 index 00000000..6923067d --- /dev/null +++ b/backend/internal/service/account_service_delete_test.go @@ -0,0 +1,239 @@ +//go:build unit + +// 账号服务删除方法的单元测试 +// 测试 AccountService.Delete 方法在各种场景下的行为 + +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +// accountRepoStub 是 AccountRepository 接口的测试桩实现。 +// 用于隔离测试 AccountService.Delete 方法,避免依赖真实数据库。 +// +// 设计说明: +// - exists: 模拟 ExistsByID 返回的存在性结果 +// - existsErr: 模拟 ExistsByID 返回的错误 +// - deleteErr: 模拟 Delete 返回的错误 +// - deletedIDs: 记录被调用删除的账号 ID,用于断言验证 +type accountRepoStub struct { + exists bool // ExistsByID 的返回值 + existsErr error // ExistsByID 的错误返回值 + deleteErr error // Delete 的错误返回值 + deletedIDs []int64 // 记录已删除的账号 ID 列表 +} + +// 以下方法在本测试中不应被调用,使用 panic 确保测试失败时能快速定位问题 + +func (s *accountRepoStub) Create(ctx context.Context, account *Account) error { + panic("unexpected Create call") +} + +func (s *accountRepoStub) GetByID(ctx context.Context, id int64) (*Account, error) { + panic("unexpected GetByID call") +} + +func (s *accountRepoStub) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + panic("unexpected GetByIDs call") +} + +// ExistsByID 返回预设的存在性检查结果。 +// 这是 Delete 方法调用的第一个仓储方法,用于验证账号是否存在。 +func (s *accountRepoStub) ExistsByID(ctx context.Context, id int64) (bool, error) { + return s.exists, s.existsErr +} + +func (s *accountRepoStub) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*Account, error) { + panic("unexpected GetByCRSAccountID call") +} + +func (s *accountRepoStub) Update(ctx context.Context, account *Account) error { + panic("unexpected Update call") +} + +// Delete 记录被删除的账号 ID 并返回预设的错误。 +// 通过 deletedIDs 可以验证删除操作是否被正确调用。 +func (s *accountRepoStub) Delete(ctx context.Context, id int64) error { + s.deletedIDs = append(s.deletedIDs, id) + return s.deleteErr +} + +// 以下是接口要求实现但本测试不关心的方法 + +func (s *accountRepoStub) List(ctx context.Context, params pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *accountRepoStub) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]Account, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *accountRepoStub) ListByGroup(ctx context.Context, groupID int64) ([]Account, error) { + panic("unexpected ListByGroup call") +} + +func (s *accountRepoStub) ListActive(ctx context.Context) ([]Account, error) { + panic("unexpected ListActive call") +} + +func (s *accountRepoStub) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { + panic("unexpected ListByPlatform call") +} + +func (s *accountRepoStub) UpdateLastUsed(ctx context.Context, id int64) error { + panic("unexpected UpdateLastUsed call") +} + +func (s *accountRepoStub) BatchUpdateLastUsed(ctx context.Context, updates 
map[int64]time.Time) error { + panic("unexpected BatchUpdateLastUsed call") +} + +func (s *accountRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error { + panic("unexpected SetError call") +} + +func (s *accountRepoStub) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { + panic("unexpected SetSchedulable call") +} + +func (s *accountRepoStub) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) { + panic("unexpected AutoPauseExpiredAccounts call") +} + +func (s *accountRepoStub) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { + panic("unexpected BindGroups call") +} + +func (s *accountRepoStub) ListSchedulable(ctx context.Context) ([]Account, error) { + panic("unexpected ListSchedulable call") +} + +func (s *accountRepoStub) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]Account, error) { + panic("unexpected ListSchedulableByGroupID call") +} + +func (s *accountRepoStub) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) { + panic("unexpected ListSchedulableByPlatform call") +} + +func (s *accountRepoStub) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) { + panic("unexpected ListSchedulableByGroupIDAndPlatform call") +} + +func (s *accountRepoStub) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]Account, error) { + panic("unexpected ListSchedulableByPlatforms call") +} + +func (s *accountRepoStub) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) { + panic("unexpected ListSchedulableByGroupIDAndPlatforms call") +} + +func (s *accountRepoStub) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { + panic("unexpected SetRateLimited call") +} + +func (s *accountRepoStub) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + panic("unexpected SetAntigravityQuotaScopeLimit call") +} + +func (s *accountRepoStub) SetOverloaded(ctx context.Context, id int64, until time.Time) error { + panic("unexpected SetOverloaded call") +} + +func (s *accountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + panic("unexpected SetTempUnschedulable call") +} + +func (s *accountRepoStub) ClearTempUnschedulable(ctx context.Context, id int64) error { + panic("unexpected ClearTempUnschedulable call") +} + +func (s *accountRepoStub) ClearRateLimit(ctx context.Context, id int64) error { + panic("unexpected ClearRateLimit call") +} + +func (s *accountRepoStub) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + panic("unexpected ClearAntigravityQuotaScopes call") +} + +func (s *accountRepoStub) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { + panic("unexpected UpdateSessionWindow call") +} + +func (s *accountRepoStub) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error { + panic("unexpected UpdateExtra call") +} + +func (s *accountRepoStub) BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) { + panic("unexpected BulkUpdate call") +} + +// TestAccountService_Delete_NotFound 测试删除不存在的账号时返回正确的错误。 +// 预期行为: +// - ExistsByID 返回 false(账号不存在) +// - 返回 ErrAccountNotFound 错误 +// - Delete 方法不被调用(deletedIDs 为空) +func TestAccountService_Delete_NotFound(t *testing.T) { + repo := 
&accountRepoStub{exists: false} + svc := &AccountService{accountRepo: repo} + + err := svc.Delete(context.Background(), 55) + require.ErrorIs(t, err, ErrAccountNotFound) + require.Empty(t, repo.deletedIDs) // 验证删除操作未被调用 +} + +// TestAccountService_Delete_CheckError 测试存在性检查失败时的错误处理。 +// 预期行为: +// - ExistsByID 返回数据库错误 +// - 返回包含 "check account" 的错误信息 +// - Delete 方法不被调用 +func TestAccountService_Delete_CheckError(t *testing.T) { + repo := &accountRepoStub{existsErr: errors.New("db down")} + svc := &AccountService{accountRepo: repo} + + err := svc.Delete(context.Background(), 55) + require.Error(t, err) + require.ErrorContains(t, err, "check account") // 验证错误信息包含上下文 + require.Empty(t, repo.deletedIDs) +} + +// TestAccountService_Delete_DeleteError 测试删除操作失败时的错误处理。 +// 预期行为: +// - ExistsByID 返回 true(账号存在) +// - Delete 被调用但返回错误 +// - 返回包含 "delete account" 的错误信息 +// - deletedIDs 记录了尝试删除的 ID +func TestAccountService_Delete_DeleteError(t *testing.T) { + repo := &accountRepoStub{ + exists: true, + deleteErr: errors.New("delete failed"), + } + svc := &AccountService{accountRepo: repo} + + err := svc.Delete(context.Background(), 55) + require.Error(t, err) + require.ErrorContains(t, err, "delete account") + require.Equal(t, []int64{55}, repo.deletedIDs) // 验证删除操作被调用 +} + +// TestAccountService_Delete_Success 测试删除操作成功的场景。 +// 预期行为: +// - ExistsByID 返回 true(账号存在) +// - Delete 成功执行 +// - 返回 nil 错误 +// - deletedIDs 记录了被删除的 ID +func TestAccountService_Delete_Success(t *testing.T) { + repo := &accountRepoStub{exists: true} + svc := &AccountService{accountRepo: repo} + + err := svc.Delete(context.Background(), 55) + require.NoError(t, err) + require.Equal(t, []int64{55}, repo.deletedIDs) // 验证正确的 ID 被删除 +} diff --git a/backend/internal/service/account_test_service.go b/backend/internal/service/account_test_service.go new file mode 100644 index 00000000..8419c2b4 --- /dev/null +++ b/backend/internal/service/account_test_service.go @@ -0,0 +1,847 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "regexp" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +// sseDataPrefix matches SSE data lines with optional whitespace after colon. +// Some upstream APIs return non-standard "data:" without space (should be "data: "). 
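+// Both forms therefore normalize identically, e.g.
+//
+//	sseDataPrefix.ReplaceAllString(`data: {"type":"ping"}`, "") // -> {"type":"ping"}
+//	sseDataPrefix.ReplaceAllString(`data:{"type":"ping"}`, "")  // -> {"type":"ping"}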
+var sseDataPrefix = regexp.MustCompile(`^data:\s*`) + +const ( + testClaudeAPIURL = "https://api.anthropic.com/v1/messages" + chatgptCodexAPIURL = "https://chatgpt.com/backend-api/codex/responses" +) + +// TestEvent represents a SSE event for account testing +type TestEvent struct { + Type string `json:"type"` + Text string `json:"text,omitempty"` + Model string `json:"model,omitempty"` + Success bool `json:"success,omitempty"` + Error string `json:"error,omitempty"` +} + +// AccountTestService handles account testing operations +type AccountTestService struct { + accountRepo AccountRepository + geminiTokenProvider *GeminiTokenProvider + antigravityGatewayService *AntigravityGatewayService + httpUpstream HTTPUpstream + cfg *config.Config +} + +// NewAccountTestService creates a new AccountTestService +func NewAccountTestService( + accountRepo AccountRepository, + geminiTokenProvider *GeminiTokenProvider, + antigravityGatewayService *AntigravityGatewayService, + httpUpstream HTTPUpstream, + cfg *config.Config, +) *AccountTestService { + return &AccountTestService{ + accountRepo: accountRepo, + geminiTokenProvider: geminiTokenProvider, + antigravityGatewayService: antigravityGatewayService, + httpUpstream: httpUpstream, + cfg: cfg, + } +} + +func (s *AccountTestService) validateUpstreamBaseURL(raw string) (string, error) { + if s.cfg == nil { + return "", errors.New("config is not available") + } + if !s.cfg.Security.URLAllowlist.Enabled { + return urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.cfg.Security.URLAllowlist.UpstreamHosts, + RequireAllowlist: true, + AllowPrivate: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", err + } + return normalized, nil +} + +// generateSessionString generates a Claude Code style session string +func generateSessionString() (string, error) { + bytes := make([]byte, 32) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + hex64 := hex.EncodeToString(bytes) + sessionUUID := uuid.New().String() + return fmt.Sprintf("user_%s_account__session_%s", hex64, sessionUUID), nil +} + +// createTestPayload creates a Claude Code style test request payload +func createTestPayload(modelID string) (map[string]any, error) { + sessionID, err := generateSessionString() + if err != nil { + return nil, err + } + + return map[string]any{ + "model": modelID, + "messages": []map[string]any{ + { + "role": "user", + "content": []map[string]any{ + { + "type": "text", + "text": "hi", + "cache_control": map[string]string{ + "type": "ephemeral", + }, + }, + }, + }, + }, + "system": []map[string]any{ + { + "type": "text", + "text": "You are Claude Code, Anthropic's official CLI for Claude.", + "cache_control": map[string]string{ + "type": "ephemeral", + }, + }, + }, + "metadata": map[string]string{ + "user_id": sessionID, + }, + "max_tokens": 1024, + "temperature": 1, + "stream": true, + }, nil +} + +// TestAccountConnection tests an account's connection by sending a test request +// All account types use full Claude Code client characteristics, only auth header differs +// modelID is optional - if empty, defaults to claude.DefaultTestModel +func (s *AccountTestService) TestAccountConnection(c *gin.Context, accountID int64, modelID string) error { + ctx := c.Request.Context() + + // Get account + account, err := s.accountRepo.GetByID(ctx, accountID) + if err != nil { + return 
s.sendErrorAndEnd(c, "Account not found") + } + + // Route to platform-specific test method + if account.IsOpenAI() { + return s.testOpenAIAccountConnection(c, account, modelID) + } + + if account.IsGemini() { + return s.testGeminiAccountConnection(c, account, modelID) + } + + if account.Platform == PlatformAntigravity { + return s.testAntigravityAccountConnection(c, account, modelID) + } + + return s.testClaudeAccountConnection(c, account, modelID) +} + +// testClaudeAccountConnection tests an Anthropic Claude account's connection +func (s *AccountTestService) testClaudeAccountConnection(c *gin.Context, account *Account, modelID string) error { + ctx := c.Request.Context() + + // Determine the model to use + testModelID := modelID + if testModelID == "" { + testModelID = claude.DefaultTestModel + } + + // For API Key accounts with model mapping, map the model + if account.Type == "apikey" { + mapping := account.GetModelMapping() + if len(mapping) > 0 { + if mappedModel, exists := mapping[testModelID]; exists { + testModelID = mappedModel + } + } + } + + // Determine authentication method and API URL + var authToken string + var useBearer bool + var apiURL string + + if account.IsOAuth() { + // OAuth or Setup Token - use Bearer token + useBearer = true + apiURL = testClaudeAPIURL + authToken = account.GetCredential("access_token") + if authToken == "" { + return s.sendErrorAndEnd(c, "No access token available") + } + } else if account.Type == "apikey" { + // API Key - use x-api-key header + useBearer = false + authToken = account.GetCredential("api_key") + if authToken == "" { + return s.sendErrorAndEnd(c, "No API key available") + } + + baseURL := account.GetBaseURL() + if baseURL == "" { + baseURL = "https://api.anthropic.com" + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Invalid base URL: %s", err.Error())) + } + apiURL = strings.TrimSuffix(normalizedBaseURL, "/") + "/v1/messages" + } else { + return s.sendErrorAndEnd(c, fmt.Sprintf("Unsupported account type: %s", account.Type)) + } + + // Set SSE headers + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("X-Accel-Buffering", "no") + c.Writer.Flush() + + // Create Claude Code style payload (same for all account types) + payload, err := createTestPayload(testModelID) + if err != nil { + return s.sendErrorAndEnd(c, "Failed to create test payload") + } + payloadBytes, _ := json.Marshal(payload) + + // Send test_start event + s.sendEvent(c, TestEvent{Type: "test_start", Model: testModelID}) + + req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewReader(payloadBytes)) + if err != nil { + return s.sendErrorAndEnd(c, "Failed to create request") + } + + // Set common headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("anthropic-version", "2023-06-01") + req.Header.Set("anthropic-beta", claude.DefaultBetaHeader) + + // Apply Claude Code client headers + for key, value := range claude.DefaultHeaders { + req.Header.Set(key, value) + } + + // Set authentication header + if useBearer { + req.Header.Set("Authorization", "Bearer "+authToken) + } else { + req.Header.Set("x-api-key", authToken) + } + + // Get proxy URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, 
account.Concurrency) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error())) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) + } + + // Process SSE stream + return s.processClaudeStream(c, resp.Body) +} + +// testOpenAIAccountConnection tests an OpenAI account's connection +func (s *AccountTestService) testOpenAIAccountConnection(c *gin.Context, account *Account, modelID string) error { + ctx := c.Request.Context() + + // Default to openai.DefaultTestModel for OpenAI testing + testModelID := modelID + if testModelID == "" { + testModelID = openai.DefaultTestModel + } + + // For API Key accounts with model mapping, map the model + if account.Type == "apikey" { + mapping := account.GetModelMapping() + if len(mapping) > 0 { + if mappedModel, exists := mapping[testModelID]; exists { + testModelID = mappedModel + } + } + } + + // Determine authentication method and API URL + var authToken string + var apiURL string + var isOAuth bool + var chatgptAccountID string + + if account.IsOAuth() { + isOAuth = true + // OAuth - use Bearer token with ChatGPT internal API + authToken = account.GetOpenAIAccessToken() + if authToken == "" { + return s.sendErrorAndEnd(c, "No access token available") + } + + // OAuth uses ChatGPT internal API + apiURL = chatgptCodexAPIURL + chatgptAccountID = account.GetChatGPTAccountID() + } else if account.Type == "apikey" { + // API Key - use Platform API + authToken = account.GetOpenAIApiKey() + if authToken == "" { + return s.sendErrorAndEnd(c, "No API key available") + } + + baseURL := account.GetOpenAIBaseURL() + if baseURL == "" { + baseURL = "https://api.openai.com" + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Invalid base URL: %s", err.Error())) + } + apiURL = strings.TrimSuffix(normalizedBaseURL, "/") + "/responses" + } else { + return s.sendErrorAndEnd(c, fmt.Sprintf("Unsupported account type: %s", account.Type)) + } + + // Set SSE headers + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("X-Accel-Buffering", "no") + c.Writer.Flush() + + // Create OpenAI Responses API payload + payload := createOpenAITestPayload(testModelID, isOAuth) + payloadBytes, _ := json.Marshal(payload) + + // Send test_start event + s.sendEvent(c, TestEvent{Type: "test_start", Model: testModelID}) + + req, err := http.NewRequestWithContext(ctx, "POST", apiURL, bytes.NewReader(payloadBytes)) + if err != nil { + return s.sendErrorAndEnd(c, "Failed to create request") + } + + // Set common headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+authToken) + + // Set OAuth-specific headers for ChatGPT internal API + if isOAuth { + req.Host = "chatgpt.com" + req.Header.Set("accept", "text/event-stream") + if chatgptAccountID != "" { + req.Header.Set("chatgpt-account-id", chatgptAccountID) + } + } + + // Get proxy URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error())) + } + defer func() { 
_ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) + } + + // Process SSE stream + return s.processOpenAIStream(c, resp.Body) +} + +// testGeminiAccountConnection tests a Gemini account's connection +func (s *AccountTestService) testGeminiAccountConnection(c *gin.Context, account *Account, modelID string) error { + ctx := c.Request.Context() + + // Determine the model to use + testModelID := modelID + if testModelID == "" { + testModelID = geminicli.DefaultTestModel + } + + // For API Key accounts with model mapping, map the model + if account.Type == AccountTypeAPIKey { + mapping := account.GetModelMapping() + if len(mapping) > 0 { + if mappedModel, exists := mapping[testModelID]; exists { + testModelID = mappedModel + } + } + } + + // Set SSE headers + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("X-Accel-Buffering", "no") + c.Writer.Flush() + + // Create test payload (Gemini format) + payload := createGeminiTestPayload() + + // Build request based on account type + var req *http.Request + var err error + + switch account.Type { + case AccountTypeAPIKey: + req, err = s.buildGeminiAPIKeyRequest(ctx, account, testModelID, payload) + case AccountTypeOAuth: + req, err = s.buildGeminiOAuthRequest(ctx, account, testModelID, payload) + default: + return s.sendErrorAndEnd(c, fmt.Sprintf("Unsupported account type: %s", account.Type)) + } + + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Failed to build request: %s", err.Error())) + } + + // Send test_start event + s.sendEvent(c, TestEvent{Type: "test_start", Model: testModelID}) + + // Get proxy and execute request + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + return s.sendErrorAndEnd(c, fmt.Sprintf("Request failed: %s", err.Error())) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return s.sendErrorAndEnd(c, fmt.Sprintf("API returned %d: %s", resp.StatusCode, string(body))) + } + + // Process SSE stream + return s.processGeminiStream(c, resp.Body) +} + +// testAntigravityAccountConnection tests an Antigravity account's connection +// 支持 Claude 和 Gemini 两种协议,使用非流式请求 +func (s *AccountTestService) testAntigravityAccountConnection(c *gin.Context, account *Account, modelID string) error { + ctx := c.Request.Context() + + // 默认模型:Claude 使用 claude-sonnet-4-5,Gemini 使用 gemini-3-pro-preview + testModelID := modelID + if testModelID == "" { + testModelID = "claude-sonnet-4-5" + } + + if s.antigravityGatewayService == nil { + return s.sendErrorAndEnd(c, "Antigravity gateway service not configured") + } + + // Set SSE headers + c.Writer.Header().Set("Content-Type", "text/event-stream") + c.Writer.Header().Set("Cache-Control", "no-cache") + c.Writer.Header().Set("Connection", "keep-alive") + c.Writer.Header().Set("X-Accel-Buffering", "no") + c.Writer.Flush() + + // Send test_start event + s.sendEvent(c, TestEvent{Type: "test_start", Model: testModelID}) + + // 调用 AntigravityGatewayService.TestConnection(复用协议转换逻辑) + result, err := s.antigravityGatewayService.TestConnection(ctx, account, testModelID) + if err != nil { + 
return s.sendErrorAndEnd(c, err.Error()) + } + + // 发送响应内容 + if result.Text != "" { + s.sendEvent(c, TestEvent{Type: "content", Text: result.Text}) + } + + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil +} + +// buildGeminiAPIKeyRequest builds request for Gemini API Key accounts +func (s *AccountTestService) buildGeminiAPIKeyRequest(ctx context.Context, account *Account, modelID string, payload []byte) (*http.Request, error) { + apiKey := account.GetCredential("api_key") + if strings.TrimSpace(apiKey) == "" { + return nil, fmt.Errorf("no API key available") + } + + baseURL := account.GetCredential("base_url") + if baseURL == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, err + } + + // Use streamGenerateContent for real-time feedback + fullURL := fmt.Sprintf("%s/v1beta/models/%s:streamGenerateContent?alt=sse", + strings.TrimRight(normalizedBaseURL, "/"), modelID) + + req, err := http.NewRequestWithContext(ctx, "POST", fullURL, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("x-goog-api-key", apiKey) + + return req, nil +} + +// buildGeminiOAuthRequest builds request for Gemini OAuth accounts +func (s *AccountTestService) buildGeminiOAuthRequest(ctx context.Context, account *Account, modelID string, payload []byte) (*http.Request, error) { + if s.geminiTokenProvider == nil { + return nil, fmt.Errorf("gemini token provider not configured") + } + + // Get access token (auto-refreshes if needed) + accessToken, err := s.geminiTokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, fmt.Errorf("failed to get access token: %w", err) + } + + projectID := strings.TrimSpace(account.GetCredential("project_id")) + if projectID == "" { + // AI Studio OAuth mode (no project_id): call generativelanguage API directly with Bearer token. 
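+		// Accounts that do carry a project_id skip this branch and go through
+		// the Code Assist wrapper built by buildCodeAssistRequest below.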
+ baseURL := account.GetCredential("base_url") + if strings.TrimSpace(baseURL) == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, err + } + fullURL := fmt.Sprintf("%s/v1beta/models/%s:streamGenerateContent?alt=sse", strings.TrimRight(normalizedBaseURL, "/"), modelID) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(payload)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+accessToken) + return req, nil + } + + // Code Assist mode (with project_id) + return s.buildCodeAssistRequest(ctx, accessToken, projectID, modelID, payload) +} + +// buildCodeAssistRequest builds request for Google Code Assist API (used by Gemini CLI and Antigravity) +func (s *AccountTestService) buildCodeAssistRequest(ctx context.Context, accessToken, projectID, modelID string, payload []byte) (*http.Request, error) { + var inner map[string]any + if err := json.Unmarshal(payload, &inner); err != nil { + return nil, err + } + + wrapped := map[string]any{ + "model": modelID, + "project": projectID, + "request": inner, + } + wrappedBytes, _ := json.Marshal(wrapped) + + normalizedBaseURL, err := s.validateUpstreamBaseURL(geminicli.GeminiCliBaseURL) + if err != nil { + return nil, err + } + fullURL := fmt.Sprintf("%s/v1internal:streamGenerateContent?alt=sse", normalizedBaseURL) + + req, err := http.NewRequestWithContext(ctx, "POST", fullURL, bytes.NewReader(wrappedBytes)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("User-Agent", geminicli.GeminiCLIUserAgent) + + return req, nil +} + +// createGeminiTestPayload creates a minimal test payload for Gemini API +func createGeminiTestPayload() []byte { + payload := map[string]any{ + "contents": []map[string]any{ + { + "role": "user", + "parts": []map[string]any{ + {"text": "hi"}, + }, + }, + }, + "systemInstruction": map[string]any{ + "parts": []map[string]any{ + {"text": "You are a helpful AI assistant."}, + }, + }, + } + bytes, _ := json.Marshal(payload) + return bytes +} + +// processGeminiStream processes SSE stream from Gemini API +func (s *AccountTestService) processGeminiStream(c *gin.Context, body io.Reader) error { + reader := bufio.NewReader(body) + + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + return s.sendErrorAndEnd(c, fmt.Sprintf("Stream read error: %s", err.Error())) + } + + line = strings.TrimSpace(line) + if line == "" || !strings.HasPrefix(line, "data: ") { + continue + } + + jsonStr := strings.TrimPrefix(line, "data: ") + if jsonStr == "[DONE]" { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + + var data map[string]any + if err := json.Unmarshal([]byte(jsonStr), &data); err != nil { + continue + } + + // Support two Gemini response formats: + // - AI Studio: {"candidates": [...]} + // - Gemini CLI: {"response": {"candidates": [...]}} + if resp, ok := data["response"].(map[string]any); ok && resp != nil { + data = resp + } + if candidates, ok := data["candidates"].([]any); ok && len(candidates) > 0 { + if candidate, ok := candidates[0].(map[string]any); ok { + // Extract content first (before checking completion) + if content, ok := 
candidate["content"].(map[string]any); ok { + if parts, ok := content["parts"].([]any); ok { + for _, part := range parts { + if partMap, ok := part.(map[string]any); ok { + if text, ok := partMap["text"].(string); ok && text != "" { + s.sendEvent(c, TestEvent{Type: "content", Text: text}) + } + } + } + } + } + + // Check for completion after extracting content + if finishReason, ok := candidate["finishReason"].(string); ok && finishReason != "" { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + } + } + + // Handle errors + if errData, ok := data["error"].(map[string]any); ok { + errorMsg := "Unknown error" + if msg, ok := errData["message"].(string); ok { + errorMsg = msg + } + return s.sendErrorAndEnd(c, errorMsg) + } + } +} + +// createOpenAITestPayload creates a test payload for OpenAI Responses API +func createOpenAITestPayload(modelID string, isOAuth bool) map[string]any { + payload := map[string]any{ + "model": modelID, + "input": []map[string]any{ + { + "role": "user", + "content": []map[string]any{ + { + "type": "input_text", + "text": "hi", + }, + }, + }, + }, + "stream": true, + } + + // OAuth accounts using ChatGPT internal API require store: false + if isOAuth { + payload["store"] = false + } + + // All accounts require instructions for Responses API + payload["instructions"] = openai.DefaultInstructions + + return payload +} + +// processClaudeStream processes the SSE stream from Claude API +func (s *AccountTestService) processClaudeStream(c *gin.Context, body io.Reader) error { + reader := bufio.NewReader(body) + + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + return s.sendErrorAndEnd(c, fmt.Sprintf("Stream read error: %s", err.Error())) + } + + line = strings.TrimSpace(line) + if line == "" || !sseDataPrefix.MatchString(line) { + continue + } + + jsonStr := sseDataPrefix.ReplaceAllString(line, "") + if jsonStr == "[DONE]" { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + + var data map[string]any + if err := json.Unmarshal([]byte(jsonStr), &data); err != nil { + continue + } + + eventType, _ := data["type"].(string) + + switch eventType { + case "content_block_delta": + if delta, ok := data["delta"].(map[string]any); ok { + if text, ok := delta["text"].(string); ok { + s.sendEvent(c, TestEvent{Type: "content", Text: text}) + } + } + case "message_stop": + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + case "error": + errorMsg := "Unknown error" + if errData, ok := data["error"].(map[string]any); ok { + if msg, ok := errData["message"].(string); ok { + errorMsg = msg + } + } + return s.sendErrorAndEnd(c, errorMsg) + } + } +} + +// processOpenAIStream processes the SSE stream from OpenAI Responses API +func (s *AccountTestService) processOpenAIStream(c *gin.Context, body io.Reader) error { + reader := bufio.NewReader(body) + + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + return s.sendErrorAndEnd(c, fmt.Sprintf("Stream read error: %s", err.Error())) + } + + line = strings.TrimSpace(line) + if line == "" || !sseDataPrefix.MatchString(line) { + continue + } + + jsonStr := sseDataPrefix.ReplaceAllString(line, "") + if jsonStr == "[DONE]" { + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + } + + var data 
map[string]any + if err := json.Unmarshal([]byte(jsonStr), &data); err != nil { + continue + } + + eventType, _ := data["type"].(string) + + switch eventType { + case "response.output_text.delta": + // OpenAI Responses API uses "delta" field for text content + if delta, ok := data["delta"].(string); ok && delta != "" { + s.sendEvent(c, TestEvent{Type: "content", Text: delta}) + } + case "response.completed": + s.sendEvent(c, TestEvent{Type: "test_complete", Success: true}) + return nil + case "error": + errorMsg := "Unknown error" + if errData, ok := data["error"].(map[string]any); ok { + if msg, ok := errData["message"].(string); ok { + errorMsg = msg + } + } + return s.sendErrorAndEnd(c, errorMsg) + } + } +} + +// sendEvent sends a SSE event to the client +func (s *AccountTestService) sendEvent(c *gin.Context, event TestEvent) { + eventJSON, _ := json.Marshal(event) + if _, err := fmt.Fprintf(c.Writer, "data: %s\n\n", eventJSON); err != nil { + log.Printf("failed to write SSE event: %v", err) + return + } + c.Writer.Flush() +} + +// sendErrorAndEnd sends an error event and ends the stream +func (s *AccountTestService) sendErrorAndEnd(c *gin.Context, errorMsg string) error { + log.Printf("Account test error: %s", errorMsg) + s.sendEvent(c, TestEvent{Type: "error", Error: errorMsg}) + return fmt.Errorf("%s", errorMsg) +} diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go new file mode 100644 index 00000000..d9ed5609 --- /dev/null +++ b/backend/internal/service/account_usage_service.go @@ -0,0 +1,577 @@ +package service + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" +) + +type UsageLogRepository interface { + // Create creates a usage log and returns whether it was actually inserted. + // inserted is false when the insert was skipped due to conflict (idempotent retries). 
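A caller-side sketch of this idempotency contract — only a genuinely new row should trigger side effects such as billing; recordUsage and onFirstInsert are illustrative names:

	// Sketch: idempotent usage-log recording. inserted == false signals a
	// conflict-skipped insert (a retried delivery), so side effects must not run twice.
	func recordUsage(ctx context.Context, repo UsageLogRepository, entry *UsageLog, onFirstInsert func()) error {
		inserted, err := repo.Create(ctx, entry)
		if err != nil {
			return fmt.Errorf("write usage log: %w", err)
		}
		if inserted {
			onFirstInsert() // e.g. bump billing counters exactly once
		}
		return nil
	}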
+ Create(ctx context.Context, log *UsageLog) (inserted bool, err error) + GetByID(ctx context.Context, id int64) (*UsageLog, error) + Delete(ctx context.Context, id int64) error + + ListByUser(ctx context.Context, userID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) + ListByAPIKey(ctx context.Context, apiKeyID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) + ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) + + ListByUserAndTimeRange(ctx context.Context, userID int64, startTime, endTime time.Time) ([]UsageLog, *pagination.PaginationResult, error) + ListByAPIKeyAndTimeRange(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) ([]UsageLog, *pagination.PaginationResult, error) + ListByAccountAndTimeRange(ctx context.Context, accountID int64, startTime, endTime time.Time) ([]UsageLog, *pagination.PaginationResult, error) + ListByModelAndTimeRange(ctx context.Context, modelName string, startTime, endTime time.Time) ([]UsageLog, *pagination.PaginationResult, error) + + GetAccountWindowStats(ctx context.Context, accountID int64, startTime time.Time) (*usagestats.AccountStats, error) + GetAccountTodayStats(ctx context.Context, accountID int64) (*usagestats.AccountStats, error) + + // Admin dashboard stats + GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) + GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) + GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) + GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) + GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) + GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error) + GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*usagestats.BatchAPIKeyUsageStats, error) + + // User dashboard stats + GetUserDashboardStats(ctx context.Context, userID int64) (*usagestats.UserDashboardStats, error) + GetUserUsageTrendByUserID(ctx context.Context, userID int64, startTime, endTime time.Time, granularity string) ([]usagestats.TrendDataPoint, error) + GetUserModelStats(ctx context.Context, userID int64, startTime, endTime time.Time) ([]usagestats.ModelStat, error) + + // Admin usage listing/stats + ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]UsageLog, *pagination.PaginationResult, error) + GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*usagestats.UsageStats, error) + GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) + + // Account stats + GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.AccountUsageStatsResponse, error) + + // Aggregated stats (optimized) + GetUserStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) + GetAPIKeyStatsAggregated(ctx context.Context, 
apiKeyID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) + GetAccountStatsAggregated(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.UsageStats, error) + GetModelStatsAggregated(ctx context.Context, modelName string, startTime, endTime time.Time) (*usagestats.UsageStats, error) + GetDailyStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) ([]map[string]any, error) +} + +// apiUsageCache 缓存从 Anthropic API 获取的使用率数据(utilization, resets_at) +type apiUsageCache struct { + response *ClaudeUsageResponse + timestamp time.Time +} + +// windowStatsCache 缓存从本地数据库查询的窗口统计(requests, tokens, cost) +type windowStatsCache struct { + stats *WindowStats + timestamp time.Time +} + +// antigravityUsageCache 缓存 Antigravity 额度数据 +type antigravityUsageCache struct { + usageInfo *UsageInfo + timestamp time.Time +} + +const ( + apiCacheTTL = 3 * time.Minute + windowStatsCacheTTL = 1 * time.Minute +) + +// UsageCache 封装账户使用量相关的缓存 +type UsageCache struct { + apiCache sync.Map // accountID -> *apiUsageCache + windowStatsCache sync.Map // accountID -> *windowStatsCache + antigravityCache sync.Map // accountID -> *antigravityUsageCache +} + +// NewUsageCache 创建 UsageCache 实例 +func NewUsageCache() *UsageCache { + return &UsageCache{} +} + +// WindowStats 窗口期统计 +// +// cost: 账号口径费用(total_cost * account_rate_multiplier) +// standard_cost: 标准费用(total_cost,不含倍率) +// user_cost: 用户/API Key 口径费用(actual_cost,受分组倍率影响) +type WindowStats struct { + Requests int64 `json:"requests"` + Tokens int64 `json:"tokens"` + Cost float64 `json:"cost"` + StandardCost float64 `json:"standard_cost"` + UserCost float64 `json:"user_cost"` +} + +// UsageProgress 使用量进度 +type UsageProgress struct { + Utilization float64 `json:"utilization"` // 使用率百分比 (0-100+,100表示100%) + ResetsAt *time.Time `json:"resets_at"` // 重置时间 + RemainingSeconds int `json:"remaining_seconds"` // 距重置剩余秒数 + WindowStats *WindowStats `json:"window_stats,omitempty"` // 窗口期统计(从窗口开始到当前的使用量) + UsedRequests int64 `json:"used_requests,omitempty"` + LimitRequests int64 `json:"limit_requests,omitempty"` +} + +// AntigravityModelQuota Antigravity 单个模型的配额信息 +type AntigravityModelQuota struct { + Utilization int `json:"utilization"` // 使用率 0-100 + ResetTime string `json:"reset_time"` // 重置时间 ISO8601 +} + +// UsageInfo 账号使用量信息 +type UsageInfo struct { + UpdatedAt *time.Time `json:"updated_at,omitempty"` // 更新时间 + FiveHour *UsageProgress `json:"five_hour"` // 5小时窗口 + SevenDay *UsageProgress `json:"seven_day,omitempty"` // 7天窗口 + SevenDaySonnet *UsageProgress `json:"seven_day_sonnet,omitempty"` // 7天Sonnet窗口 + GeminiSharedDaily *UsageProgress `json:"gemini_shared_daily,omitempty"` // Gemini shared pool RPD (Google One / Code Assist) + GeminiProDaily *UsageProgress `json:"gemini_pro_daily,omitempty"` // Gemini Pro 日配额 + GeminiFlashDaily *UsageProgress `json:"gemini_flash_daily,omitempty"` // Gemini Flash 日配额 + GeminiSharedMinute *UsageProgress `json:"gemini_shared_minute,omitempty"` // Gemini shared pool RPM (Google One / Code Assist) + GeminiProMinute *UsageProgress `json:"gemini_pro_minute,omitempty"` // Gemini Pro RPM + GeminiFlashMinute *UsageProgress `json:"gemini_flash_minute,omitempty"` // Gemini Flash RPM + + // Antigravity 多模型配额 + AntigravityQuota map[string]*AntigravityModelQuota `json:"antigravity_quota,omitempty"` +} + +// ClaudeUsageResponse Anthropic API返回的usage结构 +type ClaudeUsageResponse struct { + FiveHour struct { + Utilization float64 `json:"utilization"` + ResetsAt string `json:"resets_at"` + } 
`json:"five_hour"`
+	SevenDay struct {
+		Utilization float64 `json:"utilization"`
+		ResetsAt    string  `json:"resets_at"`
+	} `json:"seven_day"`
+	SevenDaySonnet struct {
+		Utilization float64 `json:"utilization"`
+		ResetsAt    string  `json:"resets_at"`
+	} `json:"seven_day_sonnet"`
+}
+
+// ClaudeUsageFetcher fetches usage data from Anthropic OAuth API
+type ClaudeUsageFetcher interface {
+	FetchUsage(ctx context.Context, accessToken, proxyURL string) (*ClaudeUsageResponse, error)
+}
+
+// AccountUsageService queries account usage data
+type AccountUsageService struct {
+	accountRepo             AccountRepository
+	usageLogRepo            UsageLogRepository
+	usageFetcher            ClaudeUsageFetcher
+	geminiQuotaService      *GeminiQuotaService
+	antigravityQuotaFetcher *AntigravityQuotaFetcher
+	cache                   *UsageCache
+}
+
+// NewAccountUsageService creates an AccountUsageService instance
+func NewAccountUsageService(
+	accountRepo AccountRepository,
+	usageLogRepo UsageLogRepository,
+	usageFetcher ClaudeUsageFetcher,
+	geminiQuotaService *GeminiQuotaService,
+	antigravityQuotaFetcher *AntigravityQuotaFetcher,
+	cache *UsageCache,
+) *AccountUsageService {
+	return &AccountUsageService{
+		accountRepo:             accountRepo,
+		usageLogRepo:            usageLogRepo,
+		usageFetcher:            usageFetcher,
+		geminiQuotaService:      geminiQuotaService,
+		antigravityQuotaFetcher: antigravityQuotaFetcher,
+		cache:                   cache,
+	}
+}
+
+// GetUsage returns usage data for an account.
+// OAuth accounts: fetch real data from the Anthropic API (requires the profile scope);
+// API responses are cached for apiCacheTTL (3 minutes), window stats for 1 minute.
+// Setup Token accounts: estimate the 5h window from session_window; 7d data is unavailable (no profile scope).
+// API Key accounts: usage queries are not supported.
+func (s *AccountUsageService) GetUsage(ctx context.Context, accountID int64) (*UsageInfo, error) {
+	account, err := s.accountRepo.GetByID(ctx, accountID)
+	if err != nil {
+		return nil, fmt.Errorf("get account failed: %w", err)
+	}
+
+	if account.Platform == PlatformGemini {
+		return s.getGeminiUsage(ctx, account)
+	}
+
+	// Antigravity platform: fetch quota via AntigravityQuotaFetcher
+	if account.Platform == PlatformAntigravity {
+		return s.getAntigravityUsage(ctx, account)
+	}
+
+	// Only OAuth accounts can fetch usage via the API (they have the profile scope)
+	if account.CanGetUsage() {
+		var apiResp *ClaudeUsageResponse
+
+		// 1. Check the API cache (apiCacheTTL, 3 minutes)
+		if cached, ok := s.cache.apiCache.Load(accountID); ok {
+			if cache, ok := cached.(*apiUsageCache); ok && time.Since(cache.timestamp) < apiCacheTTL {
+				apiResp = cache.response
+			}
+		}
+
+		// 2. On cache miss, fetch from the API
+		if apiResp == nil {
+			apiResp, err = s.fetchOAuthUsageRaw(ctx, account)
+			if err != nil {
+				return nil, err
+			}
+			// Cache the API response
+			s.cache.apiCache.Store(accountID, &apiUsageCache{
+				response:  apiResp,
+				timestamp: time.Now(),
+			})
+		}
+
+		// 3. Build UsageInfo (RemainingSeconds is recomputed on every call)
+		now := time.Now()
+		usage := s.buildUsageInfo(apiResp, &now)
+
+		// 4. Attach window stats (separately cached for 1 minute)
+		s.addWindowStats(ctx, account, usage)
+
+		return usage, nil
+	}
+
+	// Setup Token accounts: estimate from session_window (no profile scope, so the usage API is unavailable)
+	if account.Type == AccountTypeSetupToken {
+		usage := s.estimateSetupTokenUsage(account)
+		// Attach window stats
+		s.addWindowStats(ctx, account, usage)
+		return usage, nil
+	}
+
+	// API Key accounts do not support usage queries
+	return nil, fmt.Errorf("account type %s does not support usage query", account.Type)
+}
+
+func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Account) (*UsageInfo, error) {
+	now := time.Now()
+	usage := &UsageInfo{
+		UpdatedAt: &now,
+	}
+
+	if s.geminiQuotaService == nil || s.usageLogRepo == nil {
+		return usage, nil
+	}
+
+	quota, ok := s.geminiQuotaService.QuotaForAccount(ctx, account)
+	if !ok {
+		return usage, nil
+	}
+
+	dayStart := geminiDailyWindowStart(now)
+	stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil)
+	if err != nil {
+		return nil, fmt.Errorf("get gemini usage stats failed: %w", err)
+	}
+
+	dayTotals := geminiAggregateUsage(stats)
+	dailyResetAt := geminiDailyResetTime(now)
+
+	// Daily window (RPD)
+	if quota.SharedRPD > 0 {
+		totalReq := dayTotals.ProRequests + dayTotals.FlashRequests
+		totalTokens := dayTotals.ProTokens + dayTotals.FlashTokens
+		totalCost := dayTotals.ProCost + dayTotals.FlashCost
+		usage.GeminiSharedDaily = buildGeminiUsageProgress(totalReq, quota.SharedRPD, dailyResetAt, totalTokens, totalCost, now)
+	} else {
+		usage.GeminiProDaily = buildGeminiUsageProgress(dayTotals.ProRequests, quota.ProRPD, dailyResetAt, dayTotals.ProTokens, dayTotals.ProCost, now)
+		usage.GeminiFlashDaily = buildGeminiUsageProgress(dayTotals.FlashRequests, quota.FlashRPD, dailyResetAt, dayTotals.FlashTokens, dayTotals.FlashCost, now)
+	}
+
+	// Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m)
+	minuteStart := now.Truncate(time.Minute)
+	minuteResetAt := minuteStart.Add(time.Minute)
+	minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil)
+	if err != nil {
+		return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err)
+	}
+	minuteTotals := geminiAggregateUsage(minuteStats)
+
+	if quota.SharedRPM > 0 {
+		totalReq := minuteTotals.ProRequests + minuteTotals.FlashRequests
+		totalTokens := minuteTotals.ProTokens + minuteTotals.FlashTokens
+		totalCost := minuteTotals.ProCost + minuteTotals.FlashCost
+		usage.GeminiSharedMinute = buildGeminiUsageProgress(totalReq, quota.SharedRPM, minuteResetAt, totalTokens, totalCost, now)
+	} else {
+		usage.GeminiProMinute = buildGeminiUsageProgress(minuteTotals.ProRequests, quota.ProRPM, minuteResetAt, minuteTotals.ProTokens, minuteTotals.ProCost, now)
+		usage.GeminiFlashMinute = buildGeminiUsageProgress(minuteTotals.FlashRequests, quota.FlashRPM, minuteResetAt, minuteTotals.FlashTokens, minuteTotals.FlashCost, now)
+	}
+
+	return usage, nil
+}
+
+// getAntigravityUsage fetches quota for an Antigravity account
+func (s *AccountUsageService) getAntigravityUsage(ctx context.Context, account *Account) (*UsageInfo, error) {
+	if s.antigravityQuotaFetcher == nil || !s.antigravityQuotaFetcher.CanFetch(account) {
+		now := time.Now()
+		return &UsageInfo{UpdatedAt: &now}, nil
+	}
+
+	// 1. Check the cache (apiCacheTTL, 3 minutes)
+	if cached, ok := s.cache.antigravityCache.Load(account.ID); ok {
+		if cache, ok := cached.(*antigravityUsageCache); ok && time.Since(cache.timestamp) < apiCacheTTL {
+			// Recompute RemainingSeconds on a copy so concurrent readers
+			// never observe a mutated cached object.
+			usage := *cache.usageInfo
+			if usage.FiveHour != nil && usage.FiveHour.ResetsAt != nil {
+				fiveHour := *usage.FiveHour
+				fiveHour.RemainingSeconds = int(time.Until(*fiveHour.ResetsAt).Seconds())
+				usage.FiveHour = &fiveHour
+			}
+			return &usage, nil
+		}
+	}
+
+	// 2. Resolve the proxy URL
+	proxyURL := s.antigravityQuotaFetcher.GetProxyURL(ctx, account)
+
+	// 3. Call the API to fetch the quota
+	result, err := s.antigravityQuotaFetcher.FetchQuota(ctx, account, proxyURL)
+	if err != nil {
+		return nil, fmt.Errorf("fetch antigravity quota failed: %w", err)
+	}
+
+	// 4. Cache the result
+	s.cache.antigravityCache.Store(account.ID, &antigravityUsageCache{
+		usageInfo: result.UsageInfo,
+		timestamp: time.Now(),
+	})
+
+	return result.UsageInfo, nil
+}
+
+// addWindowStats attaches window-period statistics to the usage data.
+// Uses its own cache (1 minute), independent of the API cache.
+func (s *AccountUsageService) addWindowStats(ctx context.Context, account *Account, usage *UsageInfo) {
+	// Fix: even when FiveHour is nil, still try to fetch the stats,
+	// because SevenDay/SevenDaySonnet may need them
+	if usage.FiveHour == nil && usage.SevenDay == nil && usage.SevenDaySonnet == nil {
+		return
+	}
+
+	// Check the window-stats cache (1 minute)
+	var windowStats *WindowStats
+	if cached, ok := s.cache.windowStatsCache.Load(account.ID); ok {
+		if cache, ok := cached.(*windowStatsCache); ok && time.Since(cache.timestamp) < windowStatsCacheTTL {
+			windowStats = cache.stats
+		}
+	}
+
+	// On cache miss, query the database
+	if windowStats == nil {
+		var startTime time.Time
+		if account.SessionWindowStart != nil {
+			startTime = *account.SessionWindowStart
+		} else {
+			startTime = time.Now().Add(-5 * time.Hour)
+		}
+
+		stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, startTime)
+		if err != nil {
+			log.Printf("Failed to get window stats for account %d: %v", account.ID, err)
+			return
+		}
+
+		windowStats = &WindowStats{
+			Requests:     stats.Requests,
+			Tokens:       stats.Tokens,
+			Cost:         stats.Cost,
+			StandardCost: stats.StandardCost,
+			UserCost:     stats.UserCost,
+		}
+
+		// Cache the window stats (1 minute)
+		s.cache.windowStatsCache.Store(account.ID, &windowStatsCache{
+			stats:     windowStats,
+			timestamp: time.Now(),
+		})
+	}
+
+	// Attach WindowStats to FiveHour (stats for the 5h window)
+	if usage.FiveHour != nil {
+		usage.FiveHour.WindowStats = windowStats
+	}
+}
+
+// GetTodayStats returns today's statistics for an account
+func (s *AccountUsageService) GetTodayStats(ctx context.Context, accountID int64) (*WindowStats, error) {
+	stats, err := s.usageLogRepo.GetAccountTodayStats(ctx, accountID)
+	if err != nil {
+		return nil, fmt.Errorf("get today stats failed: %w", err)
+	}
+
+	return &WindowStats{
+		Requests:     stats.Requests,
+		Tokens:       stats.Tokens,
+		Cost:         stats.Cost,
+		StandardCost: stats.StandardCost,
+		UserCost:     stats.UserCost,
+	}, nil
+}
+
+func (s *AccountUsageService) GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.AccountUsageStatsResponse, error) {
+	stats, err := s.usageLogRepo.GetAccountUsageStats(ctx, accountID, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get account usage stats failed: %w", err)
+	}
+	return stats, nil
+}
+
+// fetchOAuthUsageRaw fetches the raw response from the Anthropic API (without building UsageInfo)
+func (s *AccountUsageService) fetchOAuthUsageRaw(ctx context.Context, account *Account) (*ClaudeUsageResponse, error) {
+	accessToken := account.GetCredential("access_token")
+	if accessToken == "" {
+		return nil, fmt.Errorf("no access token available")
+	}
+
+	var proxyURL string
+	if account.ProxyID != nil && account.Proxy != nil {
proxyURL = account.Proxy.URL() + } + + return s.usageFetcher.FetchUsage(ctx, accessToken, proxyURL) +} + +// parseTime 尝试多种格式解析时间 +func parseTime(s string) (time.Time, error) { + formats := []string{ + time.RFC3339, + time.RFC3339Nano, + "2006-01-02T15:04:05Z", + "2006-01-02T15:04:05.000Z", + } + for _, format := range formats { + if t, err := time.Parse(format, s); err == nil { + return t, nil + } + } + return time.Time{}, fmt.Errorf("unable to parse time: %s", s) +} + +// buildUsageInfo 构建UsageInfo +func (s *AccountUsageService) buildUsageInfo(resp *ClaudeUsageResponse, updatedAt *time.Time) *UsageInfo { + info := &UsageInfo{ + UpdatedAt: updatedAt, + } + + // 5小时窗口 - 始终创建对象(即使 ResetsAt 为空) + info.FiveHour = &UsageProgress{ + Utilization: resp.FiveHour.Utilization, + } + if resp.FiveHour.ResetsAt != "" { + if fiveHourReset, err := parseTime(resp.FiveHour.ResetsAt); err == nil { + info.FiveHour.ResetsAt = &fiveHourReset + info.FiveHour.RemainingSeconds = int(time.Until(fiveHourReset).Seconds()) + } else { + log.Printf("Failed to parse FiveHour.ResetsAt: %s, error: %v", resp.FiveHour.ResetsAt, err) + } + } + + // 7天窗口 + if resp.SevenDay.ResetsAt != "" { + if sevenDayReset, err := parseTime(resp.SevenDay.ResetsAt); err == nil { + info.SevenDay = &UsageProgress{ + Utilization: resp.SevenDay.Utilization, + ResetsAt: &sevenDayReset, + RemainingSeconds: int(time.Until(sevenDayReset).Seconds()), + } + } else { + log.Printf("Failed to parse SevenDay.ResetsAt: %s, error: %v", resp.SevenDay.ResetsAt, err) + info.SevenDay = &UsageProgress{ + Utilization: resp.SevenDay.Utilization, + } + } + } + + // 7天Sonnet窗口 + if resp.SevenDaySonnet.ResetsAt != "" { + if sonnetReset, err := parseTime(resp.SevenDaySonnet.ResetsAt); err == nil { + info.SevenDaySonnet = &UsageProgress{ + Utilization: resp.SevenDaySonnet.Utilization, + ResetsAt: &sonnetReset, + RemainingSeconds: int(time.Until(sonnetReset).Seconds()), + } + } else { + log.Printf("Failed to parse SevenDaySonnet.ResetsAt: %s, error: %v", resp.SevenDaySonnet.ResetsAt, err) + info.SevenDaySonnet = &UsageProgress{ + Utilization: resp.SevenDaySonnet.Utilization, + } + } + } + + return info +} + +// estimateSetupTokenUsage 根据session_window推算Setup Token账号的使用量 +func (s *AccountUsageService) estimateSetupTokenUsage(account *Account) *UsageInfo { + info := &UsageInfo{} + + // 如果有session_window信息 + if account.SessionWindowEnd != nil { + remaining := int(time.Until(*account.SessionWindowEnd).Seconds()) + if remaining < 0 { + remaining = 0 + } + + // 根据状态估算使用率 (百分比形式,100 = 100%) + var utilization float64 + switch account.SessionWindowStatus { + case "rejected": + utilization = 100.0 + case "allowed_warning": + utilization = 80.0 + default: + utilization = 0.0 + } + + info.FiveHour = &UsageProgress{ + Utilization: utilization, + ResetsAt: account.SessionWindowEnd, + RemainingSeconds: remaining, + } + } else { + // 没有窗口信息,返回空数据 + info.FiveHour = &UsageProgress{ + Utilization: 0, + RemainingSeconds: 0, + } + } + + // Setup Token无法获取7d数据 + return info +} + +func buildGeminiUsageProgress(used, limit int64, resetAt time.Time, tokens int64, cost float64, now time.Time) *UsageProgress { + // limit <= 0 means "no local quota window" (unknown or unlimited). 
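A worked sketch of the resulting semantics, with illustrative numbers (assumes the same package; 30 of 100 requests used, reset two hours out):

	// Sketch: buildGeminiUsageProgress yields percent utilization and a countdown.
	func exampleQuotaProgress() {
		now := time.Now()
		if p := buildGeminiUsageProgress(30, 100, now.Add(2*time.Hour), 12345, 0.42, now); p != nil {
			// Prints: 30.0% used (30/100 requests), resets in 7200s
			fmt.Printf("%.1f%% used (%d/%d requests), resets in %ds\n",
				p.Utilization, p.UsedRequests, p.LimitRequests, p.RemainingSeconds)
		}
		// limit <= 0 yields nil, so callers must nil-check before dereferencing.
		if buildGeminiUsageProgress(5, 0, now, 0, 0, now) == nil {
			fmt.Println("no local quota window")
		}
	}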
+ if limit <= 0 { + return nil + } + utilization := (float64(used) / float64(limit)) * 100 + remainingSeconds := int(resetAt.Sub(now).Seconds()) + if remainingSeconds < 0 { + remainingSeconds = 0 + } + resetCopy := resetAt + return &UsageProgress{ + Utilization: utilization, + ResetsAt: &resetCopy, + RemainingSeconds: remainingSeconds, + UsedRequests: used, + LimitRequests: limit, + WindowStats: &WindowStats{ + Requests: used, + Tokens: tokens, + Cost: cost, + }, + } +} diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go new file mode 100644 index 00000000..1e32699c --- /dev/null +++ b/backend/internal/service/admin_service.go @@ -0,0 +1,1512 @@ +package service + +import ( + "context" + "errors" + "fmt" + "log" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +// AdminService interface defines admin management operations +type AdminService interface { + // User management + ListUsers(ctx context.Context, page, pageSize int, filters UserListFilters) ([]User, int64, error) + GetUser(ctx context.Context, id int64) (*User, error) + CreateUser(ctx context.Context, input *CreateUserInput) (*User, error) + UpdateUser(ctx context.Context, id int64, input *UpdateUserInput) (*User, error) + DeleteUser(ctx context.Context, id int64) error + UpdateUserBalance(ctx context.Context, userID int64, balance float64, operation string, notes string) (*User, error) + GetUserAPIKeys(ctx context.Context, userID int64, page, pageSize int) ([]APIKey, int64, error) + GetUserUsageStats(ctx context.Context, userID int64, period string) (any, error) + + // Group management + ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, isExclusive *bool) ([]Group, int64, error) + GetAllGroups(ctx context.Context) ([]Group, error) + GetAllGroupsByPlatform(ctx context.Context, platform string) ([]Group, error) + GetGroup(ctx context.Context, id int64) (*Group, error) + CreateGroup(ctx context.Context, input *CreateGroupInput) (*Group, error) + UpdateGroup(ctx context.Context, id int64, input *UpdateGroupInput) (*Group, error) + DeleteGroup(ctx context.Context, id int64) error + GetGroupAPIKeys(ctx context.Context, groupID int64, page, pageSize int) ([]APIKey, int64, error) + + // Account management + ListAccounts(ctx context.Context, page, pageSize int, platform, accountType, status, search string) ([]Account, int64, error) + GetAccount(ctx context.Context, id int64) (*Account, error) + GetAccountsByIDs(ctx context.Context, ids []int64) ([]*Account, error) + CreateAccount(ctx context.Context, input *CreateAccountInput) (*Account, error) + UpdateAccount(ctx context.Context, id int64, input *UpdateAccountInput) (*Account, error) + DeleteAccount(ctx context.Context, id int64) error + RefreshAccountCredentials(ctx context.Context, id int64) (*Account, error) + ClearAccountError(ctx context.Context, id int64) (*Account, error) + SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*Account, error) + BulkUpdateAccounts(ctx context.Context, input *BulkUpdateAccountsInput) (*BulkUpdateAccountsResult, error) + + // Proxy management + ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]Proxy, int64, error) + ListProxiesWithAccountCount(ctx context.Context, page, pageSize int, protocol, status, search string) ([]ProxyWithAccountCount, int64, error) + GetAllProxies(ctx context.Context) ([]Proxy, error) + GetAllProxiesWithAccountCount(ctx context.Context) 
([]ProxyWithAccountCount, error) + GetProxy(ctx context.Context, id int64) (*Proxy, error) + CreateProxy(ctx context.Context, input *CreateProxyInput) (*Proxy, error) + UpdateProxy(ctx context.Context, id int64, input *UpdateProxyInput) (*Proxy, error) + DeleteProxy(ctx context.Context, id int64) error + BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error) + GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) + CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) + TestProxy(ctx context.Context, id int64) (*ProxyTestResult, error) + + // Redeem code management + ListRedeemCodes(ctx context.Context, page, pageSize int, codeType, status, search string) ([]RedeemCode, int64, error) + GetRedeemCode(ctx context.Context, id int64) (*RedeemCode, error) + GenerateRedeemCodes(ctx context.Context, input *GenerateRedeemCodesInput) ([]RedeemCode, error) + DeleteRedeemCode(ctx context.Context, id int64) error + BatchDeleteRedeemCodes(ctx context.Context, ids []int64) (int64, error) + ExpireRedeemCode(ctx context.Context, id int64) (*RedeemCode, error) +} + +// CreateUserInput represents input for creating a new user via admin operations. +type CreateUserInput struct { + Email string + Password string + Username string + Notes string + Balance float64 + Concurrency int + AllowedGroups []int64 +} + +type UpdateUserInput struct { + Email string + Password string + Username *string + Notes *string + Balance *float64 // 使用指针区分"未提供"和"设置为0" + Concurrency *int // 使用指针区分"未提供"和"设置为0" + Status string + AllowedGroups *[]int64 // 使用指针区分"未提供"和"设置为空数组" +} + +type CreateGroupInput struct { + Name string + Description string + Platform string + RateMultiplier float64 + IsExclusive bool + SubscriptionType string // standard/subscription + DailyLimitUSD *float64 // 日限额 (USD) + WeeklyLimitUSD *float64 // 周限额 (USD) + MonthlyLimitUSD *float64 // 月限额 (USD) + // 图片生成计费配置(仅 antigravity 平台使用) + ImagePrice1K *float64 + ImagePrice2K *float64 + ImagePrice4K *float64 + ClaudeCodeOnly bool // 仅允许 Claude Code 客户端 + FallbackGroupID *int64 // 降级分组 ID +} + +type UpdateGroupInput struct { + Name string + Description string + Platform string + RateMultiplier *float64 // 使用指针以支持设置为0 + IsExclusive *bool + Status string + SubscriptionType string // standard/subscription + DailyLimitUSD *float64 // 日限额 (USD) + WeeklyLimitUSD *float64 // 周限额 (USD) + MonthlyLimitUSD *float64 // 月限额 (USD) + // 图片生成计费配置(仅 antigravity 平台使用) + ImagePrice1K *float64 + ImagePrice2K *float64 + ImagePrice4K *float64 + ClaudeCodeOnly *bool // 仅允许 Claude Code 客户端 + FallbackGroupID *int64 // 降级分组 ID +} + +type CreateAccountInput struct { + Name string + Notes *string + Platform string + Type string + Credentials map[string]any + Extra map[string]any + ProxyID *int64 + Concurrency int + Priority int + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) + GroupIDs []int64 + ExpiresAt *int64 + AutoPauseOnExpired *bool + // SkipMixedChannelCheck skips the mixed channel risk check when binding groups. + // This should only be set when the caller has explicitly confirmed the risk. 
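A sketch of the intended confirm-then-retry flow around that flag (svc, the group ID, and the confirm callback are illustrative; in practice the confirmation happens in the admin UI):

	// Sketch: create an account, surface the mixed-channel warning, and retry
	// only after the operator has explicitly confirmed the risk.
	func createWithConfirmation(ctx context.Context, svc AdminService, confirm func(error) bool) (*Account, error) {
		input := &CreateAccountInput{
			Name:     "claude-main", // illustrative values
			Platform: "anthropic",
			Type:     "oauth",
			GroupIDs: []int64{42},
		}
		account, err := svc.CreateAccount(ctx, input)
		if err == nil {
			return account, nil
		}
		if !confirm(err) { // e.g. show the warning and ask the operator
			return nil, err
		}
		input.SkipMixedChannelCheck = true
		return svc.CreateAccount(ctx, input)
	}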
+ SkipMixedChannelCheck bool +} + +type UpdateAccountInput struct { + Name string + Notes *string + Type string // Account type: oauth, setup-token, apikey + Credentials map[string]any + Extra map[string]any + ProxyID *int64 + Concurrency *int // 使用指针区分"未提供"和"设置为0" + Priority *int // 使用指针区分"未提供"和"设置为0" + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) + Status string + GroupIDs *[]int64 + ExpiresAt *int64 + AutoPauseOnExpired *bool + SkipMixedChannelCheck bool // 跳过混合渠道检查(用户已确认风险) +} + +// BulkUpdateAccountsInput describes the payload for bulk updating accounts. +type BulkUpdateAccountsInput struct { + AccountIDs []int64 + Name string + ProxyID *int64 + Concurrency *int + Priority *int + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) + Status string + Schedulable *bool + GroupIDs *[]int64 + Credentials map[string]any + Extra map[string]any + // SkipMixedChannelCheck skips the mixed channel risk check when binding groups. + // This should only be set when the caller has explicitly confirmed the risk. + SkipMixedChannelCheck bool +} + +// BulkUpdateAccountResult captures the result for a single account update. +type BulkUpdateAccountResult struct { + AccountID int64 `json:"account_id"` + Success bool `json:"success"` + Error string `json:"error,omitempty"` +} + +// BulkUpdateAccountsResult is the aggregated response for bulk updates. +type BulkUpdateAccountsResult struct { + Success int `json:"success"` + Failed int `json:"failed"` + SuccessIDs []int64 `json:"success_ids"` + FailedIDs []int64 `json:"failed_ids"` + Results []BulkUpdateAccountResult `json:"results"` +} + +type CreateProxyInput struct { + Name string + Protocol string + Host string + Port int + Username string + Password string +} + +type UpdateProxyInput struct { + Name string + Protocol string + Host string + Port int + Username string + Password string + Status string +} + +type GenerateRedeemCodesInput struct { + Count int + Type string + Value float64 + GroupID *int64 // 订阅类型专用:关联的分组ID + ValidityDays int // 订阅类型专用:有效天数 +} + +type ProxyBatchDeleteResult struct { + DeletedIDs []int64 `json:"deleted_ids"` + Skipped []ProxyBatchDeleteSkipped `json:"skipped"` +} + +type ProxyBatchDeleteSkipped struct { + ID int64 `json:"id"` + Reason string `json:"reason"` +} + +// ProxyTestResult represents the result of testing a proxy +type ProxyTestResult struct { + Success bool `json:"success"` + Message string `json:"message"` + LatencyMs int64 `json:"latency_ms,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + City string `json:"city,omitempty"` + Region string `json:"region,omitempty"` + Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` +} + +// ProxyExitInfo represents proxy exit information from ip-api.com +type ProxyExitInfo struct { + IP string + City string + Region string + Country string + CountryCode string +} + +// ProxyExitInfoProber tests proxy connectivity and retrieves exit information +type ProxyExitInfoProber interface { + ProbeProxy(ctx context.Context, proxyURL string) (*ProxyExitInfo, int64, error) +} + +// adminServiceImpl implements AdminService +type adminServiceImpl struct { + userRepo UserRepository + groupRepo GroupRepository + accountRepo AccountRepository + proxyRepo ProxyRepository + apiKeyRepo APIKeyRepository + redeemCodeRepo RedeemCodeRepository + billingCacheService *BillingCacheService + proxyProber ProxyExitInfoProber + proxyLatencyCache ProxyLatencyCache + authCacheInvalidator APIKeyAuthCacheInvalidator +} + +// NewAdminService creates a new 
AdminService +func NewAdminService( + userRepo UserRepository, + groupRepo GroupRepository, + accountRepo AccountRepository, + proxyRepo ProxyRepository, + apiKeyRepo APIKeyRepository, + redeemCodeRepo RedeemCodeRepository, + billingCacheService *BillingCacheService, + proxyProber ProxyExitInfoProber, + proxyLatencyCache ProxyLatencyCache, + authCacheInvalidator APIKeyAuthCacheInvalidator, +) AdminService { + return &adminServiceImpl{ + userRepo: userRepo, + groupRepo: groupRepo, + accountRepo: accountRepo, + proxyRepo: proxyRepo, + apiKeyRepo: apiKeyRepo, + redeemCodeRepo: redeemCodeRepo, + billingCacheService: billingCacheService, + proxyProber: proxyProber, + proxyLatencyCache: proxyLatencyCache, + authCacheInvalidator: authCacheInvalidator, + } +} + +// User management implementations +func (s *adminServiceImpl) ListUsers(ctx context.Context, page, pageSize int, filters UserListFilters) ([]User, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + users, result, err := s.userRepo.ListWithFilters(ctx, params, filters) + if err != nil { + return nil, 0, err + } + return users, result.Total, nil +} + +func (s *adminServiceImpl) GetUser(ctx context.Context, id int64) (*User, error) { + return s.userRepo.GetByID(ctx, id) +} + +func (s *adminServiceImpl) CreateUser(ctx context.Context, input *CreateUserInput) (*User, error) { + user := &User{ + Email: input.Email, + Username: input.Username, + Notes: input.Notes, + Role: RoleUser, // Always create as regular user, never admin + Balance: input.Balance, + Concurrency: input.Concurrency, + Status: StatusActive, + AllowedGroups: input.AllowedGroups, + } + if err := user.SetPassword(input.Password); err != nil { + return nil, err + } + if err := s.userRepo.Create(ctx, user); err != nil { + return nil, err + } + return user, nil +} + +func (s *adminServiceImpl) UpdateUser(ctx context.Context, id int64, input *UpdateUserInput) (*User, error) { + user, err := s.userRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + // Protect admin users: cannot disable admin accounts + if user.Role == "admin" && input.Status == "disabled" { + return nil, errors.New("cannot disable admin user") + } + + oldConcurrency := user.Concurrency + oldStatus := user.Status + oldRole := user.Role + + if input.Email != "" { + user.Email = input.Email + } + if input.Password != "" { + if err := user.SetPassword(input.Password); err != nil { + return nil, err + } + } + + if input.Username != nil { + user.Username = *input.Username + } + if input.Notes != nil { + user.Notes = *input.Notes + } + + if input.Status != "" { + user.Status = input.Status + } + + if input.Concurrency != nil { + user.Concurrency = *input.Concurrency + } + + if input.AllowedGroups != nil { + user.AllowedGroups = *input.AllowedGroups + } + + if err := s.userRepo.Update(ctx, user); err != nil { + return nil, err + } + if s.authCacheInvalidator != nil { + if user.Concurrency != oldConcurrency || user.Status != oldStatus || user.Role != oldRole { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, user.ID) + } + } + + concurrencyDiff := user.Concurrency - oldConcurrency + if concurrencyDiff != 0 { + code, err := GenerateRedeemCode() + if err != nil { + log.Printf("failed to generate adjustment redeem code: %v", err) + return user, nil + } + adjustmentRecord := &RedeemCode{ + Code: code, + Type: AdjustmentTypeAdminConcurrency, + Value: float64(concurrencyDiff), + Status: StatusUsed, + UsedBy: &user.ID, + } + now := time.Now() + adjustmentRecord.UsedAt = 
&now + if err := s.redeemCodeRepo.Create(ctx, adjustmentRecord); err != nil { + log.Printf("failed to create concurrency adjustment redeem code: %v", err) + } + } + + return user, nil +} + +func (s *adminServiceImpl) DeleteUser(ctx context.Context, id int64) error { + // Protect admin users: cannot delete admin accounts + user, err := s.userRepo.GetByID(ctx, id) + if err != nil { + return err + } + if user.Role == "admin" { + return errors.New("cannot delete admin user") + } + if err := s.userRepo.Delete(ctx, id); err != nil { + log.Printf("delete user failed: user_id=%d err=%v", id, err) + return err + } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, id) + } + return nil +} + +func (s *adminServiceImpl) UpdateUserBalance(ctx context.Context, userID int64, balance float64, operation string, notes string) (*User, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, err + } + + oldBalance := user.Balance + + switch operation { + case "set": + user.Balance = balance + case "add": + user.Balance += balance + case "subtract": + user.Balance -= balance + } + + if user.Balance < 0 { + return nil, fmt.Errorf("balance cannot be negative, current balance: %.2f, requested operation would result in: %.2f", oldBalance, user.Balance) + } + + if err := s.userRepo.Update(ctx, user); err != nil { + return nil, err + } + balanceDiff := user.Balance - oldBalance + if s.authCacheInvalidator != nil && balanceDiff != 0 { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } + + if s.billingCacheService != nil { + go func() { + cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := s.billingCacheService.InvalidateUserBalance(cacheCtx, userID); err != nil { + log.Printf("invalidate user balance cache failed: user_id=%d err=%v", userID, err) + } + }() + } + + if balanceDiff != 0 { + code, err := GenerateRedeemCode() + if err != nil { + log.Printf("failed to generate adjustment redeem code: %v", err) + return user, nil + } + + adjustmentRecord := &RedeemCode{ + Code: code, + Type: AdjustmentTypeAdminBalance, + Value: balanceDiff, + Status: StatusUsed, + UsedBy: &user.ID, + Notes: notes, + } + now := time.Now() + adjustmentRecord.UsedAt = &now + + if err := s.redeemCodeRepo.Create(ctx, adjustmentRecord); err != nil { + log.Printf("failed to create balance adjustment redeem code: %v", err) + } + } + + return user, nil +} + +func (s *adminServiceImpl) GetUserAPIKeys(ctx context.Context, userID int64, page, pageSize int) ([]APIKey, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + keys, result, err := s.apiKeyRepo.ListByUserID(ctx, userID, params) + if err != nil { + return nil, 0, err + } + return keys, result.Total, nil +} + +func (s *adminServiceImpl) GetUserUsageStats(ctx context.Context, userID int64, period string) (any, error) { + // Return mock data for now + return map[string]any{ + "period": period, + "total_requests": 0, + "total_cost": 0.0, + "total_tokens": 0, + "avg_duration_ms": 0, + }, nil +} + +// Group management implementations +func (s *adminServiceImpl) ListGroups(ctx context.Context, page, pageSize int, platform, status, search string, isExclusive *bool) ([]Group, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + groups, result, err := s.groupRepo.ListWithFilters(ctx, params, platform, status, search, isExclusive) + if err != nil { + return nil, 0, err + } + return 
groups, result.Total, nil +} + +func (s *adminServiceImpl) GetAllGroups(ctx context.Context) ([]Group, error) { + return s.groupRepo.ListActive(ctx) +} + +func (s *adminServiceImpl) GetAllGroupsByPlatform(ctx context.Context, platform string) ([]Group, error) { + return s.groupRepo.ListActiveByPlatform(ctx, platform) +} + +func (s *adminServiceImpl) GetGroup(ctx context.Context, id int64) (*Group, error) { + return s.groupRepo.GetByID(ctx, id) +} + +func (s *adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupInput) (*Group, error) { + platform := input.Platform + if platform == "" { + platform = PlatformAnthropic + } + + subscriptionType := input.SubscriptionType + if subscriptionType == "" { + subscriptionType = SubscriptionTypeStandard + } + + // 限额字段:0 和 nil 都表示"无限制" + dailyLimit := normalizeLimit(input.DailyLimitUSD) + weeklyLimit := normalizeLimit(input.WeeklyLimitUSD) + monthlyLimit := normalizeLimit(input.MonthlyLimitUSD) + + // 图片价格:负数表示清除(使用默认价格),0 保留(表示免费) + imagePrice1K := normalizePrice(input.ImagePrice1K) + imagePrice2K := normalizePrice(input.ImagePrice2K) + imagePrice4K := normalizePrice(input.ImagePrice4K) + + // 校验降级分组 + if input.FallbackGroupID != nil { + if err := s.validateFallbackGroup(ctx, 0, *input.FallbackGroupID); err != nil { + return nil, err + } + } + + group := &Group{ + Name: input.Name, + Description: input.Description, + Platform: platform, + RateMultiplier: input.RateMultiplier, + IsExclusive: input.IsExclusive, + Status: StatusActive, + SubscriptionType: subscriptionType, + DailyLimitUSD: dailyLimit, + WeeklyLimitUSD: weeklyLimit, + MonthlyLimitUSD: monthlyLimit, + ImagePrice1K: imagePrice1K, + ImagePrice2K: imagePrice2K, + ImagePrice4K: imagePrice4K, + ClaudeCodeOnly: input.ClaudeCodeOnly, + FallbackGroupID: input.FallbackGroupID, + } + if err := s.groupRepo.Create(ctx, group); err != nil { + return nil, err + } + return group, nil +} + +// normalizeLimit 将 0 或负数转换为 nil(表示无限制) +func normalizeLimit(limit *float64) *float64 { + if limit == nil || *limit <= 0 { + return nil + } + return limit +} + +// normalizePrice 将负数转换为 nil(表示使用默认价格),0 保留(表示免费) +func normalizePrice(price *float64) *float64 { + if price == nil || *price < 0 { + return nil + } + return price +} + +// validateFallbackGroup 校验降级分组的有效性 +// currentGroupID: 当前分组 ID(新建时为 0) +// fallbackGroupID: 降级分组 ID +func (s *adminServiceImpl) validateFallbackGroup(ctx context.Context, currentGroupID, fallbackGroupID int64) error { + // 不能将自己设置为降级分组 + if currentGroupID > 0 && currentGroupID == fallbackGroupID { + return fmt.Errorf("cannot set self as fallback group") + } + + visited := map[int64]struct{}{} + nextID := fallbackGroupID + for { + if _, seen := visited[nextID]; seen { + return fmt.Errorf("fallback group cycle detected") + } + visited[nextID] = struct{}{} + if currentGroupID > 0 && nextID == currentGroupID { + return fmt.Errorf("fallback group cycle detected") + } + + // 检查降级分组是否存在 + fallbackGroup, err := s.groupRepo.GetByIDLite(ctx, nextID) + if err != nil { + return fmt.Errorf("fallback group not found: %w", err) + } + + // 降级分组不能启用 claude_code_only,否则会造成死循环 + if nextID == fallbackGroupID && fallbackGroup.ClaudeCodeOnly { + return fmt.Errorf("fallback group cannot have claude_code_only enabled") + } + + if fallbackGroup.FallbackGroupID == nil { + return nil + } + nextID = *fallbackGroup.FallbackGroupID + } +} + +func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *UpdateGroupInput) (*Group, error) { + group, err := s.groupRepo.GetByID(ctx, id) + if err != 
nil { + return nil, err + } + + if input.Name != "" { + group.Name = input.Name + } + if input.Description != "" { + group.Description = input.Description + } + if input.Platform != "" { + group.Platform = input.Platform + } + if input.RateMultiplier != nil { + group.RateMultiplier = *input.RateMultiplier + } + if input.IsExclusive != nil { + group.IsExclusive = *input.IsExclusive + } + if input.Status != "" { + group.Status = input.Status + } + + // 订阅相关字段 + if input.SubscriptionType != "" { + group.SubscriptionType = input.SubscriptionType + } + // 限额字段:0 和 nil 都表示"无限制",正数表示具体限额 + if input.DailyLimitUSD != nil { + group.DailyLimitUSD = normalizeLimit(input.DailyLimitUSD) + } + if input.WeeklyLimitUSD != nil { + group.WeeklyLimitUSD = normalizeLimit(input.WeeklyLimitUSD) + } + if input.MonthlyLimitUSD != nil { + group.MonthlyLimitUSD = normalizeLimit(input.MonthlyLimitUSD) + } + // 图片生成计费配置:负数表示清除(使用默认价格) + if input.ImagePrice1K != nil { + group.ImagePrice1K = normalizePrice(input.ImagePrice1K) + } + if input.ImagePrice2K != nil { + group.ImagePrice2K = normalizePrice(input.ImagePrice2K) + } + if input.ImagePrice4K != nil { + group.ImagePrice4K = normalizePrice(input.ImagePrice4K) + } + + // Claude Code 客户端限制 + if input.ClaudeCodeOnly != nil { + group.ClaudeCodeOnly = *input.ClaudeCodeOnly + } + if input.FallbackGroupID != nil { + // 校验降级分组 + if *input.FallbackGroupID > 0 { + if err := s.validateFallbackGroup(ctx, id, *input.FallbackGroupID); err != nil { + return nil, err + } + group.FallbackGroupID = input.FallbackGroupID + } else { + // 传入 0 或负数表示清除降级分组 + group.FallbackGroupID = nil + } + } + + if err := s.groupRepo.Update(ctx, group); err != nil { + return nil, err + } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } + return group, nil +} + +func (s *adminServiceImpl) DeleteGroup(ctx context.Context, id int64) error { + var groupKeys []string + if s.authCacheInvalidator != nil { + keys, err := s.apiKeyRepo.ListKeysByGroupID(ctx, id) + if err == nil { + groupKeys = keys + } + } + + affectedUserIDs, err := s.groupRepo.DeleteCascade(ctx, id) + if err != nil { + return err + } + + // 事务成功后,异步失效受影响用户的订阅缓存 + if len(affectedUserIDs) > 0 && s.billingCacheService != nil { + groupID := id + go func() { + cacheCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + for _, userID := range affectedUserIDs { + if err := s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID); err != nil { + log.Printf("invalidate subscription cache failed: user_id=%d group_id=%d err=%v", userID, groupID, err) + } + } + }() + } + if s.authCacheInvalidator != nil { + for _, key := range groupKeys { + s.authCacheInvalidator.InvalidateAuthCacheByKey(ctx, key) + } + } + + return nil +} + +func (s *adminServiceImpl) GetGroupAPIKeys(ctx context.Context, groupID int64, page, pageSize int) ([]APIKey, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + keys, result, err := s.apiKeyRepo.ListByGroupID(ctx, groupID, params) + if err != nil { + return nil, 0, err + } + return keys, result.Total, nil +} + +// Account management implementations +func (s *adminServiceImpl) ListAccounts(ctx context.Context, page, pageSize int, platform, accountType, status, search string) ([]Account, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + accounts, result, err := s.accountRepo.ListWithFilters(ctx, params, platform, accountType, status, search) + if err != 
nil { + return nil, 0, err + } + return accounts, result.Total, nil +} + +func (s *adminServiceImpl) GetAccount(ctx context.Context, id int64) (*Account, error) { + return s.accountRepo.GetByID(ctx, id) +} + +func (s *adminServiceImpl) GetAccountsByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + if len(ids) == 0 { + return []*Account{}, nil + } + + accounts, err := s.accountRepo.GetByIDs(ctx, ids) + if err != nil { + return nil, fmt.Errorf("failed to get accounts by IDs: %w", err) + } + + return accounts, nil +} + +func (s *adminServiceImpl) CreateAccount(ctx context.Context, input *CreateAccountInput) (*Account, error) { + // 绑定分组 + groupIDs := input.GroupIDs + // 如果没有指定分组,自动绑定对应平台的默认分组 + if len(groupIDs) == 0 { + defaultGroupName := input.Platform + "-default" + groups, err := s.groupRepo.ListActiveByPlatform(ctx, input.Platform) + if err == nil { + for _, g := range groups { + if g.Name == defaultGroupName { + groupIDs = []int64{g.ID} + break + } + } + } + } + + // 检查混合渠道风险(除非用户已确认) + if len(groupIDs) > 0 && !input.SkipMixedChannelCheck { + if err := s.checkMixedChannelRisk(ctx, 0, input.Platform, groupIDs); err != nil { + return nil, err + } + } + + account := &Account{ + Name: input.Name, + Notes: normalizeAccountNotes(input.Notes), + Platform: input.Platform, + Type: input.Type, + Credentials: input.Credentials, + Extra: input.Extra, + ProxyID: input.ProxyID, + Concurrency: input.Concurrency, + Priority: input.Priority, + Status: StatusActive, + Schedulable: true, + } + if input.ExpiresAt != nil && *input.ExpiresAt > 0 { + expiresAt := time.Unix(*input.ExpiresAt, 0) + account.ExpiresAt = &expiresAt + } + if input.AutoPauseOnExpired != nil { + account.AutoPauseOnExpired = *input.AutoPauseOnExpired + } else { + account.AutoPauseOnExpired = true + } + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + account.RateMultiplier = input.RateMultiplier + } + if err := s.accountRepo.Create(ctx, account); err != nil { + return nil, err + } + + // 绑定分组 + if len(groupIDs) > 0 { + if err := s.accountRepo.BindGroups(ctx, account.ID, groupIDs); err != nil { + return nil, err + } + } + + return account, nil +} + +func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *UpdateAccountInput) (*Account, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Name != "" { + account.Name = input.Name + } + if input.Type != "" { + account.Type = input.Type + } + if input.Notes != nil { + account.Notes = normalizeAccountNotes(input.Notes) + } + if len(input.Credentials) > 0 { + account.Credentials = input.Credentials + } + if len(input.Extra) > 0 { + account.Extra = input.Extra + } + if input.ProxyID != nil { + // 0 表示清除代理(前端发送 0 而不是 null 来表达清除意图) + if *input.ProxyID == 0 { + account.ProxyID = nil + } else { + account.ProxyID = input.ProxyID + } + account.Proxy = nil // 清除关联对象,防止 GORM Save 时根据 Proxy.ID 覆盖 ProxyID + } + // 只在指针非 nil 时更新 Concurrency(支持设置为 0) + if input.Concurrency != nil { + account.Concurrency = *input.Concurrency + } + // 只在指针非 nil 时更新 Priority(支持设置为 0) + if input.Priority != nil { + account.Priority = *input.Priority + } + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + account.RateMultiplier = input.RateMultiplier + } + if input.Status != "" { + account.Status = input.Status + } + if input.ExpiresAt != nil { + if *input.ExpiresAt <= 0 { + 
account.ExpiresAt = nil + } else { + expiresAt := time.Unix(*input.ExpiresAt, 0) + account.ExpiresAt = &expiresAt + } + } + if input.AutoPauseOnExpired != nil { + account.AutoPauseOnExpired = *input.AutoPauseOnExpired + } + + // Validate that the groups exist first (before any write operations) + if input.GroupIDs != nil { + for _, groupID := range *input.GroupIDs { + if _, err := s.groupRepo.GetByID(ctx, groupID); err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + } + + // Check mixed-channel risk (unless the user has already confirmed) + if !input.SkipMixedChannelCheck { + if err := s.checkMixedChannelRisk(ctx, account.ID, account.Platform, *input.GroupIDs); err != nil { + return nil, err + } + } + } + + if err := s.accountRepo.Update(ctx, account); err != nil { + return nil, err + } + + // Bind groups + if input.GroupIDs != nil { + if err := s.accountRepo.BindGroups(ctx, account.ID, *input.GroupIDs); err != nil { + return nil, err + } + } + + // Re-fetch to ensure complete data is returned (including the correct Proxy association) + return s.accountRepo.GetByID(ctx, id) +} + +// BulkUpdateAccounts updates multiple accounts in one request. +// It merges credentials/extra keys instead of overwriting the whole object. +func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUpdateAccountsInput) (*BulkUpdateAccountsResult, error) { + result := &BulkUpdateAccountsResult{ + SuccessIDs: make([]int64, 0, len(input.AccountIDs)), + FailedIDs: make([]int64, 0, len(input.AccountIDs)), + Results: make([]BulkUpdateAccountResult, 0, len(input.AccountIDs)), + } + + if len(input.AccountIDs) == 0 { + return result, nil + } + + // Preload account platforms for mixed channel risk checks if group bindings are requested. + platformByID := map[int64]string{} + if input.GroupIDs != nil && !input.SkipMixedChannelCheck { + accounts, err := s.accountRepo.GetByIDs(ctx, input.AccountIDs) + if err != nil { + return nil, err + } + for _, account := range accounts { + if account != nil { + platformByID[account.ID] = account.Platform + } + } + } + + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + } + + // Prepare bulk updates for columns and JSONB fields. + repoUpdates := AccountBulkUpdate{ + Credentials: input.Credentials, + Extra: input.Extra, + } + if input.Name != "" { + repoUpdates.Name = &input.Name + } + if input.ProxyID != nil { + repoUpdates.ProxyID = input.ProxyID + } + if input.Concurrency != nil { + repoUpdates.Concurrency = input.Concurrency + } + if input.Priority != nil { + repoUpdates.Priority = input.Priority + } + if input.RateMultiplier != nil { + repoUpdates.RateMultiplier = input.RateMultiplier + } + if input.Status != "" { + repoUpdates.Status = &input.Status + } + if input.Schedulable != nil { + repoUpdates.Schedulable = input.Schedulable + } + + // Run bulk update for column/jsonb fields first. + if _, err := s.accountRepo.BulkUpdate(ctx, input.AccountIDs, repoUpdates); err != nil { + return nil, err + } + + // Handle group bindings per account (requires individual operations). 
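// Each account can fail independently (mixed-channel check or BindGroups), so failures are recorded per ID instead of aborting the whole batch.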
+ for _, accountID := range input.AccountIDs { + entry := BulkUpdateAccountResult{AccountID: accountID} + + if input.GroupIDs != nil { + // Check mixed-channel risk (unless the user has already confirmed) + if !input.SkipMixedChannelCheck { + platform := platformByID[accountID] + if platform == "" { + account, err := s.accountRepo.GetByID(ctx, accountID) + if err != nil { + entry.Success = false + entry.Error = err.Error() + result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) + result.Results = append(result.Results, entry) + continue + } + platform = account.Platform + } + if err := s.checkMixedChannelRisk(ctx, accountID, platform, *input.GroupIDs); err != nil { + entry.Success = false + entry.Error = err.Error() + result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) + result.Results = append(result.Results, entry) + continue + } + } + + if err := s.accountRepo.BindGroups(ctx, accountID, *input.GroupIDs); err != nil { + entry.Success = false + entry.Error = err.Error() + result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) + result.Results = append(result.Results, entry) + continue + } + } + + entry.Success = true + result.Success++ + result.SuccessIDs = append(result.SuccessIDs, accountID) + result.Results = append(result.Results, entry) + } + + return result, nil +} + +func (s *adminServiceImpl) DeleteAccount(ctx context.Context, id int64) error { + return s.accountRepo.Delete(ctx, id) +} + +func (s *adminServiceImpl) RefreshAccountCredentials(ctx context.Context, id int64) (*Account, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + // TODO: Implement refresh logic + return account, nil +} + +func (s *adminServiceImpl) ClearAccountError(ctx context.Context, id int64) (*Account, error) { + account, err := s.accountRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + account.Status = StatusActive + account.ErrorMessage = "" + if err := s.accountRepo.Update(ctx, account); err != nil { + return nil, err + } + return account, nil +} + +func (s *adminServiceImpl) SetAccountSchedulable(ctx context.Context, id int64, schedulable bool) (*Account, error) { + if err := s.accountRepo.SetSchedulable(ctx, id, schedulable); err != nil { + return nil, err + } + return s.accountRepo.GetByID(ctx, id) +} + +// Proxy management implementations +func (s *adminServiceImpl) ListProxies(ctx context.Context, page, pageSize int, protocol, status, search string) ([]Proxy, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + proxies, result, err := s.proxyRepo.ListWithFilters(ctx, params, protocol, status, search) + if err != nil { + return nil, 0, err + } + return proxies, result.Total, nil +} + +func (s *adminServiceImpl) ListProxiesWithAccountCount(ctx context.Context, page, pageSize int, protocol, status, search string) ([]ProxyWithAccountCount, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + proxies, result, err := s.proxyRepo.ListWithFiltersAndAccountCount(ctx, params, protocol, status, search) + if err != nil { + return nil, 0, err + } + s.attachProxyLatency(ctx, proxies) + return proxies, result.Total, nil +} + +func (s *adminServiceImpl) GetAllProxies(ctx context.Context) ([]Proxy, error) { + return s.proxyRepo.ListActive(ctx) +} + +func (s *adminServiceImpl) GetAllProxiesWithAccountCount(ctx context.Context) ([]ProxyWithAccountCount, error) { + proxies, err := s.proxyRepo.ListActiveWithAccountCount(ctx) + if err != nil { + return nil, err + } + 
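// Decorate the results with cached latency probe data before returning. +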
s.attachProxyLatency(ctx, proxies) + return proxies, nil +} + +func (s *adminServiceImpl) GetProxy(ctx context.Context, id int64) (*Proxy, error) { + return s.proxyRepo.GetByID(ctx, id) +} + +func (s *adminServiceImpl) CreateProxy(ctx context.Context, input *CreateProxyInput) (*Proxy, error) { + proxy := &Proxy{ + Name: input.Name, + Protocol: input.Protocol, + Host: input.Host, + Port: input.Port, + Username: input.Username, + Password: input.Password, + Status: StatusActive, + } + if err := s.proxyRepo.Create(ctx, proxy); err != nil { + return nil, err + } + // Probe latency asynchronously so creation isn't blocked by network timeout. + go s.probeProxyLatency(context.Background(), proxy) + return proxy, nil +} + +func (s *adminServiceImpl) UpdateProxy(ctx context.Context, id int64, input *UpdateProxyInput) (*Proxy, error) { + proxy, err := s.proxyRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Name != "" { + proxy.Name = input.Name + } + if input.Protocol != "" { + proxy.Protocol = input.Protocol + } + if input.Host != "" { + proxy.Host = input.Host + } + if input.Port != 0 { + proxy.Port = input.Port + } + if input.Username != "" { + proxy.Username = input.Username + } + if input.Password != "" { + proxy.Password = input.Password + } + if input.Status != "" { + proxy.Status = input.Status + } + + if err := s.proxyRepo.Update(ctx, proxy); err != nil { + return nil, err + } + return proxy, nil +} + +func (s *adminServiceImpl) DeleteProxy(ctx context.Context, id int64) error { + count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id) + if err != nil { + return err + } + if count > 0 { + return ErrProxyInUse + } + return s.proxyRepo.Delete(ctx, id) +} + +func (s *adminServiceImpl) BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error) { + result := &ProxyBatchDeleteResult{} + if len(ids) == 0 { + return result, nil + } + + for _, id := range ids { + count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id) + if err != nil { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: err.Error(), + }) + continue + } + if count > 0 { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: ErrProxyInUse.Error(), + }) + continue + } + if err := s.proxyRepo.Delete(ctx, id); err != nil { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: err.Error(), + }) + continue + } + result.DeletedIDs = append(result.DeletedIDs, id) + } + + return result, nil +} + +func (s *adminServiceImpl) GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) { + return s.proxyRepo.ListAccountSummariesByProxyID(ctx, proxyID) +} + +func (s *adminServiceImpl) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) { + return s.proxyRepo.ExistsByHostPortAuth(ctx, host, port, username, password) +} + +// Redeem code management implementations +func (s *adminServiceImpl) ListRedeemCodes(ctx context.Context, page, pageSize int, codeType, status, search string) ([]RedeemCode, int64, error) { + params := pagination.PaginationParams{Page: page, PageSize: pageSize} + codes, result, err := s.redeemCodeRepo.ListWithFilters(ctx, params, codeType, status, search) + if err != nil { + return nil, 0, err + } + return codes, result.Total, nil +} + +func (s *adminServiceImpl) GetRedeemCode(ctx context.Context, id int64) (*RedeemCode, error) { + return s.redeemCodeRepo.GetByID(ctx, id) +} + +func (s 
*adminServiceImpl) GenerateRedeemCodes(ctx context.Context, input *GenerateRedeemCodesInput) ([]RedeemCode, error) { + // Subscription-type codes must carry a GroupID + if input.Type == RedeemTypeSubscription { + if input.GroupID == nil { + return nil, errors.New("group_id is required for subscription type") + } + // Verify the group exists and is a subscription-type group + group, err := s.groupRepo.GetByID(ctx, *input.GroupID) + if err != nil { + return nil, fmt.Errorf("group not found: %w", err) + } + if !group.IsSubscriptionType() { + return nil, errors.New("group must be subscription type") + } + } + + codes := make([]RedeemCode, 0, input.Count) + for i := 0; i < input.Count; i++ { + codeValue, err := GenerateRedeemCode() + if err != nil { + return nil, err + } + code := RedeemCode{ + Code: codeValue, + Type: input.Type, + Value: input.Value, + Status: StatusUnused, + } + // Subscription-specific fields + if input.Type == RedeemTypeSubscription { + code.GroupID = input.GroupID + code.ValidityDays = input.ValidityDays + if code.ValidityDays <= 0 { + code.ValidityDays = 30 // default: 30 days + } + } + if err := s.redeemCodeRepo.Create(ctx, &code); err != nil { + return nil, err + } + codes = append(codes, code) + } + return codes, nil +} + +func (s *adminServiceImpl) DeleteRedeemCode(ctx context.Context, id int64) error { + return s.redeemCodeRepo.Delete(ctx, id) +} + +func (s *adminServiceImpl) BatchDeleteRedeemCodes(ctx context.Context, ids []int64) (int64, error) { + var deleted int64 + for _, id := range ids { + if err := s.redeemCodeRepo.Delete(ctx, id); err == nil { + deleted++ + } + } + return deleted, nil +} + +func (s *adminServiceImpl) ExpireRedeemCode(ctx context.Context, id int64) (*RedeemCode, error) { + code, err := s.redeemCodeRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + code.Status = StatusExpired + if err := s.redeemCodeRepo.Update(ctx, code); err != nil { + return nil, err + } + return code, nil +} + +func (s *adminServiceImpl) TestProxy(ctx context.Context, id int64) (*ProxyTestResult, error) { + proxy, err := s.proxyRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + proxyURL := proxy.URL() + exitInfo, latencyMs, err := s.proxyProber.ProbeProxy(ctx, proxyURL) + if err != nil { + s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{ + Success: false, + Message: err.Error(), + UpdatedAt: time.Now(), + }) + return &ProxyTestResult{ + Success: false, + Message: err.Error(), + }, nil + } + + latency := latencyMs + s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{ + Success: true, + LatencyMs: &latency, + Message: "Proxy is accessible", + IPAddress: exitInfo.IP, + Country: exitInfo.Country, + CountryCode: exitInfo.CountryCode, + Region: exitInfo.Region, + City: exitInfo.City, + UpdatedAt: time.Now(), + }) + return &ProxyTestResult{ + Success: true, + Message: "Proxy is accessible", + LatencyMs: latencyMs, + IPAddress: exitInfo.IP, + City: exitInfo.City, + Region: exitInfo.Region, + Country: exitInfo.Country, + CountryCode: exitInfo.CountryCode, + }, nil +} + +func (s *adminServiceImpl) probeProxyLatency(ctx context.Context, proxy *Proxy) { + if s.proxyProber == nil || proxy == nil { + return + } + exitInfo, latencyMs, err := s.proxyProber.ProbeProxy(ctx, proxy.URL()) + if err != nil { + s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{ + Success: false, + Message: err.Error(), + UpdatedAt: time.Now(), + }) + return + } + + latency := latencyMs + s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{ + Success: true, + LatencyMs: &latency, + Message: "Proxy is accessible", + IPAddress: exitInfo.IP, + Country: exitInfo.Country, + CountryCode: 
exitInfo.CountryCode, + Region: exitInfo.Region, + City: exitInfo.City, + UpdatedAt: time.Now(), + }) +} + +// checkMixedChannelRisk checks whether any target group mixes channels (Antigravity + Anthropic). +// If a mix exists, it returns an error prompting the user to confirm. +func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { + // Determine the current account's channel type (based on the platform field, not the type field) + currentPlatform := getAccountPlatform(currentAccountPlatform) + if currentPlatform == "" { + // Not Antigravity or Anthropic; no check needed + return nil + } + + // Inspect the other accounts in each group + for _, groupID := range groupIDs { + accounts, err := s.accountRepo.ListByGroup(ctx, groupID) + if err != nil { + return fmt.Errorf("get accounts in group %d: %w", groupID, err) + } + + // Look for accounts on a different channel + for _, account := range accounts { + if currentAccountID > 0 && account.ID == currentAccountID { + continue // skip the current account + } + + otherPlatform := getAccountPlatform(account.Platform) + if otherPlatform == "" { + continue // not Antigravity or Anthropic; skip + } + + // Mixed channels detected + if currentPlatform != otherPlatform { + group, _ := s.groupRepo.GetByID(ctx, groupID) + groupName := fmt.Sprintf("Group %d", groupID) + if group != nil { + groupName = group.Name + } + + return &MixedChannelError{ + GroupID: groupID, + GroupName: groupName, + CurrentPlatform: currentPlatform, + OtherPlatform: otherPlatform, + } + } + } + } + + return nil +} + +func (s *adminServiceImpl) attachProxyLatency(ctx context.Context, proxies []ProxyWithAccountCount) { + if s.proxyLatencyCache == nil || len(proxies) == 0 { + return + } + + ids := make([]int64, 0, len(proxies)) + for i := range proxies { + ids = append(ids, proxies[i].ID) + } + + latencies, err := s.proxyLatencyCache.GetProxyLatencies(ctx, ids) + if err != nil { + log.Printf("Warning: load proxy latency cache failed: %v", err) + return + } + + for i := range proxies { + info := latencies[proxies[i].ID] + if info == nil { + continue + } + if info.Success { + proxies[i].LatencyStatus = "success" + proxies[i].LatencyMs = info.LatencyMs + } else { + proxies[i].LatencyStatus = "failed" + } + proxies[i].LatencyMessage = info.Message + proxies[i].IPAddress = info.IPAddress + proxies[i].Country = info.Country + proxies[i].CountryCode = info.CountryCode + proxies[i].Region = info.Region + proxies[i].City = info.City + } +} + +func (s *adminServiceImpl) saveProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) { + if s.proxyLatencyCache == nil || info == nil { + return + } + if err := s.proxyLatencyCache.SetProxyLatency(ctx, proxyID, info); err != nil { + log.Printf("Warning: store proxy latency cache failed: %v", err) + } +} + +// getAccountPlatform maps an account's platform to the identifier used by the mixed-channel check +func getAccountPlatform(accountPlatform string) string { + switch strings.ToLower(strings.TrimSpace(accountPlatform)) { + case PlatformAntigravity: + return "Antigravity" + case PlatformAnthropic, "claude": + return "Anthropic" + default: + return "" + } +} + +// MixedChannelError reports a mixed-channel conflict within a group +type MixedChannelError struct { + GroupID int64 + GroupName string + CurrentPlatform string + OtherPlatform string +} + +func (e *MixedChannelError) Error() string { + return fmt.Sprintf("mixed_channel_warning: Group '%s' contains both %s and %s accounts. 
Using mixed channels in the same context may cause thinking block signature validation issues, which will fall back to non-thinking mode for historical messages.", + e.GroupName, e.CurrentPlatform, e.OtherPlatform) +} diff --git a/backend/internal/service/admin_service_bulk_update_test.go b/backend/internal/service/admin_service_bulk_update_test.go new file mode 100644 index 00000000..662b95fb --- /dev/null +++ b/backend/internal/service/admin_service_bulk_update_test.go @@ -0,0 +1,80 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +type accountRepoStubForBulkUpdate struct { + accountRepoStub + bulkUpdateErr error + bulkUpdateIDs []int64 + bindGroupErrByID map[int64]error +} + +func (s *accountRepoStubForBulkUpdate) BulkUpdate(_ context.Context, ids []int64, _ AccountBulkUpdate) (int64, error) { + s.bulkUpdateIDs = append([]int64{}, ids...) + if s.bulkUpdateErr != nil { + return 0, s.bulkUpdateErr + } + return int64(len(ids)), nil +} + +func (s *accountRepoStubForBulkUpdate) BindGroups(_ context.Context, accountID int64, _ []int64) error { + if err, ok := s.bindGroupErrByID[accountID]; ok { + return err + } + return nil +} + +// TestAdminService_BulkUpdateAccounts_AllSuccessIDs verifies that a fully successful bulk update returns the expected success_ids/failed_ids. +func TestAdminService_BulkUpdateAccounts_AllSuccessIDs(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{} + svc := &adminServiceImpl{accountRepo: repo} + + schedulable := true + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1, 2, 3}, + Schedulable: &schedulable, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.NoError(t, err) + require.Equal(t, 3, result.Success) + require.Equal(t, 0, result.Failed) + require.ElementsMatch(t, []int64{1, 2, 3}, result.SuccessIDs) + require.Empty(t, result.FailedIDs) + require.Len(t, result.Results, 3) +} + +// TestAdminService_BulkUpdateAccounts_PartialFailureIDs verifies that success_ids/failed_ids are correct on partial failure. +func TestAdminService_BulkUpdateAccounts_PartialFailureIDs(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{ + bindGroupErrByID: map[int64]error{ + 2: errors.New("bind failed"), + }, + } + svc := &adminServiceImpl{accountRepo: repo} + + groupIDs := []int64{10} + schedulable := false + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1, 2, 3}, + GroupIDs: &groupIDs, + Schedulable: &schedulable, + SkipMixedChannelCheck: true, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.NoError(t, err) + require.Equal(t, 2, result.Success) + require.Equal(t, 1, result.Failed) + require.ElementsMatch(t, []int64{1, 3}, result.SuccessIDs) + require.ElementsMatch(t, []int64{2}, result.FailedIDs) + require.Len(t, result.Results, 3) +} diff --git a/backend/internal/service/admin_service_create_user_test.go b/backend/internal/service/admin_service_create_user_test.go new file mode 100644 index 00000000..a0fe4d87 --- /dev/null +++ b/backend/internal/service/admin_service_create_user_test.go @@ -0,0 +1,67 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAdminService_CreateUser_Success(t *testing.T) { + repo := &userRepoStub{nextID: 10} + svc := &adminServiceImpl{userRepo: repo} + + input := &CreateUserInput{ + Email: "user@test.com", + Password: "strong-pass", + Username: "tester", + Notes: "note", + Balance: 12.5, + Concurrency: 7, + AllowedGroups: []int64{3, 5}, + } + + user, err := 
svc.CreateUser(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, user) + require.Equal(t, int64(10), user.ID) + require.Equal(t, input.Email, user.Email) + require.Equal(t, input.Username, user.Username) + require.Equal(t, input.Notes, user.Notes) + require.Equal(t, input.Balance, user.Balance) + require.Equal(t, input.Concurrency, user.Concurrency) + require.Equal(t, input.AllowedGroups, user.AllowedGroups) + require.Equal(t, RoleUser, user.Role) + require.Equal(t, StatusActive, user.Status) + require.True(t, user.CheckPassword(input.Password)) + require.Len(t, repo.created, 1) + require.Equal(t, user, repo.created[0]) +} + +func TestAdminService_CreateUser_EmailExists(t *testing.T) { + repo := &userRepoStub{createErr: ErrEmailExists} + svc := &adminServiceImpl{userRepo: repo} + + _, err := svc.CreateUser(context.Background(), &CreateUserInput{ + Email: "dup@test.com", + Password: "password", + }) + require.ErrorIs(t, err, ErrEmailExists) + require.Empty(t, repo.created) +} + +func TestAdminService_CreateUser_CreateError(t *testing.T) { + createErr := errors.New("db down") + repo := &userRepoStub{createErr: createErr} + svc := &adminServiceImpl{userRepo: repo} + + _, err := svc.CreateUser(context.Background(), &CreateUserInput{ + Email: "user@test.com", + Password: "password", + }) + require.ErrorIs(t, err, createErr) + require.Empty(t, repo.created) +} diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go new file mode 100644 index 00000000..afa433af --- /dev/null +++ b/backend/internal/service/admin_service_delete_test.go @@ -0,0 +1,489 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +type userRepoStub struct { + user *User + getErr error + createErr error + deleteErr error + exists bool + existsErr error + nextID int64 + created []*User + deletedIDs []int64 +} + +func (s *userRepoStub) Create(ctx context.Context, user *User) error { + if s.createErr != nil { + return s.createErr + } + if s.nextID != 0 && user.ID == 0 { + user.ID = s.nextID + } + s.created = append(s.created, user) + return nil +} + +func (s *userRepoStub) GetByID(ctx context.Context, id int64) (*User, error) { + if s.getErr != nil { + return nil, s.getErr + } + if s.user == nil { + return nil, ErrUserNotFound + } + return s.user, nil +} + +func (s *userRepoStub) GetByEmail(ctx context.Context, email string) (*User, error) { + panic("unexpected GetByEmail call") +} + +func (s *userRepoStub) GetFirstAdmin(ctx context.Context) (*User, error) { + panic("unexpected GetFirstAdmin call") +} + +func (s *userRepoStub) Update(ctx context.Context, user *User) error { + panic("unexpected Update call") +} + +func (s *userRepoStub) Delete(ctx context.Context, id int64) error { + s.deletedIDs = append(s.deletedIDs, id) + return s.deleteErr +} + +func (s *userRepoStub) List(ctx context.Context, params pagination.PaginationParams) ([]User, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *userRepoStub) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters UserListFilters) ([]User, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *userRepoStub) UpdateBalance(ctx context.Context, id int64, amount float64) error { + panic("unexpected UpdateBalance call") +} + +func (s *userRepoStub) 
DeductBalance(ctx context.Context, id int64, amount float64) error { + panic("unexpected DeductBalance call") +} + +func (s *userRepoStub) UpdateConcurrency(ctx context.Context, id int64, amount int) error { + panic("unexpected UpdateConcurrency call") +} + +func (s *userRepoStub) ExistsByEmail(ctx context.Context, email string) (bool, error) { + if s.existsErr != nil { + return false, s.existsErr + } + return s.exists, nil +} + +func (s *userRepoStub) RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected RemoveGroupFromAllowedGroups call") +} + +type groupRepoStub struct { + affectedUserIDs []int64 + deleteErr error + deleteCalls []int64 +} + +func (s *groupRepoStub) Create(ctx context.Context, group *Group) error { + panic("unexpected Create call") +} + +func (s *groupRepoStub) GetByID(ctx context.Context, id int64) (*Group, error) { + panic("unexpected GetByID call") +} + +func (s *groupRepoStub) GetByIDLite(ctx context.Context, id int64) (*Group, error) { + panic("unexpected GetByIDLite call") +} + +func (s *groupRepoStub) Update(ctx context.Context, group *Group) error { + panic("unexpected Update call") +} + +func (s *groupRepoStub) Delete(ctx context.Context, id int64) error { + panic("unexpected Delete call") +} + +func (s *groupRepoStub) DeleteCascade(ctx context.Context, id int64) ([]int64, error) { + s.deleteCalls = append(s.deleteCalls, id) + return s.affectedUserIDs, s.deleteErr +} + +func (s *groupRepoStub) List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *groupRepoStub) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *groupRepoStub) ListActive(ctx context.Context) ([]Group, error) { + panic("unexpected ListActive call") +} + +func (s *groupRepoStub) ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error) { + panic("unexpected ListActiveByPlatform call") +} + +func (s *groupRepoStub) ExistsByName(ctx context.Context, name string) (bool, error) { + panic("unexpected ExistsByName call") +} + +func (s *groupRepoStub) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected GetAccountCount call") +} + +func (s *groupRepoStub) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected DeleteAccountGroupsByGroupID call") +} + +type proxyRepoStub struct { + deleteErr error + countErr error + accountCount int64 + deletedIDs []int64 +} + +func (s *proxyRepoStub) Create(ctx context.Context, proxy *Proxy) error { + panic("unexpected Create call") +} + +func (s *proxyRepoStub) GetByID(ctx context.Context, id int64) (*Proxy, error) { + panic("unexpected GetByID call") +} + +func (s *proxyRepoStub) Update(ctx context.Context, proxy *Proxy) error { + panic("unexpected Update call") +} + +func (s *proxyRepoStub) Delete(ctx context.Context, id int64) error { + s.deletedIDs = append(s.deletedIDs, id) + return s.deleteErr +} + +func (s *proxyRepoStub) List(ctx context.Context, params pagination.PaginationParams) ([]Proxy, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *proxyRepoStub) ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]Proxy, *pagination.PaginationResult, 
error) { + panic("unexpected ListWithFilters call") +} + +func (s *proxyRepoStub) ListActive(ctx context.Context) ([]Proxy, error) { + panic("unexpected ListActive call") +} + +func (s *proxyRepoStub) ListActiveWithAccountCount(ctx context.Context) ([]ProxyWithAccountCount, error) { + panic("unexpected ListActiveWithAccountCount call") +} + +func (s *proxyRepoStub) ListWithFiltersAndAccountCount(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]ProxyWithAccountCount, *pagination.PaginationResult, error) { + panic("unexpected ListWithFiltersAndAccountCount call") +} + +func (s *proxyRepoStub) ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) { + panic("unexpected ExistsByHostPortAuth call") +} + +func (s *proxyRepoStub) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { + if s.countErr != nil { + return 0, s.countErr + } + return s.accountCount, nil +} + +func (s *proxyRepoStub) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) { + panic("unexpected ListAccountSummariesByProxyID call") +} + +type redeemRepoStub struct { + deleteErrByID map[int64]error + deletedIDs []int64 +} + +func (s *redeemRepoStub) Create(ctx context.Context, code *RedeemCode) error { + panic("unexpected Create call") +} + +func (s *redeemRepoStub) CreateBatch(ctx context.Context, codes []RedeemCode) error { + panic("unexpected CreateBatch call") +} + +func (s *redeemRepoStub) GetByID(ctx context.Context, id int64) (*RedeemCode, error) { + panic("unexpected GetByID call") +} + +func (s *redeemRepoStub) GetByCode(ctx context.Context, code string) (*RedeemCode, error) { + panic("unexpected GetByCode call") +} + +func (s *redeemRepoStub) Update(ctx context.Context, code *RedeemCode) error { + panic("unexpected Update call") +} + +func (s *redeemRepoStub) Delete(ctx context.Context, id int64) error { + s.deletedIDs = append(s.deletedIDs, id) + if s.deleteErrByID != nil { + if err, ok := s.deleteErrByID[id]; ok { + return err + } + } + return nil +} + +func (s *redeemRepoStub) Use(ctx context.Context, id, userID int64) error { + panic("unexpected Use call") +} + +func (s *redeemRepoStub) List(ctx context.Context, params pagination.PaginationParams) ([]RedeemCode, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *redeemRepoStub) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]RedeemCode, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *redeemRepoStub) ListByUser(ctx context.Context, userID int64, limit int) ([]RedeemCode, error) { + panic("unexpected ListByUser call") +} + +type subscriptionInvalidateCall struct { + userID int64 + groupID int64 +} + +type billingCacheStub struct { + invalidations chan subscriptionInvalidateCall +} + +func newBillingCacheStub(buffer int) *billingCacheStub { + return &billingCacheStub{invalidations: make(chan subscriptionInvalidateCall, buffer)} +} + +func (s *billingCacheStub) GetUserBalance(ctx context.Context, userID int64) (float64, error) { + panic("unexpected GetUserBalance call") +} + +func (s *billingCacheStub) SetUserBalance(ctx context.Context, userID int64, balance float64) error { + panic("unexpected SetUserBalance call") +} + +func (s *billingCacheStub) DeductUserBalance(ctx context.Context, userID int64, amount float64) error { + panic("unexpected DeductUserBalance 
call") +} + +func (s *billingCacheStub) InvalidateUserBalance(ctx context.Context, userID int64) error { + panic("unexpected InvalidateUserBalance call") +} + +func (s *billingCacheStub) GetSubscriptionCache(ctx context.Context, userID, groupID int64) (*SubscriptionCacheData, error) { + panic("unexpected GetSubscriptionCache call") +} + +func (s *billingCacheStub) SetSubscriptionCache(ctx context.Context, userID, groupID int64, data *SubscriptionCacheData) error { + panic("unexpected SetSubscriptionCache call") +} + +func (s *billingCacheStub) UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, cost float64) error { + panic("unexpected UpdateSubscriptionUsage call") +} + +func (s *billingCacheStub) InvalidateSubscriptionCache(ctx context.Context, userID, groupID int64) error { + s.invalidations <- subscriptionInvalidateCall{userID: userID, groupID: groupID} + return nil +} + +func waitForInvalidations(t *testing.T, ch <-chan subscriptionInvalidateCall, expected int) []subscriptionInvalidateCall { + t.Helper() + calls := make([]subscriptionInvalidateCall, 0, expected) + timeout := time.After(2 * time.Second) + for len(calls) < expected { + select { + case call := <-ch: + calls = append(calls, call) + case <-timeout: + t.Fatalf("timeout waiting for %d invalidations, got %d", expected, len(calls)) + } + } + return calls +} + +func TestAdminService_DeleteUser_Success(t *testing.T) { + repo := &userRepoStub{user: &User{ID: 7, Role: RoleUser}} + svc := &adminServiceImpl{userRepo: repo} + + err := svc.DeleteUser(context.Background(), 7) + require.NoError(t, err) + require.Equal(t, []int64{7}, repo.deletedIDs) +} + +func TestAdminService_DeleteUser_NotFound(t *testing.T) { + repo := &userRepoStub{getErr: ErrUserNotFound} + svc := &adminServiceImpl{userRepo: repo} + + err := svc.DeleteUser(context.Background(), 404) + require.ErrorIs(t, err, ErrUserNotFound) + require.Empty(t, repo.deletedIDs) +} + +func TestAdminService_DeleteUser_AdminGuard(t *testing.T) { + repo := &userRepoStub{user: &User{ID: 1, Role: RoleAdmin}} + svc := &adminServiceImpl{userRepo: repo} + + err := svc.DeleteUser(context.Background(), 1) + require.Error(t, err) + require.ErrorContains(t, err, "cannot delete admin user") + require.Empty(t, repo.deletedIDs) +} + +func TestAdminService_DeleteUser_DeleteError(t *testing.T) { + deleteErr := errors.New("delete failed") + repo := &userRepoStub{ + user: &User{ID: 9, Role: RoleUser}, + deleteErr: deleteErr, + } + svc := &adminServiceImpl{userRepo: repo} + + err := svc.DeleteUser(context.Background(), 9) + require.ErrorIs(t, err, deleteErr) + require.Equal(t, []int64{9}, repo.deletedIDs) +} + +func TestAdminService_DeleteGroup_Success_WithCacheInvalidation(t *testing.T) { + cache := newBillingCacheStub(2) + repo := &groupRepoStub{affectedUserIDs: []int64{11, 12}} + svc := &adminServiceImpl{ + groupRepo: repo, + billingCacheService: &BillingCacheService{cache: cache}, + } + + err := svc.DeleteGroup(context.Background(), 5) + require.NoError(t, err) + require.Equal(t, []int64{5}, repo.deleteCalls) + + calls := waitForInvalidations(t, cache.invalidations, 2) + require.ElementsMatch(t, []subscriptionInvalidateCall{ + {userID: 11, groupID: 5}, + {userID: 12, groupID: 5}, + }, calls) +} + +func TestAdminService_DeleteGroup_NotFound(t *testing.T) { + repo := &groupRepoStub{deleteErr: ErrGroupNotFound} + svc := &adminServiceImpl{groupRepo: repo} + + err := svc.DeleteGroup(context.Background(), 99) + require.ErrorIs(t, err, ErrGroupNotFound) +} + +func 
TestAdminService_DeleteGroup_Error(t *testing.T) { + deleteErr := errors.New("delete failed") + repo := &groupRepoStub{deleteErr: deleteErr} + svc := &adminServiceImpl{groupRepo: repo} + + err := svc.DeleteGroup(context.Background(), 42) + require.ErrorIs(t, err, deleteErr) +} + +func TestAdminService_DeleteProxy_Success(t *testing.T) { + repo := &proxyRepoStub{} + svc := &adminServiceImpl{proxyRepo: repo} + + err := svc.DeleteProxy(context.Background(), 7) + require.NoError(t, err) + require.Equal(t, []int64{7}, repo.deletedIDs) +} + +func TestAdminService_DeleteProxy_Idempotent(t *testing.T) { + repo := &proxyRepoStub{} + svc := &adminServiceImpl{proxyRepo: repo} + + err := svc.DeleteProxy(context.Background(), 404) + require.NoError(t, err) + require.Equal(t, []int64{404}, repo.deletedIDs) +} + +func TestAdminService_DeleteProxy_InUse(t *testing.T) { + repo := &proxyRepoStub{accountCount: 2} + svc := &adminServiceImpl{proxyRepo: repo} + + err := svc.DeleteProxy(context.Background(), 77) + require.ErrorIs(t, err, ErrProxyInUse) + require.Empty(t, repo.deletedIDs) +} + +func TestAdminService_DeleteProxy_Error(t *testing.T) { + deleteErr := errors.New("delete failed") + repo := &proxyRepoStub{deleteErr: deleteErr} + svc := &adminServiceImpl{proxyRepo: repo} + + err := svc.DeleteProxy(context.Background(), 33) + require.ErrorIs(t, err, deleteErr) +} + +func TestAdminService_DeleteRedeemCode_Success(t *testing.T) { + repo := &redeemRepoStub{} + svc := &adminServiceImpl{redeemCodeRepo: repo} + + err := svc.DeleteRedeemCode(context.Background(), 10) + require.NoError(t, err) + require.Equal(t, []int64{10}, repo.deletedIDs) +} + +func TestAdminService_DeleteRedeemCode_Idempotent(t *testing.T) { + repo := &redeemRepoStub{} + svc := &adminServiceImpl{redeemCodeRepo: repo} + + err := svc.DeleteRedeemCode(context.Background(), 999) + require.NoError(t, err) + require.Equal(t, []int64{999}, repo.deletedIDs) +} + +func TestAdminService_DeleteRedeemCode_Error(t *testing.T) { + deleteErr := errors.New("delete failed") + repo := &redeemRepoStub{deleteErrByID: map[int64]error{1: deleteErr}} + svc := &adminServiceImpl{redeemCodeRepo: repo} + + err := svc.DeleteRedeemCode(context.Background(), 1) + require.ErrorIs(t, err, deleteErr) + require.Equal(t, []int64{1}, repo.deletedIDs) +} + +func TestAdminService_BatchDeleteRedeemCodes_Success(t *testing.T) { + repo := &redeemRepoStub{} + svc := &adminServiceImpl{redeemCodeRepo: repo} + + deleted, err := svc.BatchDeleteRedeemCodes(context.Background(), []int64{1, 2, 3}) + require.NoError(t, err) + require.Equal(t, int64(3), deleted) + require.Equal(t, []int64{1, 2, 3}, repo.deletedIDs) +} + +func TestAdminService_BatchDeleteRedeemCodes_PartialFailures(t *testing.T) { + repo := &redeemRepoStub{ + deleteErrByID: map[int64]error{ + 2: errors.New("db error"), + }, + } + svc := &adminServiceImpl{redeemCodeRepo: repo} + + deleted, err := svc.BatchDeleteRedeemCodes(context.Background(), []int64{1, 2, 3}) + require.NoError(t, err) + require.Equal(t, int64(2), deleted) + require.Equal(t, []int64{1, 2, 3}, repo.deletedIDs) +} diff --git a/backend/internal/service/admin_service_group_test.go b/backend/internal/service/admin_service_group_test.go new file mode 100644 index 00000000..e0574e2e --- /dev/null +++ b/backend/internal/service/admin_service_group_test.go @@ -0,0 +1,380 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +// 
groupRepoStubForAdmin is a GroupRepository stub for testing AdminService +type groupRepoStubForAdmin struct { + created *Group // records the argument of the Create call + updated *Group // records the argument of the Update call + getByID *Group // return value for GetByID + getErr error // error returned by GetByID + + listWithFiltersCalls int + listWithFiltersParams pagination.PaginationParams + listWithFiltersPlatform string + listWithFiltersStatus string + listWithFiltersSearch string + listWithFiltersIsExclusive *bool + listWithFiltersGroups []Group + listWithFiltersResult *pagination.PaginationResult + listWithFiltersErr error +} + +func (s *groupRepoStubForAdmin) Create(_ context.Context, g *Group) error { + s.created = g + return nil +} + +func (s *groupRepoStubForAdmin) Update(_ context.Context, g *Group) error { + s.updated = g + return nil +} + +func (s *groupRepoStubForAdmin) GetByID(_ context.Context, _ int64) (*Group, error) { + if s.getErr != nil { + return nil, s.getErr + } + return s.getByID, nil +} + +func (s *groupRepoStubForAdmin) GetByIDLite(_ context.Context, _ int64) (*Group, error) { + if s.getErr != nil { + return nil, s.getErr + } + return s.getByID, nil +} + +func (s *groupRepoStubForAdmin) Delete(_ context.Context, _ int64) error { + panic("unexpected Delete call") +} + +func (s *groupRepoStubForAdmin) DeleteCascade(_ context.Context, _ int64) ([]int64, error) { + panic("unexpected DeleteCascade call") +} + +func (s *groupRepoStubForAdmin) List(_ context.Context, _ pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *groupRepoStubForAdmin) ListWithFilters(_ context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) { + s.listWithFiltersCalls++ + s.listWithFiltersParams = params + s.listWithFiltersPlatform = platform + s.listWithFiltersStatus = status + s.listWithFiltersSearch = search + s.listWithFiltersIsExclusive = isExclusive + + if s.listWithFiltersErr != nil { + return nil, nil, s.listWithFiltersErr + } + + result := s.listWithFiltersResult + if result == nil { + result = &pagination.PaginationResult{ + Total: int64(len(s.listWithFiltersGroups)), + Page: params.Page, + PageSize: params.PageSize, + } + } + + return s.listWithFiltersGroups, result, nil +} + +func (s *groupRepoStubForAdmin) ListActive(_ context.Context) ([]Group, error) { + panic("unexpected ListActive call") +} + +func (s *groupRepoStubForAdmin) ListActiveByPlatform(_ context.Context, _ string) ([]Group, error) { + panic("unexpected ListActiveByPlatform call") +} + +func (s *groupRepoStubForAdmin) ExistsByName(_ context.Context, _ string) (bool, error) { + panic("unexpected ExistsByName call") +} + +func (s *groupRepoStubForAdmin) GetAccountCount(_ context.Context, _ int64) (int64, error) { + panic("unexpected GetAccountCount call") +} + +func (s *groupRepoStubForAdmin) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { + panic("unexpected DeleteAccountGroupsByGroupID call") +} + +// TestAdminService_CreateGroup_WithImagePricing verifies that ImagePrice fields are passed through correctly when creating a group +func TestAdminService_CreateGroup_WithImagePricing(t *testing.T) { + repo := &groupRepoStubForAdmin{} + svc := &adminServiceImpl{groupRepo: repo} + + price1K := 0.10 + price2K := 0.15 + price4K := 0.30 + + input := &CreateGroupInput{ + Name: "test-group", + Description: "Test group", + Platform: PlatformAntigravity, + RateMultiplier: 1.0, + ImagePrice1K: &price1K, + ImagePrice2K: &price2K, + ImagePrice4K: &price4K, + } + + group, err := 
svc.CreateGroup(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, group) + + // Verify the repo received the correct fields + require.NotNil(t, repo.created) + require.NotNil(t, repo.created.ImagePrice1K) + require.NotNil(t, repo.created.ImagePrice2K) + require.NotNil(t, repo.created.ImagePrice4K) + require.InDelta(t, 0.10, *repo.created.ImagePrice1K, 0.0001) + require.InDelta(t, 0.15, *repo.created.ImagePrice2K, 0.0001) + require.InDelta(t, 0.30, *repo.created.ImagePrice4K, 0.0001) +} + +// TestAdminService_CreateGroup_NilImagePricing verifies creation succeeds when the ImagePrice fields are nil +func TestAdminService_CreateGroup_NilImagePricing(t *testing.T) { + repo := &groupRepoStubForAdmin{} + svc := &adminServiceImpl{groupRepo: repo} + + input := &CreateGroupInput{ + Name: "test-group", + Description: "Test group", + Platform: PlatformAntigravity, + RateMultiplier: 1.0, + // all ImagePrice fields are nil + } + + group, err := svc.CreateGroup(context.Background(), input) + require.NoError(t, err) + require.NotNil(t, group) + + // Verify the ImagePrice fields are nil + require.NotNil(t, repo.created) + require.Nil(t, repo.created.ImagePrice1K) + require.Nil(t, repo.created.ImagePrice2K) + require.Nil(t, repo.created.ImagePrice4K) +} + +// TestAdminService_UpdateGroup_WithImagePricing verifies that ImagePrice fields are updated correctly when updating a group +func TestAdminService_UpdateGroup_WithImagePricing(t *testing.T) { + existingGroup := &Group{ + ID: 1, + Name: "existing-group", + Platform: PlatformAntigravity, + Status: StatusActive, + } + repo := &groupRepoStubForAdmin{getByID: existingGroup} + svc := &adminServiceImpl{groupRepo: repo} + + price1K := 0.12 + price2K := 0.18 + price4K := 0.36 + + input := &UpdateGroupInput{ + ImagePrice1K: &price1K, + ImagePrice2K: &price2K, + ImagePrice4K: &price4K, + } + + group, err := svc.UpdateGroup(context.Background(), 1, input) + require.NoError(t, err) + require.NotNil(t, group) + + // Verify the repo received the updated fields + require.NotNil(t, repo.updated) + require.NotNil(t, repo.updated.ImagePrice1K) + require.NotNil(t, repo.updated.ImagePrice2K) + require.NotNil(t, repo.updated.ImagePrice4K) + require.InDelta(t, 0.12, *repo.updated.ImagePrice1K, 0.0001) + require.InDelta(t, 0.18, *repo.updated.ImagePrice2K, 0.0001) + require.InDelta(t, 0.36, *repo.updated.ImagePrice4K, 0.0001) +} + +// TestAdminService_UpdateGroup_PartialImagePricing verifies updating only a subset of the ImagePrice fields +func TestAdminService_UpdateGroup_PartialImagePricing(t *testing.T) { + oldPrice2K := 0.15 + existingGroup := &Group{ + ID: 1, + Name: "existing-group", + Platform: PlatformAntigravity, + Status: StatusActive, + ImagePrice2K: &oldPrice2K, // existing 2K price + } + repo := &groupRepoStubForAdmin{getByID: existingGroup} + svc := &adminServiceImpl{groupRepo: repo} + + // Update only the 1K price + price1K := 0.10 + input := &UpdateGroupInput{ + ImagePrice1K: &price1K, + // ImagePrice2K and ImagePrice4K stay nil and are not updated + } + + group, err := svc.UpdateGroup(context.Background(), 1, input) + require.NoError(t, err) + require.NotNil(t, group) + + // Verify: 1K is updated, 2K keeps its original value, 4K remains nil + require.NotNil(t, repo.updated) + require.NotNil(t, repo.updated.ImagePrice1K) + require.InDelta(t, 0.10, *repo.updated.ImagePrice1K, 0.0001) + require.NotNil(t, repo.updated.ImagePrice2K) + require.InDelta(t, 0.15, *repo.updated.ImagePrice2K, 0.0001) // original value preserved + require.Nil(t, repo.updated.ImagePrice4K) +} + +func TestAdminService_ListGroups_WithSearch(t *testing.T) { + // Tests: + // 1. the search parameter is passed through to the repository layer + // 2. behavior when search is an empty string + // 3. 
search combined with other filter conditions + + t.Run("search parameter is passed through to the repository layer", func(t *testing.T) { + repo := &groupRepoStubForAdmin{ + listWithFiltersGroups: []Group{{ID: 1, Name: "alpha"}}, + listWithFiltersResult: &pagination.PaginationResult{Total: 1}, + } + svc := &adminServiceImpl{groupRepo: repo} + + groups, total, err := svc.ListGroups(context.Background(), 1, 20, "", "", "alpha", nil) + require.NoError(t, err) + require.Equal(t, int64(1), total) + require.Equal(t, []Group{{ID: 1, Name: "alpha"}}, groups) + + require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 1, PageSize: 20}, repo.listWithFiltersParams) + require.Equal(t, "alpha", repo.listWithFiltersSearch) + require.Nil(t, repo.listWithFiltersIsExclusive) + }) + + t.Run("empty search is passed through as an empty string", func(t *testing.T) { + repo := &groupRepoStubForAdmin{ + listWithFiltersGroups: []Group{}, + listWithFiltersResult: &pagination.PaginationResult{Total: 0}, + } + svc := &adminServiceImpl{groupRepo: repo} + + groups, total, err := svc.ListGroups(context.Background(), 2, 10, "", "", "", nil) + require.NoError(t, err) + require.Empty(t, groups) + require.Equal(t, int64(0), total) + + require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 2, PageSize: 10}, repo.listWithFiltersParams) + require.Equal(t, "", repo.listWithFiltersSearch) + require.Nil(t, repo.listWithFiltersIsExclusive) + }) + + t.Run("search combined with other filter conditions", func(t *testing.T) { + isExclusive := true + repo := &groupRepoStubForAdmin{ + listWithFiltersGroups: []Group{{ID: 2, Name: "beta"}}, + listWithFiltersResult: &pagination.PaginationResult{Total: 42}, + } + svc := &adminServiceImpl{groupRepo: repo} + + groups, total, err := svc.ListGroups(context.Background(), 3, 50, PlatformAntigravity, StatusActive, "beta", &isExclusive) + require.NoError(t, err) + require.Equal(t, int64(42), total) + require.Equal(t, []Group{{ID: 2, Name: "beta"}}, groups) + + require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 3, PageSize: 50}, repo.listWithFiltersParams) + require.Equal(t, PlatformAntigravity, repo.listWithFiltersPlatform) + require.Equal(t, StatusActive, repo.listWithFiltersStatus) + require.Equal(t, "beta", repo.listWithFiltersSearch) + require.NotNil(t, repo.listWithFiltersIsExclusive) + require.True(t, *repo.listWithFiltersIsExclusive) + }) +} + +func TestAdminService_ValidateFallbackGroup_DetectsCycle(t *testing.T) { + groupID := int64(1) + fallbackID := int64(2) + repo := &groupRepoStubForFallbackCycle{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + FallbackGroupID: &fallbackID, + }, + fallbackID: { + ID: fallbackID, + FallbackGroupID: &groupID, + }, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + err := svc.validateFallbackGroup(context.Background(), groupID, fallbackID) + require.Error(t, err) + require.Contains(t, err.Error(), "fallback group cycle") +} + +type groupRepoStubForFallbackCycle struct { + groups map[int64]*Group +} + +func (s *groupRepoStubForFallbackCycle) Create(_ context.Context, _ *Group) error { + panic("unexpected Create call") +} + +func (s *groupRepoStubForFallbackCycle) Update(_ context.Context, _ *Group) error { + panic("unexpected Update call") +} + +func (s *groupRepoStubForFallbackCycle) GetByID(ctx context.Context, id int64) (*Group, error) { + return s.GetByIDLite(ctx, id) +} + +func (s *groupRepoStubForFallbackCycle) GetByIDLite(_ context.Context, id int64) (*Group, error) { + if g, ok := s.groups[id]; ok { + 
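// Serve the stubbed group when present; unknown IDs fall through to ErrGroupNotFound below. +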
return g, nil + } + return nil, ErrGroupNotFound +} + +func (s *groupRepoStubForFallbackCycle) Delete(_ context.Context, _ int64) error { + panic("unexpected Delete call") +} + +func (s *groupRepoStubForFallbackCycle) DeleteCascade(_ context.Context, _ int64) ([]int64, error) { + panic("unexpected DeleteCascade call") +} + +func (s *groupRepoStubForFallbackCycle) List(_ context.Context, _ pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *groupRepoStubForFallbackCycle) ListWithFilters(_ context.Context, _ pagination.PaginationParams, _, _, _ string, _ *bool) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *groupRepoStubForFallbackCycle) ListActive(_ context.Context) ([]Group, error) { + panic("unexpected ListActive call") +} + +func (s *groupRepoStubForFallbackCycle) ListActiveByPlatform(_ context.Context, _ string) ([]Group, error) { + panic("unexpected ListActiveByPlatform call") +} + +func (s *groupRepoStubForFallbackCycle) ExistsByName(_ context.Context, _ string) (bool, error) { + panic("unexpected ExistsByName call") +} + +func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int64) (int64, error) { + panic("unexpected GetAccountCount call") +} + +func (s *groupRepoStubForFallbackCycle) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { + panic("unexpected DeleteAccountGroupsByGroupID call") +} diff --git a/backend/internal/service/admin_service_search_test.go b/backend/internal/service/admin_service_search_test.go new file mode 100644 index 00000000..7506c6db --- /dev/null +++ b/backend/internal/service/admin_service_search_test.go @@ -0,0 +1,238 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +type accountRepoStubForAdminList struct { + accountRepoStub + + listWithFiltersCalls int + listWithFiltersParams pagination.PaginationParams + listWithFiltersPlatform string + listWithFiltersType string + listWithFiltersStatus string + listWithFiltersSearch string + listWithFiltersAccounts []Account + listWithFiltersResult *pagination.PaginationResult + listWithFiltersErr error +} + +func (s *accountRepoStubForAdminList) ListWithFilters(_ context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]Account, *pagination.PaginationResult, error) { + s.listWithFiltersCalls++ + s.listWithFiltersParams = params + s.listWithFiltersPlatform = platform + s.listWithFiltersType = accountType + s.listWithFiltersStatus = status + s.listWithFiltersSearch = search + + if s.listWithFiltersErr != nil { + return nil, nil, s.listWithFiltersErr + } + + result := s.listWithFiltersResult + if result == nil { + result = &pagination.PaginationResult{ + Total: int64(len(s.listWithFiltersAccounts)), + Page: params.Page, + PageSize: params.PageSize, + } + } + + return s.listWithFiltersAccounts, result, nil +} + +type proxyRepoStubForAdminList struct { + proxyRepoStub + + listWithFiltersCalls int + listWithFiltersParams pagination.PaginationParams + listWithFiltersProtocol string + listWithFiltersStatus string + listWithFiltersSearch string + listWithFiltersProxies []Proxy + listWithFiltersResult *pagination.PaginationResult + listWithFiltersErr error + + listWithFiltersAndAccountCountCalls int + listWithFiltersAndAccountCountParams pagination.PaginationParams + 
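// arguments captured from the most recent ListWithFiltersAndAccountCount call +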
listWithFiltersAndAccountCountProtocol string + listWithFiltersAndAccountCountStatus string + listWithFiltersAndAccountCountSearch string + listWithFiltersAndAccountCountProxies []ProxyWithAccountCount + listWithFiltersAndAccountCountResult *pagination.PaginationResult + listWithFiltersAndAccountCountErr error +} + +func (s *proxyRepoStubForAdminList) ListWithFilters(_ context.Context, params pagination.PaginationParams, protocol, status, search string) ([]Proxy, *pagination.PaginationResult, error) { + s.listWithFiltersCalls++ + s.listWithFiltersParams = params + s.listWithFiltersProtocol = protocol + s.listWithFiltersStatus = status + s.listWithFiltersSearch = search + + if s.listWithFiltersErr != nil { + return nil, nil, s.listWithFiltersErr + } + + result := s.listWithFiltersResult + if result == nil { + result = &pagination.PaginationResult{ + Total: int64(len(s.listWithFiltersProxies)), + Page: params.Page, + PageSize: params.PageSize, + } + } + + return s.listWithFiltersProxies, result, nil +} + +func (s *proxyRepoStubForAdminList) ListWithFiltersAndAccountCount(_ context.Context, params pagination.PaginationParams, protocol, status, search string) ([]ProxyWithAccountCount, *pagination.PaginationResult, error) { + s.listWithFiltersAndAccountCountCalls++ + s.listWithFiltersAndAccountCountParams = params + s.listWithFiltersAndAccountCountProtocol = protocol + s.listWithFiltersAndAccountCountStatus = status + s.listWithFiltersAndAccountCountSearch = search + + if s.listWithFiltersAndAccountCountErr != nil { + return nil, nil, s.listWithFiltersAndAccountCountErr + } + + result := s.listWithFiltersAndAccountCountResult + if result == nil { + result = &pagination.PaginationResult{ + Total: int64(len(s.listWithFiltersAndAccountCountProxies)), + Page: params.Page, + PageSize: params.PageSize, + } + } + + return s.listWithFiltersAndAccountCountProxies, result, nil +} + +type redeemRepoStubForAdminList struct { + redeemRepoStub + + listWithFiltersCalls int + listWithFiltersParams pagination.PaginationParams + listWithFiltersType string + listWithFiltersStatus string + listWithFiltersSearch string + listWithFiltersCodes []RedeemCode + listWithFiltersResult *pagination.PaginationResult + listWithFiltersErr error +} + +func (s *redeemRepoStubForAdminList) ListWithFilters(_ context.Context, params pagination.PaginationParams, codeType, status, search string) ([]RedeemCode, *pagination.PaginationResult, error) { + s.listWithFiltersCalls++ + s.listWithFiltersParams = params + s.listWithFiltersType = codeType + s.listWithFiltersStatus = status + s.listWithFiltersSearch = search + + if s.listWithFiltersErr != nil { + return nil, nil, s.listWithFiltersErr + } + + result := s.listWithFiltersResult + if result == nil { + result = &pagination.PaginationResult{ + Total: int64(len(s.listWithFiltersCodes)), + Page: params.Page, + PageSize: params.PageSize, + } + } + + return s.listWithFiltersCodes, result, nil +} + +func TestAdminService_ListAccounts_WithSearch(t *testing.T) { + t.Run("search parameter is passed through to the repository layer", func(t *testing.T) { + repo := &accountRepoStubForAdminList{ + listWithFiltersAccounts: []Account{{ID: 1, Name: "acc"}}, + listWithFiltersResult: &pagination.PaginationResult{Total: 10}, + } + svc := &adminServiceImpl{accountRepo: repo} + + accounts, total, err := svc.ListAccounts(context.Background(), 1, 20, PlatformGemini, AccountTypeOAuth, StatusActive, "acc") + require.NoError(t, err) + require.Equal(t, int64(10), total) + require.Equal(t, []Account{{ID: 1, Name: "acc"}}, accounts) + + 
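// The stub should have observed exactly one call carrying the expected pagination and filter arguments. +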
require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 1, PageSize: 20}, repo.listWithFiltersParams) + require.Equal(t, PlatformGemini, repo.listWithFiltersPlatform) + require.Equal(t, AccountTypeOAuth, repo.listWithFiltersType) + require.Equal(t, StatusActive, repo.listWithFiltersStatus) + require.Equal(t, "acc", repo.listWithFiltersSearch) + }) +} + +func TestAdminService_ListProxies_WithSearch(t *testing.T) { + t.Run("search parameter is passed through to the repository layer", func(t *testing.T) { + repo := &proxyRepoStubForAdminList{ + listWithFiltersProxies: []Proxy{{ID: 2, Name: "p1"}}, + listWithFiltersResult: &pagination.PaginationResult{Total: 7}, + } + svc := &adminServiceImpl{proxyRepo: repo} + + proxies, total, err := svc.ListProxies(context.Background(), 3, 50, "http", StatusActive, "p1") + require.NoError(t, err) + require.Equal(t, int64(7), total) + require.Equal(t, []Proxy{{ID: 2, Name: "p1"}}, proxies) + + require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 3, PageSize: 50}, repo.listWithFiltersParams) + require.Equal(t, "http", repo.listWithFiltersProtocol) + require.Equal(t, StatusActive, repo.listWithFiltersStatus) + require.Equal(t, "p1", repo.listWithFiltersSearch) + }) +} + +func TestAdminService_ListProxiesWithAccountCount_WithSearch(t *testing.T) { + t.Run("search parameter is passed through to the repository layer", func(t *testing.T) { + repo := &proxyRepoStubForAdminList{ + listWithFiltersAndAccountCountProxies: []ProxyWithAccountCount{{Proxy: Proxy{ID: 3, Name: "p2"}, AccountCount: 5}}, + listWithFiltersAndAccountCountResult: &pagination.PaginationResult{Total: 9}, + } + svc := &adminServiceImpl{proxyRepo: repo} + + proxies, total, err := svc.ListProxiesWithAccountCount(context.Background(), 2, 10, "socks5", StatusDisabled, "p2") + require.NoError(t, err) + require.Equal(t, int64(9), total) + require.Equal(t, []ProxyWithAccountCount{{Proxy: Proxy{ID: 3, Name: "p2"}, AccountCount: 5}}, proxies) + + require.Equal(t, 1, repo.listWithFiltersAndAccountCountCalls) + require.Equal(t, pagination.PaginationParams{Page: 2, PageSize: 10}, repo.listWithFiltersAndAccountCountParams) + require.Equal(t, "socks5", repo.listWithFiltersAndAccountCountProtocol) + require.Equal(t, StatusDisabled, repo.listWithFiltersAndAccountCountStatus) + require.Equal(t, "p2", repo.listWithFiltersAndAccountCountSearch) + }) +} + +func TestAdminService_ListRedeemCodes_WithSearch(t *testing.T) { + t.Run("search parameter is passed through to the repository layer", func(t *testing.T) { + repo := &redeemRepoStubForAdminList{ + listWithFiltersCodes: []RedeemCode{{ID: 4, Code: "ABC"}}, + listWithFiltersResult: &pagination.PaginationResult{Total: 3}, + } + svc := &adminServiceImpl{redeemCodeRepo: repo} + + codes, total, err := svc.ListRedeemCodes(context.Background(), 1, 20, RedeemTypeBalance, StatusUnused, "ABC") + require.NoError(t, err) + require.Equal(t, int64(3), total) + require.Equal(t, []RedeemCode{{ID: 4, Code: "ABC"}}, codes) + + require.Equal(t, 1, repo.listWithFiltersCalls) + require.Equal(t, pagination.PaginationParams{Page: 1, PageSize: 20}, repo.listWithFiltersParams) + require.Equal(t, RedeemTypeBalance, repo.listWithFiltersType) + require.Equal(t, StatusUnused, repo.listWithFiltersStatus) + require.Equal(t, "ABC", repo.listWithFiltersSearch) + }) +} diff --git a/backend/internal/service/admin_service_update_balance_test.go b/backend/internal/service/admin_service_update_balance_test.go new file mode 100644 index 00000000..d3b3c700 --- /dev/null +++ 
b/backend/internal/service/admin_service_update_balance_test.go @@ -0,0 +1,97 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +type balanceUserRepoStub struct { + *userRepoStub + updateErr error + updated []*User +} + +func (s *balanceUserRepoStub) Update(ctx context.Context, user *User) error { + if s.updateErr != nil { + return s.updateErr + } + if user == nil { + return nil + } + clone := *user + s.updated = append(s.updated, &clone) + if s.userRepoStub != nil { + s.userRepoStub.user = &clone + } + return nil +} + +type balanceRedeemRepoStub struct { + *redeemRepoStub + created []*RedeemCode +} + +func (s *balanceRedeemRepoStub) Create(ctx context.Context, code *RedeemCode) error { + if code == nil { + return nil + } + clone := *code + s.created = append(s.created, &clone) + return nil +} + +type authCacheInvalidatorStub struct { + userIDs []int64 + groupIDs []int64 + keys []string +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByKey(ctx context.Context, key string) { + s.keys = append(s.keys, key) +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByUserID(ctx context.Context, userID int64) { + s.userIDs = append(s.userIDs, userID) +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64) { + s.groupIDs = append(s.groupIDs, groupID) +} + +func TestAdminService_UpdateUserBalance_InvalidatesAuthCache(t *testing.T) { + baseRepo := &userRepoStub{user: &User{ID: 7, Balance: 10}} + repo := &balanceUserRepoStub{userRepoStub: baseRepo} + redeemRepo := &balanceRedeemRepoStub{redeemRepoStub: &redeemRepoStub{}} + invalidator := &authCacheInvalidatorStub{} + svc := &adminServiceImpl{ + userRepo: repo, + redeemCodeRepo: redeemRepo, + authCacheInvalidator: invalidator, + } + + _, err := svc.UpdateUserBalance(context.Background(), 7, 5, "add", "") + require.NoError(t, err) + require.Equal(t, []int64{7}, invalidator.userIDs) + require.Len(t, redeemRepo.created, 1) +} + +func TestAdminService_UpdateUserBalance_NoChangeNoInvalidate(t *testing.T) { + baseRepo := &userRepoStub{user: &User{ID: 7, Balance: 10}} + repo := &balanceUserRepoStub{userRepoStub: baseRepo} + redeemRepo := &balanceRedeemRepoStub{redeemRepoStub: &redeemRepoStub{}} + invalidator := &authCacheInvalidatorStub{} + svc := &adminServiceImpl{ + userRepo: repo, + redeemCodeRepo: redeemRepo, + authCacheInvalidator: invalidator, + } + + _, err := svc.UpdateUserBalance(context.Background(), 7, 10, "set", "") + require.NoError(t, err) + require.Empty(t, invalidator.userIDs) + require.Empty(t, redeemRepo.created) +} diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go new file mode 100644 index 00000000..7f3e97a2 --- /dev/null +++ b/backend/internal/service/antigravity_gateway_service.go @@ -0,0 +1,2474 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + mathrand "math/rand" + "net" + "net/http" + "strings" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +const ( + antigravityStickySessionTTL = time.Hour + antigravityMaxRetries = 3 + antigravityRetryBaseDelay = 1 * time.Second + antigravityRetryMaxDelay = 16 * time.Second +) + +// isAntigravityConnectionError 判断是否为连接错误(网络超时、DNS 失败、连接拒绝) +func isAntigravityConnectionError(err error) bool { + if err == nil { + return false + 
} + + // 检查超时错误 + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + return true + } + + // 检查连接错误(DNS 失败、连接拒绝) + var opErr *net.OpError + return errors.As(err, &opErr) +} + +// shouldAntigravityFallbackToNextURL 判断是否应切换到下一个 URL +// 仅连接错误和 HTTP 429 触发 URL 降级 +func shouldAntigravityFallbackToNextURL(err error, statusCode int) bool { + if isAntigravityConnectionError(err) { + return true + } + return statusCode == http.StatusTooManyRequests +} + +// getSessionID 从 gin.Context 获取 session_id(用于日志追踪) +func getSessionID(c *gin.Context) string { + if c == nil { + return "" + } + return c.GetHeader("session_id") +} + +// logPrefix 生成统一的日志前缀 +func logPrefix(sessionID, accountName string) string { + if sessionID != "" { + return fmt.Sprintf("[antigravity-Forward] session=%s account=%s", sessionID, accountName) + } + return fmt.Sprintf("[antigravity-Forward] account=%s", accountName) +} + +// Antigravity 直接支持的模型(精确匹配透传) +var antigravitySupportedModels = map[string]bool{ + "claude-opus-4-5-thinking": true, + "claude-sonnet-4-5": true, + "claude-sonnet-4-5-thinking": true, + "gemini-2.5-flash": true, + "gemini-2.5-flash-lite": true, + "gemini-2.5-flash-thinking": true, + "gemini-3-flash": true, + "gemini-3-pro-low": true, + "gemini-3-pro-high": true, + "gemini-3-pro-image": true, +} + +// Antigravity 前缀映射表(按前缀长度降序排列,确保最长匹配优先) +// 用于处理模型版本号变化(如 -20251111, -thinking, -preview 等后缀) +var antigravityPrefixMapping = []struct { + prefix string + target string +}{ + // 长前缀优先 + {"gemini-2.5-flash-image", "gemini-3-pro-image"}, // gemini-2.5-flash-image → 3-pro-image + {"gemini-3-pro-image", "gemini-3-pro-image"}, // gemini-3-pro-image-preview 等 + {"gemini-3-flash", "gemini-3-flash"}, // gemini-3-flash-preview 等 → gemini-3-flash + {"claude-3-5-sonnet", "claude-sonnet-4-5"}, // 旧版 claude-3-5-sonnet-xxx + {"claude-sonnet-4-5", "claude-sonnet-4-5"}, // claude-sonnet-4-5-xxx + {"claude-haiku-4-5", "claude-sonnet-4-5"}, // claude-haiku-4-5-xxx → sonnet + {"claude-opus-4-5", "claude-opus-4-5-thinking"}, + {"claude-3-haiku", "claude-sonnet-4-5"}, // 旧版 claude-3-haiku-xxx → sonnet + {"claude-sonnet-4", "claude-sonnet-4-5"}, + {"claude-haiku-4", "claude-sonnet-4-5"}, // → sonnet + {"claude-opus-4", "claude-opus-4-5-thinking"}, + {"gemini-3-pro", "gemini-3-pro-high"}, // gemini-3-pro, gemini-3-pro-preview 等 +} + +// AntigravityGatewayService 处理 Antigravity 平台的 API 转发 +type AntigravityGatewayService struct { + accountRepo AccountRepository + tokenProvider *AntigravityTokenProvider + rateLimitService *RateLimitService + httpUpstream HTTPUpstream + settingService *SettingService +} + +func NewAntigravityGatewayService( + accountRepo AccountRepository, + _ GatewayCache, + tokenProvider *AntigravityTokenProvider, + rateLimitService *RateLimitService, + httpUpstream HTTPUpstream, + settingService *SettingService, +) *AntigravityGatewayService { + return &AntigravityGatewayService{ + accountRepo: accountRepo, + tokenProvider: tokenProvider, + rateLimitService: rateLimitService, + httpUpstream: httpUpstream, + settingService: settingService, + } +} + +// GetTokenProvider 返回 token provider +func (s *AntigravityGatewayService) GetTokenProvider() *AntigravityTokenProvider { + return s.tokenProvider +} + +// getMappedModel 获取映射后的模型名 +// 逻辑:账户映射 → 直接支持透传 → 前缀映射 → gemini透传 → 默认值 +func (s *AntigravityGatewayService) getMappedModel(account *Account, requestedModel string) string { + // 1. 账户级映射(用户自定义优先) + if mapped := account.GetMappedModel(requestedModel); mapped != requestedModel { + return mapped + } + + // 2. 
直接支持的模型透传 + if antigravitySupportedModels[requestedModel] { + return requestedModel + } + + // 3. 前缀映射(处理版本号变化,如 -20251111, -thinking, -preview) + for _, pm := range antigravityPrefixMapping { + if strings.HasPrefix(requestedModel, pm.prefix) { + return pm.target + } + } + + // 4. Gemini 模型透传(未匹配到前缀的 gemini 模型) + if strings.HasPrefix(requestedModel, "gemini-") { + return requestedModel + } + + // 5. 默认值 + return "claude-sonnet-4-5" +} + +// IsModelSupported 检查模型是否被支持 +// 所有 claude- 和 gemini- 前缀的模型都能通过映射或透传支持 +func (s *AntigravityGatewayService) IsModelSupported(requestedModel string) bool { + return strings.HasPrefix(requestedModel, "claude-") || + strings.HasPrefix(requestedModel, "gemini-") +} + +// TestConnectionResult 测试连接结果 +type TestConnectionResult struct { + Text string // 响应文本 + MappedModel string // 实际使用的模型 +} + +// TestConnection 测试 Antigravity 账号连接(非流式,无重试、无计费) +// 支持 Claude 和 Gemini 两种协议,根据 modelID 前缀自动选择 +func (s *AntigravityGatewayService) TestConnection(ctx context.Context, account *Account, modelID string) (*TestConnectionResult, error) { + // 获取 token + if s.tokenProvider == nil { + return nil, errors.New("antigravity token provider not configured") + } + accessToken, err := s.tokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, fmt.Errorf("获取 access_token 失败: %w", err) + } + + // 获取 project_id(部分账户类型可能没有) + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + // 模型映射 + mappedModel := s.getMappedModel(account, modelID) + + // 构建请求体 + var requestBody []byte + if strings.HasPrefix(modelID, "gemini-") { + // Gemini 模型:直接使用 Gemini 格式 + requestBody, err = s.buildGeminiTestRequest(projectID, mappedModel) + } else { + // Claude 模型:使用协议转换 + requestBody, err = s.buildClaudeTestRequest(projectID, mappedModel) + } + if err != nil { + return nil, fmt.Errorf("构建请求失败: %w", err) + } + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // URL fallback 循环 + availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = antigravity.BaseURLs // 所有 URL 都不可用时,重试所有 + } + + var lastErr error + for urlIdx, baseURL := range availableURLs { + // 构建 HTTP 请求(总是使用流式 endpoint,与官方客户端一致) + req, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, "streamGenerateContent", accessToken, requestBody) + if err != nil { + lastErr = err + continue + } + + // 调试日志:Test 请求信息 + log.Printf("[antigravity-Test] account=%s request_size=%d url=%s", account.Name, len(requestBody), req.URL.String()) + + // 发送请求 + resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency) + if err != nil { + lastErr = fmt.Errorf("请求失败: %w", err) + if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity-Test] URL fallback: %s -> %s", baseURL, availableURLs[urlIdx+1]) + continue + } + return nil, lastErr + } + + // 读取响应 + respBody, err := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() // 立即关闭,避免循环内 defer 导致的资源泄漏 + if err != nil { + return nil, fmt.Errorf("读取响应失败: %w", err) + } + + // 检查是否需要 URL 降级 + if shouldAntigravityFallbackToNextURL(nil, resp.StatusCode) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("[antigravity-Test] URL fallback (HTTP %d): %s -> %s", resp.StatusCode, baseURL, availableURLs[urlIdx+1]) + continue + } + + if resp.StatusCode >= 400 { + 
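// Any remaining error status is returned directly; TestConnection deliberately does not retry. +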
return nil, fmt.Errorf("API 返回 %d: %s", resp.StatusCode, string(respBody)) + } + + // 解析流式响应,提取文本 + text := extractTextFromSSEResponse(respBody) + + return &TestConnectionResult{ + Text: text, + MappedModel: mappedModel, + }, nil + } + + return nil, lastErr +} + +// buildGeminiTestRequest 构建 Gemini 格式测试请求 +func (s *AntigravityGatewayService) buildGeminiTestRequest(projectID, model string) ([]byte, error) { + payload := map[string]any{ + "contents": []map[string]any{ + { + "role": "user", + "parts": []map[string]any{ + {"text": "hi"}, + }, + }, + }, + // Antigravity 上游要求必须包含身份提示词 + "systemInstruction": map[string]any{ + "parts": []map[string]any{ + {"text": antigravity.GetDefaultIdentityPatch()}, + }, + }, + } + payloadBytes, _ := json.Marshal(payload) + return s.wrapV1InternalRequest(projectID, model, payloadBytes) +} + +// buildClaudeTestRequest 构建 Claude 格式测试请求并转换为 Gemini 格式 +func (s *AntigravityGatewayService) buildClaudeTestRequest(projectID, mappedModel string) ([]byte, error) { + claudeReq := &antigravity.ClaudeRequest{ + Model: mappedModel, + Messages: []antigravity.ClaudeMessage{ + { + Role: "user", + Content: json.RawMessage(`"hi"`), + }, + }, + MaxTokens: 1024, + Stream: false, + } + return antigravity.TransformClaudeToGemini(claudeReq, projectID, mappedModel) +} + +func (s *AntigravityGatewayService) getClaudeTransformOptions(ctx context.Context) antigravity.TransformOptions { + opts := antigravity.DefaultTransformOptions() + if s.settingService == nil { + return opts + } + opts.EnableIdentityPatch = s.settingService.IsIdentityPatchEnabled(ctx) + opts.IdentityPatch = s.settingService.GetIdentityPatchPrompt(ctx) + return opts +} + +// extractTextFromSSEResponse 从 SSE 流式响应中提取文本 +func extractTextFromSSEResponse(respBody []byte) string { + var texts []string + lines := bytes.Split(respBody, []byte("\n")) + + for _, line := range lines { + line = bytes.TrimSpace(line) + if len(line) == 0 { + continue + } + + // 跳过 SSE 前缀 + if bytes.HasPrefix(line, []byte("data:")) { + line = bytes.TrimPrefix(line, []byte("data:")) + line = bytes.TrimSpace(line) + } + + // 跳过非 JSON 行 + if len(line) == 0 || line[0] != '{' { + continue + } + + // 解析 JSON + var data map[string]any + if err := json.Unmarshal(line, &data); err != nil { + continue + } + + // 尝试从 response.candidates[0].content.parts[].text 提取 + response, ok := data["response"].(map[string]any) + if !ok { + // 尝试直接从 candidates 提取(某些响应格式) + response = data + } + + candidates, ok := response["candidates"].([]any) + if !ok || len(candidates) == 0 { + continue + } + + candidate, ok := candidates[0].(map[string]any) + if !ok { + continue + } + + content, ok := candidate["content"].(map[string]any) + if !ok { + continue + } + + parts, ok := content["parts"].([]any) + if !ok { + continue + } + + for _, part := range parts { + if partMap, ok := part.(map[string]any); ok { + if text, ok := partMap["text"].(string); ok && text != "" { + texts = append(texts, text) + } + } + } + } + + return strings.Join(texts, "") +} + +// injectIdentityPatchToGeminiRequest 为 Gemini 格式请求注入身份提示词 +// 如果请求中已包含 "You are Antigravity" 则不重复注入 +func injectIdentityPatchToGeminiRequest(body []byte) ([]byte, error) { + var request map[string]any + if err := json.Unmarshal(body, &request); err != nil { + return nil, fmt.Errorf("解析 Gemini 请求失败: %w", err) + } + + // 检查现有 systemInstruction 是否已包含身份提示词 + if sysInst, ok := request["systemInstruction"].(map[string]any); ok { + if parts, ok := sysInst["parts"].([]any); ok { + for _, part := range parts { + if partMap, ok := 
part.(map[string]any); ok { + if text, ok := partMap["text"].(string); ok { + if strings.Contains(text, "You are Antigravity") { + // 已包含身份提示词,直接返回原始请求 + return body, nil + } + } + } + } + } + } + + // 获取默认身份提示词 + identityPatch := antigravity.GetDefaultIdentityPatch() + + // 构建新的 systemInstruction + newPart := map[string]any{"text": identityPatch} + + if existing, ok := request["systemInstruction"].(map[string]any); ok { + // 已有 systemInstruction,在开头插入身份提示词 + if parts, ok := existing["parts"].([]any); ok { + existing["parts"] = append([]any{newPart}, parts...) + } else { + existing["parts"] = []any{newPart} + } + } else { + // 没有 systemInstruction,创建新的 + request["systemInstruction"] = map[string]any{ + "parts": []any{newPart}, + } + } + + return json.Marshal(request) +} + +// wrapV1InternalRequest 包装请求为 v1internal 格式 +func (s *AntigravityGatewayService) wrapV1InternalRequest(projectID, model string, originalBody []byte) ([]byte, error) { + var request any + if err := json.Unmarshal(originalBody, &request); err != nil { + return nil, fmt.Errorf("解析请求体失败: %w", err) + } + + wrapped := map[string]any{ + "project": projectID, + "requestId": "agent-" + uuid.New().String(), + "userAgent": "antigravity", // 固定值,与官方客户端一致 + "requestType": "agent", + "model": model, + "request": request, + } + + return json.Marshal(wrapped) +} + +// unwrapV1InternalResponse 解包 v1internal 响应 +func (s *AntigravityGatewayService) unwrapV1InternalResponse(body []byte) ([]byte, error) { + var outer map[string]any + if err := json.Unmarshal(body, &outer); err != nil { + return nil, err + } + + if resp, ok := outer["response"]; ok { + return json.Marshal(resp) + } + + return body, nil +} + +// isModelNotFoundError 检测是否为模型不存在的 404 错误 +func isModelNotFoundError(statusCode int, body []byte) bool { + if statusCode != 404 { + return false + } + + bodyStr := strings.ToLower(string(body)) + keywords := []string{"model not found", "unknown model", "not found"} + for _, keyword := range keywords { + if strings.Contains(bodyStr, keyword) { + return true + } + } + return true // 404 without specific message also treated as model not found +} + +// Forward 转发 Claude 协议请求(Claude → Gemini 转换) +func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, body []byte) (*ForwardResult, error) { + startTime := time.Now() + sessionID := getSessionID(c) + prefix := logPrefix(sessionID, account.Name) + + // 解析 Claude 请求 + var claudeReq antigravity.ClaudeRequest + if err := json.Unmarshal(body, &claudeReq); err != nil { + return nil, fmt.Errorf("parse claude request: %w", err) + } + if strings.TrimSpace(claudeReq.Model) == "" { + return nil, fmt.Errorf("missing model") + } + + originalModel := claudeReq.Model + mappedModel := s.getMappedModel(account, claudeReq.Model) + quotaScope, _ := resolveAntigravityQuotaScope(originalModel) + + // 获取 access_token + if s.tokenProvider == nil { + return nil, errors.New("antigravity token provider not configured") + } + accessToken, err := s.tokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, fmt.Errorf("获取 access_token 失败: %w", err) + } + + // 获取 project_id(部分账户类型可能没有) + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // Sanitize thinking blocks (clean cache_control and flatten history thinking) + sanitizeThinkingBlocks(&claudeReq) + + // 获取转换选项 + // Antigravity 上游要求必须包含身份提示词,否则会返回 429 + transformOpts := 
s.getClaudeTransformOptions(ctx) + transformOpts.EnableIdentityPatch = true // 强制启用,Antigravity 上游必需 + + // 转换 Claude 请求为 Gemini 格式 + geminiBody, err := antigravity.TransformClaudeToGeminiWithOptions(&claudeReq, projectID, mappedModel, transformOpts) + if err != nil { + return nil, fmt.Errorf("transform request: %w", err) + } + + // Safety net: ensure no cache_control leaked into Gemini request + geminiBody = cleanCacheControlFromGeminiJSON(geminiBody) + + // Antigravity 上游只支持流式请求,统一使用 streamGenerateContent + // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后转换返回 + action := "streamGenerateContent" + + // URL fallback 循环 + availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = antigravity.BaseURLs // 所有 URL 都不可用时,重试所有 + } + + // 重试循环 + var resp *http.Response +urlFallbackLoop: + for urlIdx, baseURL := range availableURLs { + for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { + // 检查 context 是否已取消(客户端断开连接) + select { + case <-ctx.Done(): + log.Printf("%s status=context_canceled error=%v", prefix, ctx.Err()) + return nil, ctx.Err() + default: + } + + upstreamReq, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, action, accessToken, geminiBody) + // Capture upstream request body for ops retry of this attempt. + if c != nil { + c.Set(OpsUpstreamRequestBodyKey, string(geminiBody)) + } + if err != nil { + return nil, err + } + + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + // 检查是否应触发 URL 降级 + if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (connection error): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + if attempt < antigravityMaxRetries { + log.Printf("%s status=request_failed retry=%d/%d error=%v", prefix, attempt, antigravityMaxRetries, err) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", prefix) + return nil, ctx.Err() + } + continue + } + log.Printf("%s status=request_failed retries_exhausted error=%v", prefix, err) + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries") + } + + // 检查是否应触发 URL 降级(仅 429) + if resp.StatusCode == http.StatusTooManyRequests && urlIdx < len(availableURLs)-1 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + 
UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) + continue urlFallbackLoop + } + + if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + + if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + log.Printf("%s status=%d retry=%d/%d body=%s", prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", prefix) + return nil, ctx.Err() + } + continue + } + // 所有重试都失败,标记限流状态 + if resp.StatusCode == 429 { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + } + // 最后一次尝试也失败 + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop + } + + break urlFallbackLoop + } + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + // 优先检测 thinking block 的 signature 相关错误(400)并重试一次: + // Antigravity /v1internal 链路在部分场景会对 thought/thinking signature 做严格校验, + // 当历史消息携带的 signature 不合法时会直接 400;去除 thinking 后可继续完成请求。 + if resp.StatusCode == http.StatusBadRequest && isSignatureRelatedError(respBody) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "signature_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + // Conservative two-stage fallback: + // 1) Disable top-level thinking + thinking->text + // 2) Only 
if still signature-related 400: also downgrade tool_use/tool_result to text. + + retryStages := []struct { + name string + strip func(*antigravity.ClaudeRequest) (bool, error) + }{ + {name: "thinking-only", strip: stripThinkingFromClaudeRequest}, + {name: "thinking+tools", strip: stripSignatureSensitiveBlocksFromClaudeRequest}, + } + + for _, stage := range retryStages { + retryClaudeReq := claudeReq + retryClaudeReq.Messages = append([]antigravity.ClaudeMessage(nil), claudeReq.Messages...) + + stripped, stripErr := stage.strip(&retryClaudeReq) + if stripErr != nil || !stripped { + continue + } + + log.Printf("Antigravity account %d: detected signature-related 400, retrying once (%s)", account.ID, stage.name) + + retryGeminiBody, txErr := antigravity.TransformClaudeToGeminiWithOptions(&retryClaudeReq, projectID, mappedModel, s.getClaudeTransformOptions(ctx)) + if txErr != nil { + continue + } + retryReq, buildErr := antigravity.NewAPIRequest(ctx, action, accessToken, retryGeminiBody) + if buildErr != nil { + continue + } + retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) + if retryErr != nil { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "signature_retry_request_error", + Message: sanitizeUpstreamErrorMessage(retryErr.Error()), + }) + log.Printf("Antigravity account %d: signature retry request failed (%s): %v", account.ID, stage.name, retryErr) + continue + } + + if retryResp.StatusCode < 400 { + _ = resp.Body.Close() + resp = retryResp + respBody = nil + break + } + + retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) + _ = retryResp.Body.Close() + kind := "signature_retry" + if strings.TrimSpace(stage.name) != "" { + kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") + } + retryUpstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(retryBody)) + retryUpstreamMsg = sanitizeUpstreamErrorMessage(retryUpstreamMsg) + retryUpstreamDetail := "" + if logBody { + retryUpstreamDetail = truncateString(string(retryBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: retryResp.StatusCode, + UpstreamRequestID: retryResp.Header.Get("x-request-id"), + Kind: kind, + Message: retryUpstreamMsg, + Detail: retryUpstreamDetail, + }) + + // If this stage fixed the signature issue, we stop; otherwise we may try the next stage. + if retryResp.StatusCode != http.StatusBadRequest || !isSignatureRelatedError(retryBody) { + respBody = retryBody + resp = &http.Response{ + StatusCode: retryResp.StatusCode, + Header: retryResp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(retryBody)), + } + break + } + + // Still signature-related; capture context and allow next stage. 
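+ // Keep the last attempt's status, headers and body as the new baseline so later stages and the final error path report what the most recent retry saw.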
+ respBody = retryBody + resp = &http.Response{ + StatusCode: retryResp.StatusCode, + Header: retryResp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(retryBody)), + } + } + } + + // 处理错误响应(重试后仍失败或不触发重试) + if resp.StatusCode >= 400 { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + + if s.shouldFailoverUpstreamError(resp.StatusCode) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + + return nil, s.writeMappedClaudeError(c, account, resp.StatusCode, resp.Header.Get("x-request-id"), respBody) + } + } + + requestID := resp.Header.Get("x-request-id") + if requestID != "" { + c.Header("x-request-id", requestID) + } + + var usage *ClaudeUsage + var firstTokenMs *int + if claudeReq.Stream { + // 客户端要求流式,直接透传转换 + streamRes, err := s.handleClaudeStreamingResponse(c, resp, startTime, originalModel) + if err != nil { + log.Printf("%s status=stream_error error=%v", prefix, err) + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + } else { + // 客户端要求非流式,收集流式响应后转换返回 + streamRes, err := s.handleClaudeStreamToNonStreaming(c, resp, startTime, originalModel) + if err != nil { + log.Printf("%s status=stream_collect_error error=%v", prefix, err) + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + } + + return &ForwardResult{ + RequestID: requestID, + Usage: *usage, + Model: originalModel, // 使用原始模型用于计费和日志 + Stream: claudeReq.Stream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + }, nil +} + +func isSignatureRelatedError(respBody []byte) bool { + msg := strings.ToLower(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) + if msg == "" { + // Fallback: best-effort scan of the raw payload. + msg = strings.ToLower(string(respBody)) + } + + // Keep this intentionally broad: different upstreams may use "signature" or "thought_signature". 
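+ // Illustrative messages this would match (exact upstream wording varies): "invalid thought_signature", "signature verification failed".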
+ if strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") { + return true + } + + // Also detect thinking block structural errors: + // "Expected `thinking` or `redacted_thinking`, but found `text`" + if strings.Contains(msg, "expected") && (strings.Contains(msg, "thinking") || strings.Contains(msg, "redacted_thinking")) { + return true + } + + return false +} + +func extractAntigravityErrorMessage(body []byte) string { + var payload map[string]any + if err := json.Unmarshal(body, &payload); err != nil { + return "" + } + + // Google-style: {"error": {"message": "..."}} + if errObj, ok := payload["error"].(map[string]any); ok { + if msg, ok := errObj["message"].(string); ok && strings.TrimSpace(msg) != "" { + return msg + } + } + + // Fallback: top-level message + if msg, ok := payload["message"].(string); ok && strings.TrimSpace(msg) != "" { + return msg + } + + return "" +} + +// cleanCacheControlFromGeminiJSON removes cache_control from Gemini JSON (emergency fix) +// This should not be needed if transformation is correct, but serves as a safety net +func cleanCacheControlFromGeminiJSON(body []byte) []byte { + // Try a more robust approach: parse and clean + var data map[string]any + if err := json.Unmarshal(body, &data); err != nil { + log.Printf("[Antigravity] Failed to parse Gemini JSON for cache_control cleaning: %v", err) + return body + } + + cleaned := removeCacheControlFromAny(data) + if !cleaned { + return body + } + + if result, err := json.Marshal(data); err == nil { + log.Printf("[Antigravity] Successfully cleaned cache_control from Gemini JSON") + return result + } + + return body +} + +// removeCacheControlFromAny recursively removes cache_control fields +func removeCacheControlFromAny(v any) bool { + cleaned := false + + switch val := v.(type) { + case map[string]any: + for k, child := range val { + if k == "cache_control" { + delete(val, k) + cleaned = true + } else if removeCacheControlFromAny(child) { + cleaned = true + } + } + case []any: + for _, item := range val { + if removeCacheControlFromAny(item) { + cleaned = true + } + } + } + + return cleaned +} + +// sanitizeThinkingBlocks cleans cache_control and flattens history thinking blocks +// Thinking blocks do NOT support cache_control field (Anthropic API/Vertex AI requirement) +// Additionally, history thinking blocks are flattened to text to avoid upstream validation errors +func sanitizeThinkingBlocks(req *antigravity.ClaudeRequest) { + if req == nil { + return + } + + log.Printf("[Antigravity] sanitizeThinkingBlocks: processing request with %d messages", len(req.Messages)) + + // Clean system blocks + if len(req.System) > 0 { + var systemBlocks []map[string]any + if err := json.Unmarshal(req.System, &systemBlocks); err == nil { + for i := range systemBlocks { + if blockType, _ := systemBlocks[i]["type"].(string); blockType == "thinking" || systemBlocks[i]["thinking"] != nil { + if removeCacheControlFromAny(systemBlocks[i]) { + log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in system[%d]", i) + } + } + } + // Marshal back + if cleaned, err := json.Marshal(systemBlocks); err == nil { + req.System = cleaned + } + } + } + + // Clean message content blocks and flatten history + lastMsgIdx := len(req.Messages) - 1 + for msgIdx := range req.Messages { + raw := req.Messages[msgIdx].Content + if len(raw) == 0 { + continue + } + + // Try to parse as blocks array + var blocks []map[string]any + if err := json.Unmarshal(raw, &blocks); err != nil { + continue + } + + 
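// Track whether any block in this message changed so content is re-marshaled only when needed. +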
cleaned := false + for blockIdx := range blocks { + blockType, _ := blocks[blockIdx]["type"].(string) + + // Check for thinking blocks (typed or untyped) + if blockType == "thinking" || blocks[blockIdx]["thinking"] != nil { + // 1. Clean cache_control + if removeCacheControlFromAny(blocks[blockIdx]) { + log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in messages[%d].content[%d]", msgIdx, blockIdx) + cleaned = true + } + + // 2. Flatten to text if it's a history message (not the last one) + if msgIdx < lastMsgIdx { + log.Printf("[Antigravity] Flattening history thinking block to text at messages[%d].content[%d]", msgIdx, blockIdx) + + // Extract thinking content + var textContent string + if t, ok := blocks[blockIdx]["thinking"].(string); ok { + textContent = t + } else { + // Fallback for non-string content (marshal it) + if b, err := json.Marshal(blocks[blockIdx]["thinking"]); err == nil { + textContent = string(b) + } + } + + // Convert to text block + blocks[blockIdx]["type"] = "text" + blocks[blockIdx]["text"] = textContent + delete(blocks[blockIdx], "thinking") + delete(blocks[blockIdx], "signature") + delete(blocks[blockIdx], "cache_control") // Ensure it's gone + cleaned = true + } + } + } + + // Marshal back if modified + if cleaned { + if marshaled, err := json.Marshal(blocks); err == nil { + req.Messages[msgIdx].Content = marshaled + } + } + } +} + +// stripThinkingFromClaudeRequest converts thinking blocks to text blocks in a Claude Messages request. +// This preserves the thinking content while avoiding signature validation errors. +// Note: redacted_thinking blocks are removed because they cannot be converted to text. +// It also disables top-level `thinking` to avoid upstream structural constraints for thinking mode. +func stripThinkingFromClaudeRequest(req *antigravity.ClaudeRequest) (bool, error) { + if req == nil { + return false, nil + } + + changed := false + if req.Thinking != nil { + req.Thinking = nil + changed = true + } + + for i := range req.Messages { + raw := req.Messages[i].Content + if len(raw) == 0 { + continue + } + + // If content is a string, nothing to strip. + var str string + if json.Unmarshal(raw, &str) == nil { + continue + } + + // Otherwise treat as an array of blocks and convert thinking blocks to text. 
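+ // e.g. {"type":"thinking","thinking":"...","signature":"..."} becomes {"type":"text","text":"..."}; the signature is dropped.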
+ var blocks []map[string]any + if err := json.Unmarshal(raw, &blocks); err != nil { + continue + } + + filtered := make([]map[string]any, 0, len(blocks)) + modifiedAny := false + for _, block := range blocks { + t, _ := block["type"].(string) + switch t { + case "thinking": + thinkingText, _ := block["thinking"].(string) + if thinkingText != "" { + filtered = append(filtered, map[string]any{ + "type": "text", + "text": thinkingText, + }) + } + modifiedAny = true + case "redacted_thinking": + modifiedAny = true + case "": + if thinkingText, hasThinking := block["thinking"].(string); hasThinking { + if thinkingText != "" { + filtered = append(filtered, map[string]any{ + "type": "text", + "text": thinkingText, + }) + } + modifiedAny = true + } else { + filtered = append(filtered, block) + } + default: + filtered = append(filtered, block) + } + } + + if !modifiedAny { + continue + } + + if len(filtered) == 0 { + filtered = append(filtered, map[string]any{ + "type": "text", + "text": "(content removed)", + }) + } + + newRaw, err := json.Marshal(filtered) + if err != nil { + return changed, err + } + req.Messages[i].Content = newRaw + changed = true + } + + return changed, nil +} + +// stripSignatureSensitiveBlocksFromClaudeRequest is a stronger retry degradation that additionally converts +// tool blocks to plain text. Use this only after a thinking-only retry still fails with signature errors. +func stripSignatureSensitiveBlocksFromClaudeRequest(req *antigravity.ClaudeRequest) (bool, error) { + if req == nil { + return false, nil + } + + changed := false + if req.Thinking != nil { + req.Thinking = nil + changed = true + } + + for i := range req.Messages { + raw := req.Messages[i].Content + if len(raw) == 0 { + continue + } + + // If content is a string, nothing to strip. + var str string + if json.Unmarshal(raw, &str) == nil { + continue + } + + // Otherwise treat as an array of blocks and convert signature-sensitive blocks to text. + var blocks []map[string]any + if err := json.Unmarshal(raw, &blocks); err != nil { + continue + } + + filtered := make([]map[string]any, 0, len(blocks)) + modifiedAny := false + for _, block := range blocks { + t, _ := block["type"].(string) + switch t { + case "thinking": + // Convert thinking to text, skip if empty + thinkingText, _ := block["thinking"].(string) + if thinkingText != "" { + filtered = append(filtered, map[string]any{ + "type": "text", + "text": thinkingText, + }) + } + modifiedAny = true + case "redacted_thinking": + // Remove redacted_thinking (cannot convert encrypted content) + modifiedAny = true + case "tool_use": + // Convert tool_use to text to avoid upstream signature/thought_signature validation errors. + // This is a retry-only degradation path, so we prioritise request validity over tool semantics. + name, _ := block["name"].(string) + id, _ := block["id"].(string) + input := block["input"] + inputJSON, _ := json.Marshal(input) + text := "(tool_use)" + if name != "" { + text += " name=" + name + } + if id != "" { + text += " id=" + id + } + if len(inputJSON) > 0 && string(inputJSON) != "null" { + text += " input=" + string(inputJSON) + } + filtered = append(filtered, map[string]any{ + "type": "text", + "text": text, + }) + modifiedAny = true + case "tool_result": + // Convert tool_result to text so it stays consistent when tool_use is downgraded. 
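+ // Illustrative output: "(tool_result) tool_use_id=toolu_01 is_error=true" followed by the JSON content on a new line (the id value here is hypothetical).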
+ toolUseID, _ := block["tool_use_id"].(string) + isError, _ := block["is_error"].(bool) + content := block["content"] + contentJSON, _ := json.Marshal(content) + text := "(tool_result)" + if toolUseID != "" { + text += " tool_use_id=" + toolUseID + } + if isError { + text += " is_error=true" + } + if len(contentJSON) > 0 && string(contentJSON) != "null" { + text += "\n" + string(contentJSON) + } + filtered = append(filtered, map[string]any{ + "type": "text", + "text": text, + }) + modifiedAny = true + case "": + // Handle untyped block with "thinking" field + if thinkingText, hasThinking := block["thinking"].(string); hasThinking { + if thinkingText != "" { + filtered = append(filtered, map[string]any{ + "type": "text", + "text": thinkingText, + }) + } + modifiedAny = true + } else { + filtered = append(filtered, block) + } + default: + filtered = append(filtered, block) + } + } + + if !modifiedAny { + continue + } + + if len(filtered) == 0 { + // Keep request valid: upstream rejects empty content arrays. + filtered = append(filtered, map[string]any{ + "type": "text", + "text": "(content removed)", + }) + } + + newRaw, err := json.Marshal(filtered) + if err != nil { + return changed, err + } + req.Messages[i].Content = newRaw + changed = true + } + + return changed, nil +} + +// ForwardGemini 转发 Gemini 协议请求 +func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Context, account *Account, originalModel string, action string, stream bool, body []byte) (*ForwardResult, error) { + startTime := time.Now() + sessionID := getSessionID(c) + prefix := logPrefix(sessionID, account.Name) + + if strings.TrimSpace(originalModel) == "" { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Missing model in URL") + } + if strings.TrimSpace(action) == "" { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Missing action in URL") + } + if len(body) == 0 { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Request body is empty") + } + quotaScope, _ := resolveAntigravityQuotaScope(originalModel) + + // 解析请求以获取 image_size(用于图片计费) + imageSize := s.extractImageSize(body) + + switch action { + case "generateContent", "streamGenerateContent": + // ok + case "countTokens": + // 直接返回空值,不透传上游 + c.JSON(http.StatusOK, map[string]any{"totalTokens": 0}) + return &ForwardResult{ + RequestID: "", + Usage: ClaudeUsage{}, + Model: originalModel, + Stream: false, + Duration: time.Since(startTime), + FirstTokenMs: nil, + }, nil + default: + return nil, s.writeGoogleError(c, http.StatusNotFound, "Unsupported action: "+action) + } + + mappedModel := s.getMappedModel(account, originalModel) + + // 获取 access_token + if s.tokenProvider == nil { + return nil, errors.New("antigravity token provider not configured") + } + accessToken, err := s.tokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, fmt.Errorf("获取 access_token 失败: %w", err) + } + + // 获取 project_id(部分账户类型可能没有) + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + // 代理 URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // Antigravity 上游要求必须包含身份提示词,注入到请求中 + injectedBody, err := injectIdentityPatchToGeminiRequest(body) + if err != nil { + return nil, err + } + + // 包装请求 + wrappedBody, err := s.wrapV1InternalRequest(projectID, mappedModel, injectedBody) + if err != nil { + return nil, err + } + + // Antigravity 上游只支持流式请求,统一使用 streamGenerateContent + // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后返回 + upstreamAction := 
"streamGenerateContent" + + // URL fallback 循环 + availableURLs := antigravity.DefaultURLAvailability.GetAvailableURLs() + if len(availableURLs) == 0 { + availableURLs = antigravity.BaseURLs // 所有 URL 都不可用时,重试所有 + } + + // 重试循环 + var resp *http.Response +urlFallbackLoop: + for urlIdx, baseURL := range availableURLs { + for attempt := 1; attempt <= antigravityMaxRetries; attempt++ { + // 检查 context 是否已取消(客户端断开连接) + select { + case <-ctx.Done(): + log.Printf("%s status=context_canceled error=%v", prefix, ctx.Err()) + return nil, ctx.Err() + default: + } + + upstreamReq, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, upstreamAction, accessToken, wrappedBody) + if err != nil { + return nil, err + } + + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + // 检查是否应触发 URL 降级 + if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (connection error): %s -> %s", prefix, baseURL, availableURLs[urlIdx+1]) + continue urlFallbackLoop + } + if attempt < antigravityMaxRetries { + log.Printf("%s status=request_failed retry=%d/%d error=%v", prefix, attempt, antigravityMaxRetries, err) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", prefix) + return nil, ctx.Err() + } + continue + } + log.Printf("%s status=request_failed retries_exhausted error=%v", prefix, err) + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries") + } + + // 检查是否应触发 URL 降级(仅 429) + if resp.StatusCode == http.StatusTooManyRequests && urlIdx < len(availableURLs)-1 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) + log.Printf("%s URL fallback (HTTP 429): %s -> %s body=%s", prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) + continue urlFallbackLoop + } + + if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + + if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = 
sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + log.Printf("%s status=%d retry=%d/%d", prefix, resp.StatusCode, attempt, antigravityMaxRetries) + if !sleepAntigravityBackoffWithContext(ctx, attempt) { + log.Printf("%s status=context_canceled_during_backoff", prefix) + return nil, ctx.Err() + } + continue + } + // 所有重试都失败,标记限流状态 + if resp.StatusCode == 429 { + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + } + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break urlFallbackLoop + } + + break urlFallbackLoop + } + } + defer func() { + if resp != nil && resp.Body != nil { + _ = resp.Body.Close() + } + }() + + // 处理错误响应 + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + // 尽早关闭原始响应体,释放连接;后续逻辑仍可能需要读取 body,因此用内存副本重新包装。 + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + + // 模型兜底:模型不存在且开启 fallback 时,自动用 fallback 模型重试一次 + if s.settingService != nil && s.settingService.IsModelFallbackEnabled(ctx) && + isModelNotFoundError(resp.StatusCode, respBody) { + fallbackModel := s.settingService.GetFallbackModel(ctx, PlatformAntigravity) + if fallbackModel != "" && fallbackModel != mappedModel { + log.Printf("[Antigravity] Model not found (%s), retrying with fallback model %s (account: %s)", mappedModel, fallbackModel, account.Name) + + fallbackWrapped, err := s.wrapV1InternalRequest(projectID, fallbackModel, injectedBody) + if err == nil { + fallbackReq, err := antigravity.NewAPIRequest(ctx, upstreamAction, accessToken, fallbackWrapped) + if err == nil { + fallbackResp, err := s.httpUpstream.Do(fallbackReq, proxyURL, account.ID, account.Concurrency) + if err == nil && fallbackResp.StatusCode < 400 { + _ = resp.Body.Close() + resp = fallbackResp + } else if fallbackResp != nil { + _ = fallbackResp.Body.Close() + } + } + } + } + } + + // fallback 成功:继续按正常响应处理 + if resp.StatusCode < 400 { + goto handleSuccess + } + + s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) + + requestID := resp.Header.Get("x-request-id") + if requestID != "" { + c.Header("x-request-id", requestID) + } + + unwrapped, unwrapErr := s.unwrapV1InternalResponse(respBody) + unwrappedForOps := unwrapped + if unwrapErr != nil || len(unwrappedForOps) == 0 { + unwrappedForOps = respBody + } + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(unwrappedForOps)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && 
s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(unwrappedForOps), maxBytes) + } + + // Always record upstream context for Ops error logs, even when we will failover. + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + + if s.shouldFailoverUpstreamError(resp.StatusCode) { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "application/json" + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + c.Data(resp.StatusCode, contentType, unwrappedForOps) + return nil, fmt.Errorf("antigravity upstream error: %d", resp.StatusCode) + } + +handleSuccess: + requestID := resp.Header.Get("x-request-id") + if requestID != "" { + c.Header("x-request-id", requestID) + } + + var usage *ClaudeUsage + var firstTokenMs *int + + if stream { + // 客户端要求流式,直接透传 + streamRes, err := s.handleGeminiStreamingResponse(c, resp, startTime) + if err != nil { + log.Printf("%s status=stream_error error=%v", prefix, err) + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + } else { + // 客户端要求非流式,收集流式响应后返回 + streamRes, err := s.handleGeminiStreamToNonStreaming(c, resp, startTime) + if err != nil { + log.Printf("%s status=stream_collect_error error=%v", prefix, err) + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + } + + if usage == nil { + usage = &ClaudeUsage{} + } + + // 判断是否为图片生成模型 + imageCount := 0 + if isImageGenerationModel(mappedModel) { + // Gemini 图片生成 API 每次请求只生成一张图片(API 限制) + imageCount = 1 + } + + return &ForwardResult{ + RequestID: requestID, + Usage: *usage, + Model: originalModel, + Stream: stream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + ImageCount: imageCount, + ImageSize: imageSize, + }, nil +} + +func (s *AntigravityGatewayService) shouldRetryUpstreamError(statusCode int) bool { + switch statusCode { + case 429, 500, 502, 503, 504, 529: + return true + default: + return false + } +} + +func (s *AntigravityGatewayService) shouldFailoverUpstreamError(statusCode int) bool { + switch statusCode { + case 401, 403, 429, 529: + return true + default: + return statusCode >= 500 + } +} + +// sleepAntigravityBackoffWithContext 带 context 取消检查的退避等待 +// 返回 true 表示正常完成等待,false 表示 context 已取消 +func sleepAntigravityBackoffWithContext(ctx context.Context, attempt int) bool { + delay := antigravityRetryBaseDelay * time.Duration(1<<(attempt-1)) + if delay > antigravityRetryMaxDelay { + delay = antigravityRetryMaxDelay + } + + // +/- 20% jitter + r := mathrand.New(mathrand.NewSource(time.Now().UnixNano())) + jitter := time.Duration(float64(delay) * 0.2 * (r.Float64()*2 - 1)) + sleepFor := delay + jitter + if sleepFor < 0 { + sleepFor = 0 + } + + select { + case <-ctx.Done(): + return false + case <-time.After(sleepFor): + return true + } +} + +func (s 
*AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { + // 429 使用 Gemini 格式解析(从 body 解析重置时间) + if statusCode == 429 { + resetAt := ParseGeminiRateLimitResetTime(body) + if resetAt == nil { + // 解析失败:Gemini 有重试时间用 5 分钟,Claude 没有用 1 分钟 + defaultDur := 1 * time.Minute + if bytes.Contains(body, []byte("Please retry in")) || bytes.Contains(body, []byte("retryDelay")) { + defaultDur = 5 * time.Minute + } + ra := time.Now().Add(defaultDur) + log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) + if quotaScope == "" { + return + } + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + } + return + } + resetTime := time.Unix(*resetAt, 0) + log.Printf("%s status=429 rate_limited scope=%s reset_at=%v reset_in=%v", prefix, quotaScope, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) + if quotaScope == "" { + return + } + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, resetTime); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + } + return + } + // 其他错误码继续使用 rateLimitService + if s.rateLimitService == nil { + return + } + shouldDisable := s.rateLimitService.HandleUpstreamError(ctx, account, statusCode, headers, body) + if shouldDisable { + log.Printf("%s status=%d marked_error", prefix, statusCode) + } +} + +type antigravityStreamResult struct { + usage *ClaudeUsage + firstTokenMs *int +} + +func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time) (*antigravityStreamResult, error) { + c.Status(resp.StatusCode) + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "text/event-stream; charset=utf-8" + } + c.Header("Content-Type", contentType) + + flusher, ok := c.Writer.(http.Flusher) + if !ok { + return nil, errors.New("streaming not supported") + } + + // 使用 Scanner 并限制单行大小,避免 ReadString 无上限导致 OOM + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + usage := &ClaudeUsage{} + var firstTokenMs *int + + type scanEvent struct { + line string + err error + } + // 独立 goroutine 读取上游,避免读取阻塞影响超时处理 + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() { + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + // 上游数据间隔超时保护(防止上游挂起长期占用连接) + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = 
+type antigravityStreamResult struct { + usage *ClaudeUsage + firstTokenMs *int +} + +func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time) (*antigravityStreamResult, error) { + c.Status(resp.StatusCode) + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "text/event-stream; charset=utf-8" + } + c.Header("Content-Type", contentType) + + flusher, ok := c.Writer.(http.Flusher) + if !ok { + return nil, errors.New("streaming not supported") + } + + // Use a Scanner with a bounded line size; an unbounded ReadString could OOM + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + usage := &ClaudeUsage{} + var firstTokenMs *int + + type scanEvent struct { + line string + err error + } + // Read upstream in a dedicated goroutine so blocking reads cannot stall timeout handling + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() { + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + // Data-interval timeout guard (prevents a hung upstream from holding the connection indefinitely) + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = time.Duration(s.settingService.cfg.Gateway.StreamDataIntervalTimeout) * time.Second + } + var intervalTicker *time.Ticker + if streamInterval > 0 { + intervalTicker = time.NewTicker(streamInterval) + defer intervalTicker.Stop() + } + var intervalCh <-chan time.Time + if intervalTicker != nil { + intervalCh = intervalTicker.C + } + + // Send the error event at most once; repeated writes would corrupt the SSE protocol + errorEventSent := false + sendErrorEvent := func(reason string) { + if errorEventSent { + return + } + errorEventSent = true + _, _ = fmt.Fprintf(c.Writer, "event: error\ndata: {\"error\":\"%s\"}\n\n", reason) + flusher.Flush() + } + + for { + select { + case ev, ok := <-events: + if !ok { + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil + } + if ev.err != nil { + if errors.Is(ev.err, bufio.ErrTooLong) { + log.Printf("SSE line too long (antigravity): max_size=%d error=%v", maxLineSize, ev.err) + sendErrorEvent("response_too_large") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, ev.err + } + sendErrorEvent("stream_read_error") + return nil, ev.err + } + + line := ev.line + trimmed := strings.TrimRight(line, "\r\n") + if strings.HasPrefix(trimmed, "data:") { + payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:")) + if payload == "" || payload == "[DONE]" { + if _, err := fmt.Fprintf(c.Writer, "%s\n", line); err != nil { + sendErrorEvent("write_failed") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err + } + flusher.Flush() + continue + } + + // Unwrap the v1internal envelope + inner, parseErr := s.unwrapV1InternalResponse([]byte(payload)) + if parseErr == nil && inner != nil { + payload = string(inner) + } + + // Parse usage (payload falls back to the raw data when unwrapping fails) + var parsed map[string]any + if json.Unmarshal([]byte(payload), &parsed) == nil { + if u := extractGeminiUsage(parsed); u != nil { + usage = u + } + } + + if firstTokenMs == nil { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + if _, err := fmt.Fprintf(c.Writer, "data: %s\n\n", payload); err != nil { + sendErrorEvent("write_failed") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err + } + flusher.Flush() + continue + } + + if _, err := fmt.Fprintf(c.Writer, "%s\n", line); err != nil { + sendErrorEvent("write_failed") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, err + } + flusher.Flush() + + case <-intervalCh: + lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) + if time.Since(lastRead) < streamInterval { + continue + } + log.Printf("Stream data interval timeout (antigravity)") + // Note: this function has no account context, so HandleStreamTimeout cannot be invoked + sendErrorEvent("stream_timeout") + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") + } + } +} +
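All three streaming handlers in this file repeat the same reader/watchdog shape: a goroutine pumps scanned lines into a channel, an atomic timestamp records the last successful read, and a ticker turns a quiet interval into a timeout. Distilled into one sketch (handleLine is a hypothetical per-handler callback; the real methods inline this with their own write/flush logic):

func pumpWithWatchdog(scanner *bufio.Scanner, interval time.Duration, handleLine func(string) error) error {
	type scanEvent struct {
		line string
		err  error
	}
	events := make(chan scanEvent, 16)
	done := make(chan struct{})
	defer close(done) // unblocks the sender if we return early, so the goroutine cannot leak
	send := func(ev scanEvent) bool {
		select {
		case events <- ev:
			return true
		case <-done:
			return false
		}
	}
	var lastReadAt int64
	atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
	go func() {
		defer close(events)
		for scanner.Scan() {
			atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
			if !send(scanEvent{line: scanner.Text()}) {
				return
			}
		}
		if err := scanner.Err(); err != nil {
			_ = send(scanEvent{err: err})
		}
	}()
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case ev, ok := <-events:
			if !ok {
				return nil // upstream EOF
			}
			if ev.err != nil {
				return ev.err
			}
			if err := handleLine(ev.line); err != nil {
				return err
			}
		case <-ticker.C:
			// Fire only when no data has arrived for a full interval.
			if time.Since(time.Unix(0, atomic.LoadInt64(&lastReadAt))) >= interval {
				return fmt.Errorf("stream data interval timeout")
			}
		}
	}
}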
+// handleGeminiStreamToNonStreaming reads the upstream streaming response and merges it into a single non-streaming response for the client +// Each chunk of a Gemini stream carries the accumulated full text, so only the last valid response needs to be kept +func (s *AntigravityGatewayService) handleGeminiStreamToNonStreaming(c *gin.Context, resp *http.Response, startTime time.Time) (*antigravityStreamResult, error) { + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + + usage := &ClaudeUsage{} + var firstTokenMs *int + var last map[string]any + var lastWithParts map[string]any + + type scanEvent struct { + line string + err error + } + + // Read upstream in a dedicated goroutine so blocking reads cannot stall timeout handling + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() { + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + // Data-interval timeout guard (prevents a hung upstream from holding the connection indefinitely) + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = time.Duration(s.settingService.cfg.Gateway.StreamDataIntervalTimeout) * time.Second + } + var intervalTicker *time.Ticker + if streamInterval > 0 { + intervalTicker = time.NewTicker(streamInterval) + defer intervalTicker.Stop() + } + var intervalCh <-chan time.Time + if intervalTicker != nil { + intervalCh = intervalTicker.C + } + + for { + select { + case ev, ok := <-events: + if !ok { + // Stream ended; return the collected response + goto returnResponse + } + if ev.err != nil { + if errors.Is(ev.err, bufio.ErrTooLong) { + log.Printf("SSE line too long (antigravity non-stream): max_size=%d error=%v", maxLineSize, ev.err) + } + return nil, ev.err + } + + line := ev.line + trimmed := strings.TrimRight(line, "\r\n") + + if !strings.HasPrefix(trimmed, "data:") { + continue + } + + payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:")) + if payload == "" || payload == "[DONE]" { + continue + } + + // Unwrap the v1internal envelope + inner, parseErr := s.unwrapV1InternalResponse([]byte(payload)) + if parseErr != nil { + continue + } + + var parsed map[string]any + if err := json.Unmarshal(inner, &parsed); err != nil { + continue + } + + // Record time to first token + if firstTokenMs == nil { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + last = parsed + + // Extract usage + if u := extractGeminiUsage(parsed); u != nil { + usage = u + } + + // Keep the last response that contains parts + if parts := extractGeminiParts(parsed); len(parts) > 0 { + lastWithParts = parsed + } + + case <-intervalCh: + lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) + if time.Since(lastRead) < streamInterval { + continue + } + log.Printf("Stream data interval timeout (antigravity non-stream)") + return nil, fmt.Errorf("stream data interval timeout") + } + } + +returnResponse: + // Pick the last valid response + finalResponse := pickGeminiCollectResult(last, lastWithParts) + + // Handle the empty-response case + if last == nil && lastWithParts == nil { + log.Printf("[antigravity-Forward] warning: empty stream response, no valid chunks received") + } + + respBody, err := json.Marshal(finalResponse) + if err != nil { + return nil, fmt.Errorf("failed to marshal response: %w", err) + } + c.Data(http.StatusOK, "application/json", respBody) + + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil +} + +func (s *AntigravityGatewayService) writeClaudeError(c *gin.Context, status int, errType, message string) error { + c.JSON(status, gin.H{ + "type": "error", + "error": gin.H{"type": errType, "message": message}, + }) + return fmt.Errorf("%s", message) +} + +func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, account *Account, upstreamStatus int, upstreamRequestID string, body []byte) error { + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + 
logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, upstreamStatus, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: upstreamStatus, + UpstreamRequestID: upstreamRequestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + // Log upstream error details for troubleshooting (optional, config-controlled; never echoed back to the client) + if logBody { + log.Printf("[antigravity-Forward] upstream_error status=%d body=%s", upstreamStatus, truncateForLog(body, maxBytes)) + } + + var statusCode int + var errType, errMsg string + + switch upstreamStatus { + case 400: + statusCode = http.StatusBadRequest + errType = "invalid_request_error" + errMsg = "Invalid request" + case 401: + statusCode = http.StatusBadGateway + errType = "authentication_error" + errMsg = "Upstream authentication failed" + case 403: + statusCode = http.StatusBadGateway + errType = "permission_error" + errMsg = "Upstream access forbidden" + case 429: + statusCode = http.StatusTooManyRequests + errType = "rate_limit_error" + errMsg = "Upstream rate limit exceeded" + case 529: + statusCode = http.StatusServiceUnavailable + errType = "overloaded_error" + errMsg = "Upstream service overloaded" + default: + statusCode = http.StatusBadGateway + errType = "upstream_error" + errMsg = "Upstream request failed" + } + + c.JSON(statusCode, gin.H{ + "type": "error", + "error": gin.H{"type": errType, "message": errMsg}, + }) + if upstreamMsg == "" { + return fmt.Errorf("upstream error: %d", upstreamStatus) + } + return fmt.Errorf("upstream error: %d message=%s", upstreamStatus, upstreamMsg) +} + +func (s *AntigravityGatewayService) writeGoogleError(c *gin.Context, status int, message string) error { + statusStr := "UNKNOWN" + switch status { + case 400: + statusStr = "INVALID_ARGUMENT" + case 404: + statusStr = "NOT_FOUND" + case 429: + statusStr = "RESOURCE_EXHAUSTED" + case 500: + statusStr = "INTERNAL" + case 502, 503: + statusStr = "UNAVAILABLE" + } + + c.JSON(status, gin.H{ + "error": gin.H{ + "code": status, + "message": message, + "status": statusStr, + }, + }) + return fmt.Errorf("%s", message) +} + +// handleClaudeStreamToNonStreaming collects the upstream streaming response and returns it converted to Claude's non-streaming format +// Used when the client asks for a non-streaming response but the upstream only supports streaming +func (s *AntigravityGatewayService) handleClaudeStreamToNonStreaming(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string) (*antigravityStreamResult, error) { + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + + var firstTokenMs *int + var last map[string]any + var lastWithParts map[string]any + + type scanEvent struct { + line string + err error + } + + // Read upstream in a dedicated goroutine so blocking reads cannot stall timeout handling + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + + var lastReadAt int64 + 
atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() { + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + // Data-interval timeout guard (prevents a hung upstream from holding the connection indefinitely) + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = time.Duration(s.settingService.cfg.Gateway.StreamDataIntervalTimeout) * time.Second + } + var intervalTicker *time.Ticker + if streamInterval > 0 { + intervalTicker = time.NewTicker(streamInterval) + defer intervalTicker.Stop() + } + var intervalCh <-chan time.Time + if intervalTicker != nil { + intervalCh = intervalTicker.C + } + + for { + select { + case ev, ok := <-events: + if !ok { + // Stream ended; convert and return the response + goto returnResponse + } + if ev.err != nil { + if errors.Is(ev.err, bufio.ErrTooLong) { + log.Printf("SSE line too long (antigravity claude non-stream): max_size=%d error=%v", maxLineSize, ev.err) + } + return nil, ev.err + } + + line := ev.line + trimmed := strings.TrimRight(line, "\r\n") + + if !strings.HasPrefix(trimmed, "data:") { + continue + } + + payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:")) + if payload == "" || payload == "[DONE]" { + continue + } + + // Unwrap the v1internal envelope + inner, parseErr := s.unwrapV1InternalResponse([]byte(payload)) + if parseErr != nil { + continue + } + + var parsed map[string]any + if err := json.Unmarshal(inner, &parsed); err != nil { + continue + } + + // Record time to first token + if firstTokenMs == nil { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + last = parsed + + // Keep the last response that contains parts + if parts := extractGeminiParts(parsed); len(parts) > 0 { + lastWithParts = parsed + } + + case <-intervalCh: + lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) + if time.Since(lastRead) < streamInterval { + continue + } + log.Printf("Stream data interval timeout (antigravity claude non-stream)") + return nil, fmt.Errorf("stream data interval timeout") + } + } + +returnResponse: + // Pick the last valid response + finalResponse := pickGeminiCollectResult(last, lastWithParts) + + // Handle the empty-response case + if last == nil && lastWithParts == nil { + log.Printf("[antigravity-Forward] warning: empty stream response, no valid chunks received") + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Empty response from upstream") + } + + // Serialize to JSON (Gemini format) + geminiBody, err := json.Marshal(finalResponse) + if err != nil { + return nil, fmt.Errorf("failed to marshal gemini response: %w", err) + } + + // Convert the Gemini response to Claude format + claudeResp, agUsage, err := antigravity.TransformGeminiToClaude(geminiBody, originalModel) + if err != nil { + log.Printf("[antigravity-Forward] transform_error error=%v body=%s", err, string(geminiBody)) + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Failed to parse upstream response") + } + + c.Data(http.StatusOK, "application/json", claudeResp) + + // Convert to service.ClaudeUsage + usage := &ClaudeUsage{ + InputTokens: agUsage.InputTokens, + OutputTokens: agUsage.OutputTokens, + CacheCreationInputTokens: agUsage.CacheCreationInputTokens, + CacheReadInputTokens: agUsage.CacheReadInputTokens, + } + + return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil +} + +// handleClaudeStreamingResponse handles Claude streaming responses (Gemini SSE → Claude SSE conversion) +func 
(s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string) (*antigravityStreamResult, error) { + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + c.Status(http.StatusOK) + + flusher, ok := c.Writer.(http.Flusher) + if !ok { + return nil, errors.New("streaming not supported") + } + + processor := antigravity.NewStreamingProcessor(originalModel) + var firstTokenMs *int + // Use a Scanner with a bounded line size; an unbounded ReadString could OOM + scanner := bufio.NewScanner(resp.Body) + maxLineSize := defaultMaxLineSize + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.MaxLineSize > 0 { + maxLineSize = s.settingService.cfg.Gateway.MaxLineSize + } + scanner.Buffer(make([]byte, 64*1024), maxLineSize) + + // Helper: convert antigravity.ClaudeUsage to service.ClaudeUsage + convertUsage := func(agUsage *antigravity.ClaudeUsage) *ClaudeUsage { + if agUsage == nil { + return &ClaudeUsage{} + } + return &ClaudeUsage{ + InputTokens: agUsage.InputTokens, + OutputTokens: agUsage.OutputTokens, + CacheCreationInputTokens: agUsage.CacheCreationInputTokens, + CacheReadInputTokens: agUsage.CacheReadInputTokens, + } + } + + type scanEvent struct { + line string + err error + } + // Read upstream in a dedicated goroutine so blocking reads cannot stall timeout handling + events := make(chan scanEvent, 16) + done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func() { + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }() + defer close(done) + + streamInterval := time.Duration(0) + if s.settingService.cfg != nil && s.settingService.cfg.Gateway.StreamDataIntervalTimeout > 0 { + streamInterval = time.Duration(s.settingService.cfg.Gateway.StreamDataIntervalTimeout) * time.Second + } + var intervalTicker *time.Ticker + if streamInterval > 0 { + intervalTicker = time.NewTicker(streamInterval) + defer intervalTicker.Stop() + } + var intervalCh <-chan time.Time + if intervalTicker != nil { + intervalCh = intervalTicker.C + } + + // Send the error event at most once; repeated writes would corrupt the SSE protocol + errorEventSent := false + sendErrorEvent := func(reason string) { + if errorEventSent { + return + } + errorEventSent = true + _, _ = fmt.Fprintf(c.Writer, "event: error\ndata: {\"error\":\"%s\"}\n\n", reason) + flusher.Flush() + } + + for { + select { + case ev, ok := <-events: + if !ok { + // Emit the closing events + finalEvents, agUsage := processor.Finish() + if len(finalEvents) > 0 { + _, _ = c.Writer.Write(finalEvents) + flusher.Flush() + } + return &antigravityStreamResult{usage: convertUsage(agUsage), firstTokenMs: firstTokenMs}, nil + } + if ev.err != nil { + if errors.Is(ev.err, bufio.ErrTooLong) { + log.Printf("SSE line too long (antigravity): max_size=%d error=%v", maxLineSize, ev.err) + sendErrorEvent("response_too_large") + return &antigravityStreamResult{usage: convertUsage(nil), firstTokenMs: firstTokenMs}, ev.err + } + sendErrorEvent("stream_read_error") + return nil, fmt.Errorf("stream read error: %w", ev.err) + } + + line := ev.line + // Process the SSE line and convert it to Claude events + claudeEvents := processor.ProcessLine(strings.TrimRight(line, "\r\n")) + + if len(claudeEvents) > 0 { + if 
firstTokenMs == nil { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + if _, writeErr := c.Writer.Write(claudeEvents); writeErr != nil { + finalEvents, agUsage := processor.Finish() + if len(finalEvents) > 0 { + _, _ = c.Writer.Write(finalEvents) + } + sendErrorEvent("write_failed") + return &antigravityStreamResult{usage: convertUsage(agUsage), firstTokenMs: firstTokenMs}, writeErr + } + flusher.Flush() + } + + case <-intervalCh: + lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) + if time.Since(lastRead) < streamInterval { + continue + } + log.Printf("Stream data interval timeout (antigravity)") + // Note: this function has no account context, so HandleStreamTimeout cannot be invoked + sendErrorEvent("stream_timeout") + return &antigravityStreamResult{usage: convertUsage(nil), firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") + } + } + +} + +// extractImageSize extracts the image_size parameter from a Gemini request +func (s *AntigravityGatewayService) extractImageSize(body []byte) string { + var req antigravity.GeminiRequest + if err := json.Unmarshal(body, &req); err != nil { + return "2K" // default 2K + } + + if req.GenerationConfig != nil && req.GenerationConfig.ImageConfig != nil { + size := strings.ToUpper(strings.TrimSpace(req.GenerationConfig.ImageConfig.ImageSize)) + if size == "1K" || size == "2K" || size == "4K" { + return size + } + } + + return "2K" // default 2K +} + +// isImageGenerationModel reports whether the model is an image generation model +// Supported models: gemini-3-pro-image, gemini-3-pro-image-preview, gemini-2.5-flash-image, etc. +func isImageGenerationModel(model string) bool { + modelLower := strings.ToLower(model) + // Strip the models/ prefix + modelLower = strings.TrimPrefix(modelLower, "models/") + + // Exact match or prefix match + return modelLower == "gemini-3-pro-image" || + modelLower == "gemini-3-pro-image-preview" || + strings.HasPrefix(modelLower, "gemini-3-pro-image-") || + modelLower == "gemini-2.5-flash-image" || + modelLower == "gemini-2.5-flash-image-preview" || + strings.HasPrefix(modelLower, "gemini-2.5-flash-image-") +}
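Together these two helpers drive image billing: the mapped model decides whether an image was produced at all, and the requested size picks the price tier. A quick illustration with made-up inputs:

svc := &AntigravityGatewayService{}
// Size is normalized to upper case; anything other than 1K/2K/4K falls back to 2K.
body := []byte(`{"generationConfig":{"imageConfig":{"imageSize":"4k"}}}`)
size := svc.extractImageSize(body) // "4K"
// The models/ prefix is stripped and matching is case-insensitive.
imageCount := 0
if isImageGenerationModel("models/Gemini-3-Pro-Image-Preview") {
	imageCount = 1 // the API yields exactly one image per request
}
_, _ = size, imageCount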
 diff --git a/backend/internal/service/antigravity_gateway_service_test.go b/backend/internal/service/antigravity_gateway_service_test.go new file mode 100644 index 00000000..05ad9bbd --- /dev/null +++ b/backend/internal/service/antigravity_gateway_service_test.go @@ -0,0 +1,83 @@ +package service + +import ( + "encoding/json" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" + "github.com/stretchr/testify/require" +) + +func TestStripSignatureSensitiveBlocksFromClaudeRequest(t *testing.T) { + req := &antigravity.ClaudeRequest{ + Model: "claude-sonnet-4-5", + Thinking: &antigravity.ThinkingConfig{ + Type: "enabled", + BudgetTokens: 1024, + }, + Messages: []antigravity.ClaudeMessage{ + { + Role: "assistant", + Content: json.RawMessage(`[ + {"type":"thinking","thinking":"secret plan","signature":""}, + {"type":"tool_use","id":"t1","name":"Bash","input":{"command":"ls"}} + ]`), + }, + { + Role: "user", + Content: json.RawMessage(`[ + {"type":"tool_result","tool_use_id":"t1","content":"ok","is_error":false}, + {"type":"redacted_thinking","data":"..."} + ]`), + }, + }, + } + + changed, err := stripSignatureSensitiveBlocksFromClaudeRequest(req) + require.NoError(t, err) + require.True(t, changed) + require.Nil(t, req.Thinking) + + require.Len(t, req.Messages, 2) + + var blocks0 []map[string]any + require.NoError(t, json.Unmarshal(req.Messages[0].Content, &blocks0)) + require.Len(t, blocks0, 2) + require.Equal(t, "text", blocks0[0]["type"]) + require.Equal(t, "secret plan", blocks0[0]["text"]) + require.Equal(t, "text", blocks0[1]["type"]) + + var blocks1 []map[string]any + require.NoError(t, json.Unmarshal(req.Messages[1].Content, &blocks1)) + require.Len(t, blocks1, 1) + require.Equal(t, "text", blocks1[0]["type"]) + require.NotEmpty(t, blocks1[0]["text"]) +} + +func TestStripThinkingFromClaudeRequest_DoesNotDowngradeTools(t *testing.T) { + req := &antigravity.ClaudeRequest{ + Model: "claude-sonnet-4-5", + Thinking: &antigravity.ThinkingConfig{ + Type: "enabled", + BudgetTokens: 1024, + }, + Messages: []antigravity.ClaudeMessage{ + { + Role: "assistant", + Content: json.RawMessage(`[{"type":"thinking","thinking":"secret plan"},{"type":"tool_use","id":"t1","name":"Bash","input":{"command":"ls"}}]`), + }, + }, + } + + changed, err := stripThinkingFromClaudeRequest(req) + require.NoError(t, err) + require.True(t, changed) + require.Nil(t, req.Thinking) + + var blocks []map[string]any + require.NoError(t, json.Unmarshal(req.Messages[0].Content, &blocks)) + require.Len(t, blocks, 2) + require.Equal(t, "text", blocks[0]["type"]) + require.Equal(t, "secret plan", blocks[0]["text"]) + require.Equal(t, "tool_use", blocks[1]["type"]) +} diff --git a/backend/internal/service/antigravity_image_test.go b/backend/internal/service/antigravity_image_test.go new file mode 100644 index 00000000..7fd2f843 --- /dev/null +++ b/backend/internal/service/antigravity_image_test.go @@ -0,0 +1,123 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestIsImageGenerationModel_GeminiProImage tests recognition of gemini-3-pro-image +func TestIsImageGenerationModel_GeminiProImage(t *testing.T) { + require.True(t, isImageGenerationModel("gemini-3-pro-image")) + require.True(t, isImageGenerationModel("gemini-3-pro-image-preview")) + require.True(t, isImageGenerationModel("models/gemini-3-pro-image")) +} + +// TestIsImageGenerationModel_GeminiFlashImage tests recognition of gemini-2.5-flash-image +func TestIsImageGenerationModel_GeminiFlashImage(t *testing.T) { + require.True(t, isImageGenerationModel("gemini-2.5-flash-image")) + require.True(t, isImageGenerationModel("gemini-2.5-flash-image-preview")) +} + +// TestIsImageGenerationModel_RegularModel verifies ordinary models are not classified as image models +func TestIsImageGenerationModel_RegularModel(t *testing.T) { + require.False(t, isImageGenerationModel("claude-3-opus")) + require.False(t, isImageGenerationModel("claude-sonnet-4-20250514")) + require.False(t, isImageGenerationModel("gpt-4o")) + require.False(t, isImageGenerationModel("gemini-2.5-pro")) // not an image model + require.False(t, isImageGenerationModel("gemini-2.5-flash")) + // verify custom model names containing the keywords are not falsely matched + require.False(t, isImageGenerationModel("my-gemini-3-pro-image-test")) + require.False(t, isImageGenerationModel("custom-gemini-2.5-flash-image-wrapper")) +} + +// TestIsImageGenerationModel_CaseInsensitive tests case insensitivity +func TestIsImageGenerationModel_CaseInsensitive(t *testing.T) { + require.True(t, isImageGenerationModel("GEMINI-3-PRO-IMAGE")) + require.True(t, isImageGenerationModel("Gemini-3-Pro-Image")) + require.True(t, isImageGenerationModel("GEMINI-2.5-FLASH-IMAGE")) +} + +// TestExtractImageSize_ValidSizes tests parsing of valid sizes
func TestExtractImageSize_ValidSizes(t *testing.T) { + svc := &AntigravityGatewayService{} + + // 1K + body := []byte(`{"generationConfig":{"imageConfig":{"imageSize":"1K"}}}`) + require.Equal(t, "1K", svc.extractImageSize(body)) + + // 2K + body = []byte(`{"generationConfig":{"imageConfig":{"imageSize":"2K"}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + // 4K + body = 
[]byte(`{"generationConfig":{"imageConfig":{"imageSize":"4K"}}}`) + require.Equal(t, "4K", svc.extractImageSize(body)) +} + +// TestExtractImageSize_CaseInsensitive tests case insensitivity +func TestExtractImageSize_CaseInsensitive(t *testing.T) { + svc := &AntigravityGatewayService{} + + body := []byte(`{"generationConfig":{"imageConfig":{"imageSize":"1k"}}}`) + require.Equal(t, "1K", svc.extractImageSize(body)) + + body = []byte(`{"generationConfig":{"imageConfig":{"imageSize":"4k"}}}`) + require.Equal(t, "4K", svc.extractImageSize(body)) +} + +// TestExtractImageSize_Default tests that a missing imageConfig returns the default 2K +func TestExtractImageSize_Default(t *testing.T) { + svc := &AntigravityGatewayService{} + + // no generationConfig + body := []byte(`{"contents":[]}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + // generationConfig present but no imageConfig + body = []byte(`{"generationConfig":{"temperature":0.7}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + // imageConfig present but no imageSize + body = []byte(`{"generationConfig":{"imageConfig":{}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) +} + +// TestExtractImageSize_InvalidJSON tests that invalid JSON returns the default 2K +func TestExtractImageSize_InvalidJSON(t *testing.T) { + svc := &AntigravityGatewayService{} + + body := []byte(`not valid json`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + body = []byte(`{"broken":`) + require.Equal(t, "2K", svc.extractImageSize(body)) +} + +// TestExtractImageSize_EmptySize tests that an empty imageSize returns the default 2K +func TestExtractImageSize_EmptySize(t *testing.T) { + svc := &AntigravityGatewayService{} + + body := []byte(`{"generationConfig":{"imageConfig":{"imageSize":""}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + // whitespace only + body = []byte(`{"generationConfig":{"imageConfig":{"imageSize":" "}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) +} + +// TestExtractImageSize_InvalidSize tests that invalid sizes return the default 2K +func TestExtractImageSize_InvalidSize(t *testing.T) { + svc := &AntigravityGatewayService{} + + body := []byte(`{"generationConfig":{"imageConfig":{"imageSize":"3K"}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + body = []byte(`{"generationConfig":{"imageConfig":{"imageSize":"8K"}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) + + body = []byte(`{"generationConfig":{"imageConfig":{"imageSize":"invalid"}}}`) + require.Equal(t, "2K", svc.extractImageSize(body)) +} diff --git a/backend/internal/service/antigravity_model_mapping_test.go b/backend/internal/service/antigravity_model_mapping_test.go new file mode 100644 index 00000000..39000e4f --- /dev/null +++ b/backend/internal/service/antigravity_model_mapping_test.go @@ -0,0 +1,269 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsAntigravityModelSupported(t *testing.T) { + tests := []struct { + name string + model string + expected bool + }{ + // directly supported models + {"direct support - claude-sonnet-4-5", "claude-sonnet-4-5", true}, + {"direct support - claude-opus-4-5-thinking", "claude-opus-4-5-thinking", true}, + {"direct support - claude-sonnet-4-5-thinking", "claude-sonnet-4-5-thinking", true}, + {"direct support - gemini-2.5-flash", "gemini-2.5-flash", true}, + {"direct support - gemini-2.5-flash-lite", "gemini-2.5-flash-lite", true}, + {"direct support - gemini-3-pro-high", "gemini-3-pro-high", true}, + + // mappable models + {"mappable - claude-3-5-sonnet-20241022", "claude-3-5-sonnet-20241022", true}, + {"mappable - claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20240620", true}, + {"mappable - claude-opus-4", "claude-opus-4", true}, + {"mappable - claude-haiku-4", 
"claude-haiku-4", true}, + {"可映射 - claude-3-haiku-20240307", "claude-3-haiku-20240307", true}, + + // Gemini 前缀透传 + {"Gemini前缀 - gemini-1.5-pro", "gemini-1.5-pro", true}, + {"Gemini前缀 - gemini-unknown-model", "gemini-unknown-model", true}, + {"Gemini前缀 - gemini-future-version", "gemini-future-version", true}, + + // Claude 前缀兜底 + {"Claude前缀 - claude-unknown-model", "claude-unknown-model", true}, + {"Claude前缀 - claude-3-opus-20240229", "claude-3-opus-20240229", true}, + {"Claude前缀 - claude-future-version", "claude-future-version", true}, + + // 不支持的模型 + {"不支持 - gpt-4", "gpt-4", false}, + {"不支持 - gpt-4o", "gpt-4o", false}, + {"不支持 - llama-3", "llama-3", false}, + {"不支持 - mistral-7b", "mistral-7b", false}, + {"不支持 - 空字符串", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsAntigravityModelSupported(tt.model) + require.Equal(t, tt.expected, got, "model: %s", tt.model) + }) + } +} + +func TestAntigravityGatewayService_GetMappedModel(t *testing.T) { + svc := &AntigravityGatewayService{} + + tests := []struct { + name string + requestedModel string + accountMapping map[string]string + expected string + }{ + // 1. 账户级映射优先(注意:model_mapping 在 credentials 中存储为 map[string]any) + { + name: "账户映射优先", + requestedModel: "claude-3-5-sonnet-20241022", + accountMapping: map[string]string{"claude-3-5-sonnet-20241022": "custom-model"}, + expected: "custom-model", + }, + { + name: "账户映射覆盖系统映射", + requestedModel: "claude-opus-4", + accountMapping: map[string]string{"claude-opus-4": "my-opus"}, + expected: "my-opus", + }, + + // 2. 系统默认映射 + { + name: "系统映射 - claude-3-5-sonnet-20241022", + requestedModel: "claude-3-5-sonnet-20241022", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-3-5-sonnet-20240620", + requestedModel: "claude-3-5-sonnet-20240620", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-opus-4", + requestedModel: "claude-opus-4", + accountMapping: nil, + expected: "claude-opus-4-5-thinking", + }, + { + name: "系统映射 - claude-opus-4-5-20251101", + requestedModel: "claude-opus-4-5-20251101", + accountMapping: nil, + expected: "claude-opus-4-5-thinking", + }, + { + name: "系统映射 - claude-haiku-4 → claude-sonnet-4-5", + requestedModel: "claude-haiku-4", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-haiku-4-5 → claude-sonnet-4-5", + requestedModel: "claude-haiku-4-5", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-3-haiku-20240307 → claude-sonnet-4-5", + requestedModel: "claude-3-haiku-20240307", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-haiku-4-5-20251001 → claude-sonnet-4-5", + requestedModel: "claude-haiku-4-5-20251001", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "系统映射 - claude-sonnet-4-5-20250929", + requestedModel: "claude-sonnet-4-5-20250929", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + + // 3. Gemini 透传 + { + name: "Gemini透传 - gemini-2.5-flash", + requestedModel: "gemini-2.5-flash", + accountMapping: nil, + expected: "gemini-2.5-flash", + }, + { + name: "Gemini透传 - gemini-1.5-pro", + requestedModel: "gemini-1.5-pro", + accountMapping: nil, + expected: "gemini-1.5-pro", + }, + { + name: "Gemini透传 - gemini-future-model", + requestedModel: "gemini-future-model", + accountMapping: nil, + expected: "gemini-future-model", + }, + + // 4. 
Directly supported models + { + name: "direct support - claude-sonnet-4-5", + requestedModel: "claude-sonnet-4-5", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "direct support - claude-opus-4-5-thinking", + requestedModel: "claude-opus-4-5-thinking", + accountMapping: nil, + expected: "claude-opus-4-5-thinking", + }, + { + name: "direct support - claude-sonnet-4-5-thinking", + requestedModel: "claude-sonnet-4-5-thinking", + accountMapping: nil, + expected: "claude-sonnet-4-5-thinking", + }, + + // 5. Default fallback (unknown claude models) + { + name: "default - claude-unknown", + requestedModel: "claude-unknown", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + { + name: "default - claude-3-opus-20240229", + requestedModel: "claude-3-opus-20240229", + accountMapping: nil, + expected: "claude-sonnet-4-5", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{ + Platform: PlatformAntigravity, + } + if tt.accountMapping != nil { + // GetModelMapping expects model_mapping in map[string]any form + mappingAny := make(map[string]any) + for k, v := range tt.accountMapping { + mappingAny[k] = v + } + account.Credentials = map[string]any{ + "model_mapping": mappingAny, + } + } + + got := svc.getMappedModel(account, tt.requestedModel) + require.Equal(t, tt.expected, got, "model: %s", tt.requestedModel) + }) + } +} + +func TestAntigravityGatewayService_GetMappedModel_EdgeCases(t *testing.T) { + svc := &AntigravityGatewayService{} + + tests := []struct { + name string + requestedModel string + expected string + }{ + // empty string falls back to the default + {"empty string", "", "claude-sonnet-4-5"}, + + // non-claude/gemini prefixes fall back to the default + {"non-claude/gemini prefix - gpt", "gpt-4", "claude-sonnet-4-5"}, + {"non-claude/gemini prefix - llama", "llama-3", "claude-sonnet-4-5"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{Platform: PlatformAntigravity} + got := svc.getMappedModel(account, tt.requestedModel) + require.Equal(t, tt.expected, got) + }) + } +} + +func TestAntigravityGatewayService_IsModelSupported(t *testing.T) { + svc := &AntigravityGatewayService{} + + tests := []struct { + name string + model string + expected bool + }{ + // directly supported + {"direct support - claude-sonnet-4-5", "claude-sonnet-4-5", true}, + {"direct support - gemini-3-flash", "gemini-3-flash", true}, + + // mappable + {"mappable - claude-opus-4", "claude-opus-4", true}, + + // prefix passthrough + {"gemini prefix", "gemini-unknown", true}, + {"claude prefix", "claude-unknown", true}, + + // unsupported + {"unsupported - gpt-4", "gpt-4", false}, + {"unsupported - empty string", "", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.IsModelSupported(tt.model) + require.Equal(t, tt.expected, got) + }) + } +} diff --git a/backend/internal/service/antigravity_oauth_service.go b/backend/internal/service/antigravity_oauth_service.go new file mode 100644 index 00000000..ecf0a553 --- /dev/null +++ b/backend/internal/service/antigravity_oauth_service.go @@ -0,0 +1,276 @@ +package service + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" +) + +type AntigravityOAuthService struct { + sessionStore *antigravity.SessionStore + proxyRepo ProxyRepository +} + +func NewAntigravityOAuthService(proxyRepo ProxyRepository) *AntigravityOAuthService { + return &AntigravityOAuthService{ + sessionStore: antigravity.NewSessionStore(), + proxyRepo: proxyRepo, + } +} + +// AntigravityAuthURLResult is the result of generating an authorization URL +type AntigravityAuthURLResult struct { + AuthURL string `json:"auth_url"` + 
SessionID string `json:"session_id"` + State string `json:"state"` +} + +// GenerateAuthURL generates a Google OAuth authorization URL +func (s *AntigravityOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64) (*AntigravityAuthURLResult, error) { + state, err := antigravity.GenerateState() + if err != nil { + return nil, fmt.Errorf("failed to generate state: %w", err) + } + + codeVerifier, err := antigravity.GenerateCodeVerifier() + if err != nil { + return nil, fmt.Errorf("failed to generate code_verifier: %w", err) + } + + sessionID, err := antigravity.GenerateSessionID() + if err != nil { + return nil, fmt.Errorf("failed to generate session_id: %w", err) + } + + var proxyURL string + if proxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *proxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + session := &antigravity.OAuthSession{ + State: state, + CodeVerifier: codeVerifier, + ProxyURL: proxyURL, + CreatedAt: time.Now(), + } + s.sessionStore.Set(sessionID, session) + + codeChallenge := antigravity.GenerateCodeChallenge(codeVerifier) + authURL := antigravity.BuildAuthorizationURL(state, codeChallenge) + + return &AntigravityAuthURLResult{ + AuthURL: authURL, + SessionID: sessionID, + State: state, + }, nil +} +
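GenerateAuthURL is a textbook PKCE setup: a random state, a random code verifier, an S256 challenge derived from the verifier, and a server-side session keyed by a random ID so the callback can recover the verifier later. For orientation, the standard RFC 7636 S256 transform — which antigravity.GenerateCodeChallenge is presumably implementing — looks like this (a sketch, not that package's actual code):

// S256 code challenge per RFC 7636: BASE64URL(SHA256(verifier)) without padding.
// Requires crypto/sha256 and encoding/base64.
func codeChallengeS256(verifier string) string {
	sum := sha256.Sum256([]byte(verifier))
	return base64.RawURLEncoding.EncodeToString(sum[:])
}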
+// AntigravityExchangeCodeInput is the input for exchanging a code +type AntigravityExchangeCodeInput struct { + SessionID string + State string + Code string + ProxyID *int64 +} + +// AntigravityTokenInfo holds token information +type AntigravityTokenInfo struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int64 `json:"expires_in"` + ExpiresAt int64 `json:"expires_at"` + TokenType string `json:"token_type"` + Email string `json:"email,omitempty"` + ProjectID string `json:"project_id,omitempty"` +} + +// ExchangeCode exchanges an authorization code for tokens +func (s *AntigravityOAuthService) ExchangeCode(ctx context.Context, input *AntigravityExchangeCodeInput) (*AntigravityTokenInfo, error) { + session, ok := s.sessionStore.Get(input.SessionID) + if !ok { + return nil, fmt.Errorf("session not found or expired") + } + + if strings.TrimSpace(input.State) == "" || input.State != session.State { + return nil, fmt.Errorf("invalid state") + } + + // Determine the proxy URL + proxyURL := session.ProxyURL + if input.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + client := antigravity.NewClient(proxyURL) + + // Exchange the token + tokenResp, err := client.ExchangeCode(ctx, input.Code, session.CodeVerifier) + if err != nil { + return nil, fmt.Errorf("token exchange failed: %w", err) + } + + // Delete the session + s.sessionStore.Delete(input.SessionID) + + // Compute the expiry time (minus a 5-minute safety window) + expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - 300 + + result := &AntigravityTokenInfo{ + AccessToken: tokenResp.AccessToken, + RefreshToken: tokenResp.RefreshToken, + ExpiresIn: tokenResp.ExpiresIn, + ExpiresAt: expiresAt, + TokenType: tokenResp.TokenType, + } + + // Fetch user info + userInfo, err := client.GetUserInfo(ctx, tokenResp.AccessToken) + if err != nil { + fmt.Printf("[AntigravityOAuth] warning: failed to fetch user info: %v\n", err) + } else { + result.Email = userInfo.Email + } + + // Fetch project_id (some account types may not have one) + loadResp, _, err := client.LoadCodeAssist(ctx, tokenResp.AccessToken) + if err != nil { + fmt.Printf("[AntigravityOAuth] warning: failed to fetch project_id: %v\n", err) + } else if loadResp != nil && loadResp.CloudAICompanionProject != "" { + result.ProjectID = loadResp.CloudAICompanionProject + } + + // Fallback: generate a random project_id + if result.ProjectID == "" { + result.ProjectID = antigravity.GenerateMockProjectID() + fmt.Printf("[AntigravityOAuth] using randomly generated project_id: %s\n", result.ProjectID) + } + + return result, nil +} + +// RefreshToken refreshes the token +func (s *AntigravityOAuthService) RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*AntigravityTokenInfo, error) { + var lastErr error + + for attempt := 0; attempt <= 3; attempt++ { + if attempt > 0 { + backoff := time.Duration(1<<attempt) * time.Second + if backoff > 30*time.Second { + backoff = 30 * time.Second + } + time.Sleep(backoff) + } + + client := antigravity.NewClient(proxyURL) + tokenResp, err := client.RefreshToken(ctx, refreshToken) + if err == nil { + now := time.Now() + expiresAt := now.Unix() + tokenResp.ExpiresIn - 300 + fmt.Printf("[AntigravityOAuth] Token refreshed: expires_in=%d, expires_at=%d (%s)\n", + tokenResp.ExpiresIn, expiresAt, time.Unix(expiresAt, 0).Format("2006-01-02 15:04:05")) + return &AntigravityTokenInfo{ + AccessToken: tokenResp.AccessToken, + RefreshToken: tokenResp.RefreshToken, + ExpiresIn: tokenResp.ExpiresIn, + ExpiresAt: expiresAt, + TokenType: tokenResp.TokenType, + }, nil + } + + if isNonRetryableAntigravityOAuthError(err) { + return nil, err + } + lastErr = err + } + + return nil, fmt.Errorf("token refresh failed (after retries): %w", lastErr) +} + +func isNonRetryableAntigravityOAuthError(err error) bool { + msg := err.Error() + nonRetryable := []string{ + "invalid_grant", + "invalid_client", + "unauthorized_client", + "access_denied", + } + for _, needle := range nonRetryable { + if strings.Contains(msg, needle) { + return true + } + } + return false +} + +// RefreshAccountToken refreshes an account's token +func (s *AntigravityOAuthService) RefreshAccountToken(ctx context.Context, account *Account) (*AntigravityTokenInfo, error) { + if account.Platform != PlatformAntigravity || account.Type != AccountTypeOAuth { + return nil, fmt.Errorf("not an Antigravity OAuth account") + } + + refreshToken := account.GetCredential("refresh_token") + if strings.TrimSpace(refreshToken) == "" { + return nil, fmt.Errorf("no refresh_token available") + } + + var proxyURL string + if account.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *account.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + tokenInfo, err := s.RefreshToken(ctx, refreshToken, proxyURL) + if err != nil { + return nil, err + } + + // Preserve the existing project_id and email + existingProjectID := strings.TrimSpace(account.GetCredential("project_id")) + if existingProjectID != "" { + tokenInfo.ProjectID = existingProjectID + } + existingEmail := strings.TrimSpace(account.GetCredential("email")) + if existingEmail != "" { + tokenInfo.Email = existingEmail + } + + return tokenInfo, nil +} + +// BuildAccountCredentials builds account credentials +func (s *AntigravityOAuthService) BuildAccountCredentials(tokenInfo *AntigravityTokenInfo) map[string]any { + creds := map[string]any{ + "access_token": tokenInfo.AccessToken, + "expires_at": strconv.FormatInt(tokenInfo.ExpiresAt, 10), + } + if tokenInfo.RefreshToken != "" { + creds["refresh_token"] = tokenInfo.RefreshToken + } + if tokenInfo.TokenType != "" { + creds["token_type"] = tokenInfo.TokenType + } + if tokenInfo.Email != "" { + creds["email"] = tokenInfo.Email + } + if tokenInfo.ProjectID != "" { + creds["project_id"] = tokenInfo.ProjectID + } + return creds +} + +// Stop stops the service +func (s *AntigravityOAuthService) Stop() { + s.sessionStore.Stop() +} diff --git a/backend/internal/service/antigravity_quota_fetcher.go b/backend/internal/service/antigravity_quota_fetcher.go new file mode 100644 index 00000000..c9024e33 --- /dev/null +++ 
b/backend/internal/service/antigravity_quota_fetcher.go @@ -0,0 +1,111 @@ +package service + +import ( + "context" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" +) + +// AntigravityQuotaFetcher fetches quota from the Antigravity API +type AntigravityQuotaFetcher struct { + proxyRepo ProxyRepository +} + +// NewAntigravityQuotaFetcher creates an AntigravityQuotaFetcher +func NewAntigravityQuotaFetcher(proxyRepo ProxyRepository) *AntigravityQuotaFetcher { + return &AntigravityQuotaFetcher{proxyRepo: proxyRepo} +} + +// CanFetch reports whether quota can be fetched for this account +func (f *AntigravityQuotaFetcher) CanFetch(account *Account) bool { + if account.Platform != PlatformAntigravity { + return false + } + accessToken := account.GetCredential("access_token") + return accessToken != "" +} + +// FetchQuota fetches quota information for an Antigravity account +func (f *AntigravityQuotaFetcher) FetchQuota(ctx context.Context, account *Account, proxyURL string) (*QuotaResult, error) { + accessToken := account.GetCredential("access_token") + projectID := account.GetCredential("project_id") + + // Generate a random project_id when none is present + if projectID == "" { + projectID = antigravity.GenerateMockProjectID() + } + + client := antigravity.NewClient(proxyURL) + + // Call the API to fetch quota + modelsResp, modelsRaw, err := client.FetchAvailableModels(ctx, accessToken, projectID) + if err != nil { + return nil, err + } + + // Convert to UsageInfo + usageInfo := f.buildUsageInfo(modelsResp) + + return &QuotaResult{ + UsageInfo: usageInfo, + Raw: modelsRaw, + }, nil +} + +// buildUsageInfo converts the API response into UsageInfo +func (f *AntigravityQuotaFetcher) buildUsageInfo(modelsResp *antigravity.FetchAvailableModelsResponse) *UsageInfo { + now := time.Now() + info := &UsageInfo{ + UpdatedAt: &now, + AntigravityQuota: make(map[string]*AntigravityModelQuota), + } + + // Iterate over all models and populate AntigravityQuota + for modelName, modelInfo := range modelsResp.Models { + if modelInfo.QuotaInfo == nil { + continue + } + + // remainingFraction is the remaining ratio (0.0-1.0); convert it to a utilization percentage + utilization := int((1.0 - modelInfo.QuotaInfo.RemainingFraction) * 100) + + info.AntigravityQuota[modelName] = &AntigravityModelQuota{ + Utilization: utilization, + ResetTime: modelInfo.QuotaInfo.ResetTime, + } + } + + // Also set FiveHour for backward-compatible display (using a primary model) + priorityModels := []string{"claude-sonnet-4-20250514", "claude-sonnet-4", "gemini-2.5-pro"} + for _, modelName := range priorityModels { + if modelInfo, ok := modelsResp.Models[modelName]; ok && modelInfo.QuotaInfo != nil { + utilization := (1.0 - modelInfo.QuotaInfo.RemainingFraction) * 100 + progress := &UsageProgress{ + Utilization: utilization, + } + if modelInfo.QuotaInfo.ResetTime != "" { + if resetTime, err := time.Parse(time.RFC3339, modelInfo.QuotaInfo.ResetTime); err == nil { + progress.ResetsAt = &resetTime + progress.RemainingSeconds = int(time.Until(resetTime).Seconds()) + } + } + info.FiveHour = progress + break + } + } + + return info +} + +// GetProxyURL returns the account's proxy URL +func (f *AntigravityQuotaFetcher) GetProxyURL(ctx context.Context, account *Account) string { + if account.ProxyID == nil || f.proxyRepo == nil { + return "" + } + proxy, err := f.proxyRepo.GetByID(ctx, *account.ProxyID) + if err != nil || proxy == nil { + return "" + } + return proxy.URL() +}
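The only arithmetic here is the fraction-to-percentage flip, plus an RFC 3339 reset-time parse for the compatibility field. A worked check with made-up numbers:

// remainingFraction 0.25 → 75% used (int truncates: 0.333 remaining → 66, not 67).
utilization := int((1.0 - 0.25) * 100) // 75
// ResetTime strings are RFC 3339, e.g. "2026-01-15T20:00:00Z":
if resetTime, err := time.Parse(time.RFC3339, "2026-01-15T20:00:00Z"); err == nil {
	_ = int(time.Until(resetTime).Seconds()) // becomes RemainingSeconds
}
_ = utilization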
"antigravity_quota_scopes" + +// AntigravityQuotaScope 表示 Antigravity 的配额域 +type AntigravityQuotaScope string + +const ( + AntigravityQuotaScopeClaude AntigravityQuotaScope = "claude" + AntigravityQuotaScopeGeminiText AntigravityQuotaScope = "gemini_text" + AntigravityQuotaScopeGeminiImage AntigravityQuotaScope = "gemini_image" +) + +// resolveAntigravityQuotaScope 根据模型名称解析配额域 +func resolveAntigravityQuotaScope(requestedModel string) (AntigravityQuotaScope, bool) { + model := normalizeAntigravityModelName(requestedModel) + if model == "" { + return "", false + } + switch { + case strings.HasPrefix(model, "claude-"): + return AntigravityQuotaScopeClaude, true + case strings.HasPrefix(model, "gemini-"): + if isImageGenerationModel(model) { + return AntigravityQuotaScopeGeminiImage, true + } + return AntigravityQuotaScopeGeminiText, true + default: + return "", false + } +} + +func normalizeAntigravityModelName(model string) string { + normalized := strings.ToLower(strings.TrimSpace(model)) + normalized = strings.TrimPrefix(normalized, "models/") + return normalized +} + +// IsSchedulableForModel 结合 Antigravity 配额域限流判断是否可调度 +func (a *Account) IsSchedulableForModel(requestedModel string) bool { + if a == nil { + return false + } + if !a.IsSchedulable() { + return false + } + if a.Platform != PlatformAntigravity { + return true + } + scope, ok := resolveAntigravityQuotaScope(requestedModel) + if !ok { + return true + } + resetAt := a.antigravityQuotaScopeResetAt(scope) + if resetAt == nil { + return true + } + now := time.Now() + return !now.Before(*resetAt) +} + +func (a *Account) antigravityQuotaScopeResetAt(scope AntigravityQuotaScope) *time.Time { + if a == nil || a.Extra == nil || scope == "" { + return nil + } + rawScopes, ok := a.Extra[antigravityQuotaScopesKey].(map[string]any) + if !ok { + return nil + } + rawScope, ok := rawScopes[string(scope)].(map[string]any) + if !ok { + return nil + } + resetAtRaw, ok := rawScope["rate_limit_reset_at"].(string) + if !ok || strings.TrimSpace(resetAtRaw) == "" { + return nil + } + resetAt, err := time.Parse(time.RFC3339, resetAtRaw) + if err != nil { + return nil + } + return &resetAt +} diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go new file mode 100644 index 00000000..c5dc55db --- /dev/null +++ b/backend/internal/service/antigravity_token_provider.go @@ -0,0 +1,130 @@ +package service + +import ( + "context" + "errors" + "log" + "strconv" + "strings" + "time" +) + +const ( + antigravityTokenRefreshSkew = 3 * time.Minute + antigravityTokenCacheSkew = 5 * time.Minute +) + +// AntigravityTokenCache Token 缓存接口(复用 GeminiTokenCache 接口定义) +type AntigravityTokenCache = GeminiTokenCache + +// AntigravityTokenProvider 管理 Antigravity 账户的 access_token +type AntigravityTokenProvider struct { + accountRepo AccountRepository + tokenCache AntigravityTokenCache + antigravityOAuthService *AntigravityOAuthService +} + +func NewAntigravityTokenProvider( + accountRepo AccountRepository, + tokenCache AntigravityTokenCache, + antigravityOAuthService *AntigravityOAuthService, +) *AntigravityTokenProvider { + return &AntigravityTokenProvider{ + accountRepo: accountRepo, + tokenCache: tokenCache, + antigravityOAuthService: antigravityOAuthService, + } +} + +// GetAccessToken 获取有效的 access_token +func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != 
 diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go new file mode 100644 index 00000000..c5dc55db --- /dev/null +++ b/backend/internal/service/antigravity_token_provider.go @@ -0,0 +1,130 @@ +package service + +import ( + "context" + "errors" + "log" + "strconv" + "strings" + "time" +) + +const ( + antigravityTokenRefreshSkew = 3 * time.Minute + antigravityTokenCacheSkew = 5 * time.Minute +) + +// AntigravityTokenCache is the token cache interface (reuses the GeminiTokenCache interface definition) +type AntigravityTokenCache = GeminiTokenCache + +// AntigravityTokenProvider manages access_tokens for Antigravity accounts +type AntigravityTokenProvider struct { + accountRepo AccountRepository + tokenCache AntigravityTokenCache + antigravityOAuthService *AntigravityOAuthService +} + +func NewAntigravityTokenProvider( + accountRepo AccountRepository, + tokenCache AntigravityTokenCache, + antigravityOAuthService *AntigravityOAuthService, +) *AntigravityTokenProvider { + return &AntigravityTokenProvider{ + accountRepo: accountRepo, + tokenCache: tokenCache, + antigravityOAuthService: antigravityOAuthService, + } +} + +// GetAccessToken returns a valid access_token +func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != PlatformAntigravity || account.Type != AccountTypeOAuth { + return "", errors.New("not an antigravity oauth account") + } + + cacheKey := AntigravityTokenCacheKey(account) + + // 1. Try the cache first + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + } + + // 2. Refresh when the token is about to expire + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= antigravityTokenRefreshSkew + if needsRefresh && p.tokenCache != nil { + locked, err := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if err == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // Re-check the cache after acquiring the lock (another worker may have refreshed already) + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + + // Load the latest account state from the database + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= antigravityTokenRefreshSkew { + if p.antigravityOAuthService == nil { + return "", errors.New("antigravity oauth service not configured") + } + tokenInfo, err := p.antigravityOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + return "", err + } + newCredentials := p.antigravityOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + log.Printf("[AntigravityTokenProvider] Failed to update account credentials: %v", updateErr) + } + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + + accessToken := account.GetCredential("access_token") + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found in credentials") + } + + // 3. Store in the cache + if p.tokenCache != nil { + ttl := 30 * time.Minute + if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > antigravityTokenCacheSkew: + ttl = until - antigravityTokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) + } + + return accessToken, nil +} + +func AntigravityTokenCacheKey(account *Account) string { + projectID := strings.TrimSpace(account.GetCredential("project_id")) + if projectID != "" { + return "ag:" + projectID + } + return "ag:account:" + strconv.FormatInt(account.ID, 10) +}
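The TTL chosen in step 3 keeps the cached token strictly shorter-lived than the credential: 5 minutes (antigravityTokenCacheSkew) are shaved off when there is room, the raw remainder is used when there is not, and an expired or missing expiry still gets a one-minute floor so a broken account cannot hammer the refresh path. The rule, extracted as a sketch:

// Sketch of the cache-TTL rule in GetAccessToken (antigravityTokenCacheSkew = 5m).
func cacheTTL(until time.Duration) time.Duration {
	switch {
	case until > antigravityTokenCacheSkew:
		return until - antigravityTokenCacheSkew // 40m left → cache 35m
	case until > 0:
		return until // 2m left → cache the full 2m
	default:
		return time.Minute // expired/unknown → 1m floor
	}
}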
 diff --git a/backend/internal/service/antigravity_token_refresher.go b/backend/internal/service/antigravity_token_refresher.go new file mode 100644 index 00000000..9dd4463f --- /dev/null +++ b/backend/internal/service/antigravity_token_refresher.go @@ -0,0 +1,65 @@ +package service + +import ( + "context" + "fmt" + "time" +) + +const ( + // antigravityRefreshWindow is the proactive refresh window for Antigravity tokens: 15 minutes + // Google OAuth tokens are valid for 55 minutes; refresh 15 minutes early + antigravityRefreshWindow = 15 * time.Minute +) + +// AntigravityTokenRefresher implements the TokenRefresher interface +type AntigravityTokenRefresher struct { + antigravityOAuthService *AntigravityOAuthService +} + +func NewAntigravityTokenRefresher(antigravityOAuthService *AntigravityOAuthService) *AntigravityTokenRefresher { + return &AntigravityTokenRefresher{ + antigravityOAuthService: antigravityOAuthService, + } +} + +// CanRefresh reports whether this account can be refreshed +func (r *AntigravityTokenRefresher) CanRefresh(account *Account) bool { + return account.Platform == PlatformAntigravity && account.Type == AccountTypeOAuth +} + +// NeedsRefresh reports whether the account needs a refresh +// Antigravity uses a fixed 15-minute refresh window and ignores the global configuration +func (r *AntigravityTokenRefresher) NeedsRefresh(account *Account, _ time.Duration) bool { + if !r.CanRefresh(account) { + return false + } + expiresAt := account.GetCredentialAsTime("expires_at") + if expiresAt == nil { + return false + } + timeUntilExpiry := time.Until(*expiresAt) + needsRefresh := timeUntilExpiry < antigravityRefreshWindow + if needsRefresh { + fmt.Printf("[AntigravityTokenRefresher] Account %d needs refresh: expires_at=%s, time_until_expiry=%v, window=%v\n", + account.ID, expiresAt.Format("2006-01-02 15:04:05"), timeUntilExpiry, antigravityRefreshWindow) + } + return needsRefresh +} + +// Refresh performs the token refresh +func (r *AntigravityTokenRefresher) Refresh(ctx context.Context, account *Account) (map[string]any, error) { + tokenInfo, err := r.antigravityOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + return nil, err + } + + newCredentials := r.antigravityOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + + return newCredentials, nil +} diff --git a/backend/internal/service/api_key.go b/backend/internal/service/api_key.go new file mode 100644 index 00000000..8c692d09 --- /dev/null +++ b/backend/internal/service/api_key.go @@ -0,0 +1,22 @@ +package service + +import "time" + +type APIKey struct { + ID int64 + UserID int64 + Key string + Name string + GroupID *int64 + Status string + IPWhitelist []string + IPBlacklist []string + CreatedAt time.Time + UpdatedAt time.Time + User *User + Group *Group +} + +func (k *APIKey) IsActive() bool { + return k.Status == StatusActive +}
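Combined with the 5-minute skew subtracted when expires_at is recorded, the fixed 15-minute window means a fresh 60-minute Google token (stored as a 55-minute credential) becomes refresh-eligible roughly 40 minutes after issue. A worked check under those assumptions:

issuedAt := time.Now()
expiresAt := issuedAt.Add(55 * time.Minute) // 60m lifetime minus the 5m safety window
needs := time.Until(expiresAt) < antigravityRefreshWindow
_ = needs // false now; flips to true once under 15 minutes remain (~40m after issue)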
 diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go new file mode 100644 index 00000000..7ce9a8a2 --- /dev/null +++ b/backend/internal/service/api_key_auth_cache.go @@ -0,0 +1,46 @@ +package service + +// APIKeyAuthSnapshot is the API key auth cache snapshot (only the fields required for authentication) +type APIKeyAuthSnapshot struct { + APIKeyID int64 `json:"api_key_id"` + UserID int64 `json:"user_id"` + GroupID *int64 `json:"group_id,omitempty"` + Status string `json:"status"` + IPWhitelist []string `json:"ip_whitelist,omitempty"` + IPBlacklist []string `json:"ip_blacklist,omitempty"` + User APIKeyAuthUserSnapshot `json:"user"` + Group *APIKeyAuthGroupSnapshot `json:"group,omitempty"` +} + +// APIKeyAuthUserSnapshot is the user snapshot +type APIKeyAuthUserSnapshot struct { + ID int64 `json:"id"` + Status string `json:"status"` + Role string `json:"role"` + Balance float64 `json:"balance"` + Concurrency int `json:"concurrency"` +} + +// APIKeyAuthGroupSnapshot is the group snapshot +type APIKeyAuthGroupSnapshot struct { + ID int64 `json:"id"` + Name string `json:"name"` + Platform string `json:"platform"` + Status string `json:"status"` + SubscriptionType string `json:"subscription_type"` + RateMultiplier float64 `json:"rate_multiplier"` + DailyLimitUSD *float64 `json:"daily_limit_usd,omitempty"` + WeeklyLimitUSD *float64 `json:"weekly_limit_usd,omitempty"` + MonthlyLimitUSD *float64 `json:"monthly_limit_usd,omitempty"` + ImagePrice1K *float64 `json:"image_price_1k,omitempty"` + ImagePrice2K *float64 `json:"image_price_2k,omitempty"` + ImagePrice4K *float64 `json:"image_price_4k,omitempty"` + ClaudeCodeOnly bool `json:"claude_code_only"` + FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` +} + +// APIKeyAuthCacheEntry is a cache entry; supports negative caching +type APIKeyAuthCacheEntry struct { + NotFound bool `json:"not_found"` + Snapshot *APIKeyAuthSnapshot `json:"snapshot,omitempty"` +} diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go new file mode 100644 index 00000000..dfc55eeb --- /dev/null +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -0,0 +1,269 @@ +package service + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "math/rand" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/dgraph-io/ristretto" +) + +type apiKeyAuthCacheConfig struct { + l1Size int + l1TTL time.Duration + l2TTL time.Duration + negativeTTL time.Duration + jitterPercent int + singleflight bool +} + +var ( + jitterRandMu sync.Mutex + // Auth-cache jitter uses a dedicated random source to avoid seeding the global one + jitterRand = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +func newAPIKeyAuthCacheConfig(cfg *config.Config) apiKeyAuthCacheConfig { + if cfg == nil { + return apiKeyAuthCacheConfig{} + } + auth := cfg.APIKeyAuth + return apiKeyAuthCacheConfig{ + l1Size: auth.L1Size, + l1TTL: time.Duration(auth.L1TTLSeconds) * time.Second, + l2TTL: time.Duration(auth.L2TTLSeconds) * time.Second, + negativeTTL: time.Duration(auth.NegativeTTLSeconds) * time.Second, + jitterPercent: auth.JitterPercent, + singleflight: auth.Singleflight, + } +} + +func (c apiKeyAuthCacheConfig) l1Enabled() bool { + return c.l1Size > 0 && c.l1TTL > 0 +} + +func (c apiKeyAuthCacheConfig) l2Enabled() bool { + return c.l2TTL > 0 +} + +func (c apiKeyAuthCacheConfig) negativeEnabled() bool { + return c.negativeTTL > 0 +} + +func (c apiKeyAuthCacheConfig) jitterTTL(ttl time.Duration) time.Duration { + if ttl <= 0 { + return ttl + } + if c.jitterPercent <= 0 { + return ttl + } + percent := c.jitterPercent + if percent > 100 { + percent = 100 + } + delta := float64(percent) / 100 + jitterRandMu.Lock() + randVal := jitterRand.Float64() + jitterRandMu.Unlock() + factor := 1 - delta + randVal*(2*delta) + if factor <= 0 { + return ttl + } + return time.Duration(float64(ttl) * factor) +}
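jitterTTL spreads expirations uniformly across [ttl·(1-p), ttl·(1+p)) so that entries written together do not all expire together — a cache-stampede guard. With jitterPercent = 20 and a 60-second TTL:

ttl := 60 * time.Second
delta := 0.20 // jitterPercent 20 → factor ranges over [0.8, 1.2)
factor := 1 - delta + 0.7*(2*delta) // e.g. randVal = 0.7 → factor = 1.08
jittered := time.Duration(float64(ttl) * factor) // 64.8s; randVal 0 → 48s, randVal near 1 → ~72s
_ = jittered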
jitterRandMu.Unlock() + factor := 1 - delta + randVal*(2*delta) + if factor <= 0 { + return ttl + } + return time.Duration(float64(ttl) * factor) +} + +func (s *APIKeyService) initAuthCache(cfg *config.Config) { + s.authCfg = newAPIKeyAuthCacheConfig(cfg) + if !s.authCfg.l1Enabled() { + return + } + cache, err := ristretto.NewCache(&ristretto.Config{ + NumCounters: int64(s.authCfg.l1Size) * 10, + MaxCost: int64(s.authCfg.l1Size), + BufferItems: 64, + }) + if err != nil { + return + } + s.authCacheL1 = cache +} + +func (s *APIKeyService) authCacheKey(key string) string { + sum := sha256.Sum256([]byte(key)) + return hex.EncodeToString(sum[:]) +} + +func (s *APIKeyService) getAuthCacheEntry(ctx context.Context, cacheKey string) (*APIKeyAuthCacheEntry, bool) { + if s.authCacheL1 != nil { + if val, ok := s.authCacheL1.Get(cacheKey); ok { + if entry, ok := val.(*APIKeyAuthCacheEntry); ok { + return entry, true + } + } + } + if s.cache == nil || !s.authCfg.l2Enabled() { + return nil, false + } + entry, err := s.cache.GetAuthCache(ctx, cacheKey) + if err != nil { + return nil, false + } + s.setAuthCacheL1(cacheKey, entry) + return entry, true +} + +func (s *APIKeyService) setAuthCacheL1(cacheKey string, entry *APIKeyAuthCacheEntry) { + if s.authCacheL1 == nil || entry == nil { + return + } + ttl := s.authCfg.l1TTL + if entry.NotFound && s.authCfg.negativeTTL > 0 && s.authCfg.negativeTTL < ttl { + ttl = s.authCfg.negativeTTL + } + ttl = s.authCfg.jitterTTL(ttl) + _ = s.authCacheL1.SetWithTTL(cacheKey, entry, 1, ttl) +} + +func (s *APIKeyService) setAuthCacheEntry(ctx context.Context, cacheKey string, entry *APIKeyAuthCacheEntry, ttl time.Duration) { + if entry == nil { + return + } + s.setAuthCacheL1(cacheKey, entry) + if s.cache == nil || !s.authCfg.l2Enabled() { + return + } + _ = s.cache.SetAuthCache(ctx, cacheKey, entry, s.authCfg.jitterTTL(ttl)) +} + +func (s *APIKeyService) deleteAuthCache(ctx context.Context, cacheKey string) { + if s.authCacheL1 != nil { + s.authCacheL1.Del(cacheKey) + } + if s.cache == nil { + return + } + _ = s.cache.DeleteAuthCache(ctx, cacheKey) +} + +func (s *APIKeyService) loadAuthCacheEntry(ctx context.Context, key, cacheKey string) (*APIKeyAuthCacheEntry, error) { + apiKey, err := s.apiKeyRepo.GetByKeyForAuth(ctx, key) + if err != nil { + if errors.Is(err, ErrAPIKeyNotFound) { + entry := &APIKeyAuthCacheEntry{NotFound: true} + if s.authCfg.negativeEnabled() { + s.setAuthCacheEntry(ctx, cacheKey, entry, s.authCfg.negativeTTL) + } + return entry, nil + } + return nil, fmt.Errorf("get api key: %w", err) + } + apiKey.Key = key + snapshot := s.snapshotFromAPIKey(apiKey) + if snapshot == nil { + return nil, fmt.Errorf("get api key: %w", ErrAPIKeyNotFound) + } + entry := &APIKeyAuthCacheEntry{Snapshot: snapshot} + s.setAuthCacheEntry(ctx, cacheKey, entry, s.authCfg.l2TTL) + return entry, nil +} + +func (s *APIKeyService) applyAuthCacheEntry(key string, entry *APIKeyAuthCacheEntry) (*APIKey, bool, error) { + if entry == nil { + return nil, false, nil + } + if entry.NotFound { + return nil, true, ErrAPIKeyNotFound + } + if entry.Snapshot == nil { + return nil, false, nil + } + return s.snapshotToAPIKey(key, entry.Snapshot), true, nil +} + +func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { + if apiKey == nil || apiKey.User == nil { + return nil + } + snapshot := &APIKeyAuthSnapshot{ + APIKeyID: apiKey.ID, + UserID: apiKey.UserID, + GroupID: apiKey.GroupID, + Status: apiKey.Status, + IPWhitelist: apiKey.IPWhitelist, + IPBlacklist: 
apiKey.IPBlacklist, + User: APIKeyAuthUserSnapshot{ + ID: apiKey.User.ID, + Status: apiKey.User.Status, + Role: apiKey.User.Role, + Balance: apiKey.User.Balance, + Concurrency: apiKey.User.Concurrency, + }, + } + if apiKey.Group != nil { + snapshot.Group = &APIKeyAuthGroupSnapshot{ + ID: apiKey.Group.ID, + Name: apiKey.Group.Name, + Platform: apiKey.Group.Platform, + Status: apiKey.Group.Status, + SubscriptionType: apiKey.Group.SubscriptionType, + RateMultiplier: apiKey.Group.RateMultiplier, + DailyLimitUSD: apiKey.Group.DailyLimitUSD, + WeeklyLimitUSD: apiKey.Group.WeeklyLimitUSD, + MonthlyLimitUSD: apiKey.Group.MonthlyLimitUSD, + ImagePrice1K: apiKey.Group.ImagePrice1K, + ImagePrice2K: apiKey.Group.ImagePrice2K, + ImagePrice4K: apiKey.Group.ImagePrice4K, + ClaudeCodeOnly: apiKey.Group.ClaudeCodeOnly, + FallbackGroupID: apiKey.Group.FallbackGroupID, + } + } + return snapshot +} + +func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapshot) *APIKey { + if snapshot == nil { + return nil + } + apiKey := &APIKey{ + ID: snapshot.APIKeyID, + UserID: snapshot.UserID, + GroupID: snapshot.GroupID, + Key: key, + Status: snapshot.Status, + IPWhitelist: snapshot.IPWhitelist, + IPBlacklist: snapshot.IPBlacklist, + User: &User{ + ID: snapshot.User.ID, + Status: snapshot.User.Status, + Role: snapshot.User.Role, + Balance: snapshot.User.Balance, + Concurrency: snapshot.User.Concurrency, + }, + } + if snapshot.Group != nil { + apiKey.Group = &Group{ + ID: snapshot.Group.ID, + Name: snapshot.Group.Name, + Platform: snapshot.Group.Platform, + Status: snapshot.Group.Status, + Hydrated: true, + SubscriptionType: snapshot.Group.SubscriptionType, + RateMultiplier: snapshot.Group.RateMultiplier, + DailyLimitUSD: snapshot.Group.DailyLimitUSD, + WeeklyLimitUSD: snapshot.Group.WeeklyLimitUSD, + MonthlyLimitUSD: snapshot.Group.MonthlyLimitUSD, + ImagePrice1K: snapshot.Group.ImagePrice1K, + ImagePrice2K: snapshot.Group.ImagePrice2K, + ImagePrice4K: snapshot.Group.ImagePrice4K, + ClaudeCodeOnly: snapshot.Group.ClaudeCodeOnly, + FallbackGroupID: snapshot.Group.FallbackGroupID, + } + } + return apiKey +} diff --git a/backend/internal/service/api_key_auth_cache_invalidate.go b/backend/internal/service/api_key_auth_cache_invalidate.go new file mode 100644 index 00000000..aeb58bcc --- /dev/null +++ b/backend/internal/service/api_key_auth_cache_invalidate.go @@ -0,0 +1,48 @@ +package service + +import "context" + +// InvalidateAuthCacheByKey 清除指定 API Key 的认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByKey(ctx context.Context, key string) { + if key == "" { + return + } + cacheKey := s.authCacheKey(key) + s.deleteAuthCache(ctx, cacheKey) +} + +// InvalidateAuthCacheByUserID 清除用户相关的 API Key 认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByUserID(ctx context.Context, userID int64) { + if userID <= 0 { + return + } + keys, err := s.apiKeyRepo.ListKeysByUserID(ctx, userID) + if err != nil { + return + } + s.deleteAuthCacheByKeys(ctx, keys) +} + +// InvalidateAuthCacheByGroupID 清除分组相关的 API Key 认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64) { + if groupID <= 0 { + return + } + keys, err := s.apiKeyRepo.ListKeysByGroupID(ctx, groupID) + if err != nil { + return + } + s.deleteAuthCacheByKeys(ctx, keys) +} + +func (s *APIKeyService) deleteAuthCacheByKeys(ctx context.Context, keys []string) { + if len(keys) == 0 { + return + } + for _, key := range keys { + if key == "" { + continue + } + s.deleteAuthCache(ctx, s.authCacheKey(key)) + } +} diff --git 
a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go new file mode 100644 index 00000000..ecc570c7 --- /dev/null +++ b/backend/internal/service/api_key_service.go @@ -0,0 +1,570 @@ +package service + +import ( + "context" + "crypto/rand" + "encoding/hex" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/dgraph-io/ristretto" + "golang.org/x/sync/singleflight" +) + +var ( + ErrAPIKeyNotFound = infraerrors.NotFound("API_KEY_NOT_FOUND", "api key not found") + ErrGroupNotAllowed = infraerrors.Forbidden("GROUP_NOT_ALLOWED", "user is not allowed to bind this group") + ErrAPIKeyExists = infraerrors.Conflict("API_KEY_EXISTS", "api key already exists") + ErrAPIKeyTooShort = infraerrors.BadRequest("API_KEY_TOO_SHORT", "api key must be at least 16 characters") + ErrAPIKeyInvalidChars = infraerrors.BadRequest("API_KEY_INVALID_CHARS", "api key can only contain letters, numbers, underscores, and hyphens") + ErrAPIKeyRateLimited = infraerrors.TooManyRequests("API_KEY_RATE_LIMITED", "too many failed attempts, please try again later") + ErrInvalidIPPattern = infraerrors.BadRequest("INVALID_IP_PATTERN", "invalid IP or CIDR pattern") +) + +const ( + apiKeyMaxErrorsPerHour = 20 +) + +type APIKeyRepository interface { + Create(ctx context.Context, key *APIKey) error + GetByID(ctx context.Context, id int64) (*APIKey, error) + // GetKeyAndOwnerID 仅获取 API Key 的 key 与所有者 ID,用于删除等轻量场景 + GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) + GetByKey(ctx context.Context, key string) (*APIKey, error) + // GetByKeyForAuth 认证专用查询,返回最小字段集 + GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) + Update(ctx context.Context, key *APIKey) error + Delete(ctx context.Context, id int64) error + + ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) + VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) + CountByUserID(ctx context.Context, userID int64) (int64, error) + ExistsByKey(ctx context.Context, key string) (bool, error) + ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) + SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) + ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) + CountByGroupID(ctx context.Context, groupID int64) (int64, error) + ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) + ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) +} + +// APIKeyCache defines cache operations for API key service +type APIKeyCache interface { + GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) + IncrementCreateAttemptCount(ctx context.Context, userID int64) error + DeleteCreateAttemptCount(ctx context.Context, userID int64) error + + IncrementDailyUsage(ctx context.Context, apiKey string) error + SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error + + GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) + SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error + DeleteAuthCache(ctx 
context.Context, key string) error
+}
+
+// APIKeyAuthCacheInvalidator exposes auth-cache invalidation hooks.
+type APIKeyAuthCacheInvalidator interface {
+	InvalidateAuthCacheByKey(ctx context.Context, key string)
+	InvalidateAuthCacheByUserID(ctx context.Context, userID int64)
+	InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64)
+}
+
+// CreateAPIKeyRequest is the request payload for creating an API key.
+type CreateAPIKeyRequest struct {
+	Name        string   `json:"name"`
+	GroupID     *int64   `json:"group_id"`
+	CustomKey   *string  `json:"custom_key"`   // optional user-supplied key
+	IPWhitelist []string `json:"ip_whitelist"` // IP whitelist
+	IPBlacklist []string `json:"ip_blacklist"` // IP blacklist
+}
+
+// UpdateAPIKeyRequest is the request payload for updating an API key.
+type UpdateAPIKeyRequest struct {
+	Name        *string  `json:"name"`
+	GroupID     *int64   `json:"group_id"`
+	Status      *string  `json:"status"`
+	IPWhitelist []string `json:"ip_whitelist"` // IP whitelist (an empty slice clears it)
+	IPBlacklist []string `json:"ip_blacklist"` // IP blacklist (an empty slice clears it)
+}
+
+// APIKeyService manages API keys.
+type APIKeyService struct {
+	apiKeyRepo  APIKeyRepository
+	userRepo    UserRepository
+	groupRepo   GroupRepository
+	userSubRepo UserSubscriptionRepository
+	cache       APIKeyCache
+	cfg         *config.Config
+	authCacheL1 *ristretto.Cache
+	authCfg     apiKeyAuthCacheConfig
+	authGroup   singleflight.Group
+}
+
+// NewAPIKeyService creates an APIKeyService instance.
+func NewAPIKeyService(
+	apiKeyRepo APIKeyRepository,
+	userRepo UserRepository,
+	groupRepo GroupRepository,
+	userSubRepo UserSubscriptionRepository,
+	cache APIKeyCache,
+	cfg *config.Config,
+) *APIKeyService {
+	svc := &APIKeyService{
+		apiKeyRepo:  apiKeyRepo,
+		userRepo:    userRepo,
+		groupRepo:   groupRepo,
+		userSubRepo: userSubRepo,
+		cache:       cache,
+		cfg:         cfg,
+	}
+	svc.initAuthCache(cfg)
+	return svc
+}
+
+// GenerateKey generates a random API key.
+func (s *APIKeyService) GenerateKey() (string, error) {
+	// Generate 32 bytes of random data.
+	bytes := make([]byte, 32)
+	if _, err := rand.Read(bytes); err != nil {
+		return "", fmt.Errorf("generate random bytes: %w", err)
+	}
+
+	// Hex-encode and prepend the configured key prefix.
+	prefix := s.cfg.Default.APIKeyPrefix
+	if prefix == "" {
+		prefix = "sk-"
+	}
+
+	key := prefix + hex.EncodeToString(bytes)
+	return key, nil
+}
+
+// ValidateCustomKey validates the format of a user-supplied API key.
+func (s *APIKeyService) ValidateCustomKey(key string) error {
+	// Check length.
+	if len(key) < 16 {
+		return ErrAPIKeyTooShort
+	}
+
+	// Check characters: only letters, digits, underscores and hyphens.
+	for _, c := range key {
+		if (c >= 'a' && c <= 'z') ||
+			(c >= 'A' && c <= 'Z') ||
+			(c >= '0' && c <= '9') ||
+			c == '_' || c == '-' {
+			continue
+		}
+		return ErrAPIKeyInvalidChars
+	}
+
+	return nil
+}
+
+// checkAPIKeyRateLimit checks whether the user has exceeded the hourly
+// error limit for creating custom keys.
+func (s *APIKeyService) checkAPIKeyRateLimit(ctx context.Context, userID int64) error {
+	if s.cache == nil {
+		return nil
+	}
+
+	count, err := s.cache.GetCreateAttemptCount(ctx, userID)
+	if err != nil {
+		// A Redis error must not block the user.
+		return nil
+	}
+
+	if count >= apiKeyMaxErrorsPerHour {
+		return ErrAPIKeyRateLimited
+	}
+
+	return nil
+}
+
+// incrementAPIKeyErrorCount bumps the user's error counter for custom key creation.
+func (s *APIKeyService) incrementAPIKeyErrorCount(ctx context.Context, userID int64) {
+	if s.cache == nil {
+		return
+	}
+
+	_ = s.cache.IncrementCreateAttemptCount(ctx, userID)
+}
+
+// canUserBindGroup reports whether the user may bind the given group.
+// Subscription-type groups: the user must hold an active subscription.
+// Standard-type groups: use the existing AllowedGroups / IsExclusive logic.
+func (s *APIKeyService) canUserBindGroup(ctx context.Context, user *User, group *Group) bool {
+	// Subscription-type group: an active subscription is required.
+	if group.IsSubscriptionType() {
+		_, err := s.userSubRepo.GetActiveByUserIDAndGroupID(ctx, user.ID, group.ID)
+		return err == nil // allowed when an active subscription exists
+	}
+	// Standard-type group: existing logic.
+	return user.CanBindGroup(group.ID, group.IsExclusive)
+}
+
+// Create creates an API key.
+func (s *APIKeyService) Create(ctx context.Context, userID int64, req CreateAPIKeyRequest) (*APIKey, error) {
+	// Ensure the user exists.
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+
+	// Validate IP whitelist patterns.
+	if len(req.IPWhitelist) > 0 {
+		if invalid := ip.ValidateIPPatterns(req.IPWhitelist); len(invalid) > 0 {
+			return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid)
+		}
+	}
+
+	// Validate IP blacklist patterns.
+	if len(req.IPBlacklist) > 0 {
+		if invalid := ip.ValidateIPPatterns(req.IPBlacklist); len(invalid) > 0 {
+			return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid)
+		}
+	}
+
+	// Validate group permissions (when a group is specified).
+	if req.GroupID != nil {
+		group, err := s.groupRepo.GetByID(ctx, *req.GroupID)
+		if err != nil {
+			return nil, fmt.Errorf("get group: %w", err)
+		}
+
+		// Check whether the user may bind this group.
+		if !s.canUserBindGroup(ctx, user, group) {
+			return nil, ErrGroupNotAllowed
+		}
+	}
+
+	var key string
+
+	// Use a custom key if one was supplied.
+	if req.CustomKey != nil && *req.CustomKey != "" {
+		// Rate-limit (custom keys only).
+		if err := s.checkAPIKeyRateLimit(ctx, userID); err != nil {
+			return nil, err
+		}
+
+		// Validate the custom key format.
+		if err := s.ValidateCustomKey(*req.CustomKey); err != nil {
+			return nil, err
+		}
+
+		// Check whether the key already exists.
+		exists, err := s.apiKeyRepo.ExistsByKey(ctx, *req.CustomKey)
+		if err != nil {
+			return nil, fmt.Errorf("check key exists: %w", err)
+		}
+		if exists {
+			// Key already exists: bump the error counter.
+			s.incrementAPIKeyErrorCount(ctx, userID)
+			return nil, ErrAPIKeyExists
+		}
+
+		key = *req.CustomKey
+	} else {
+		// Generate a random API key.
+		var err error
+		key, err = s.GenerateKey()
+		if err != nil {
+			return nil, fmt.Errorf("generate key: %w", err)
+		}
+	}
+
+	// Persist the API key record.
+	apiKey := &APIKey{
+		UserID:      userID,
+		Key:         key,
+		Name:        req.Name,
+		GroupID:     req.GroupID,
+		Status:      StatusActive,
+		IPWhitelist: req.IPWhitelist,
+		IPBlacklist: req.IPBlacklist,
+	}
+
+	if err := s.apiKeyRepo.Create(ctx, apiKey); err != nil {
+		return nil, fmt.Errorf("create api key: %w", err)
+	}
+
+	s.InvalidateAuthCacheByKey(ctx, apiKey.Key)
+
+	return apiKey, nil
+}
+
+// List returns the user's API keys.
+func (s *APIKeyService) List(ctx context.Context, userID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) {
+	keys, pageResult, err := s.apiKeyRepo.ListByUserID(ctx, userID, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list api keys: %w", err)
+	}
+	return keys, pageResult, nil
+}
+
+func (s *APIKeyService) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) {
+	if len(apiKeyIDs) == 0 {
+		return []int64{}, nil
+	}
+
+	validIDs, err := s.apiKeyRepo.VerifyOwnership(ctx, userID, apiKeyIDs)
+	if err != nil {
+		return nil, fmt.Errorf("verify api key ownership: %w", err)
+	}
+	return validIDs, nil
+}
+
+// GetByID returns an API key by ID.
+func (s *APIKeyService) GetByID(ctx context.Context, id int64) (*APIKey, error) {
+	apiKey, err := s.apiKeyRepo.GetByID(ctx, id)
+	if err != nil {
+		return nil, fmt.Errorf("get api key: %w", err)
+	}
+	return apiKey, nil
+}
+
+// GetByKey looks up an API key by its key string (used for authentication).
+// Lookup order: L1/L2 cache first, then a (optionally singleflight-guarded)
+// repository load, with a plain repository read as the last resort.
+func (s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, error) {
+	cacheKey := s.authCacheKey(key)
+
+	if entry, ok := s.getAuthCacheEntry(ctx, cacheKey); ok {
+		if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used {
+			if err != nil {
+				return nil, fmt.Errorf("get api key: %w", err)
+			}
+			return apiKey, nil
+		}
+	}
+
+	if s.authCfg.singleflight {
+		value, err, _ := s.authGroup.Do(cacheKey, func() (any, error) {
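+			// singleflight collapses concurrent cache misses for the same
+			// cacheKey into a single repository load: N parallel requests for
+			// one key share one GetByKeyForAuth call (exercised by the
+			// Singleflight test in api_key_service_cache_test.go).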
+ return s.loadAuthCacheEntry(ctx, key, cacheKey) + }) + if err != nil { + return nil, err + } + entry, _ := value.(*APIKeyAuthCacheEntry) + if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used { + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + return apiKey, nil + } + } else { + entry, err := s.loadAuthCacheEntry(ctx, key, cacheKey) + if err != nil { + return nil, err + } + if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used { + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + return apiKey, nil + } + } + + apiKey, err := s.apiKeyRepo.GetByKeyForAuth(ctx, key) + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + apiKey.Key = key + return apiKey, nil +} + +// Update 更新API Key +func (s *APIKeyService) Update(ctx context.Context, id int64, userID int64, req UpdateAPIKeyRequest) (*APIKey, error) { + apiKey, err := s.apiKeyRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + + // 验证所有权 + if apiKey.UserID != userID { + return nil, ErrInsufficientPerms + } + + // 验证 IP 白名单格式 + if len(req.IPWhitelist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPWhitelist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + + // 验证 IP 黑名单格式 + if len(req.IPBlacklist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPBlacklist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + + // 更新字段 + if req.Name != nil { + apiKey.Name = *req.Name + } + + if req.GroupID != nil { + // 验证分组权限 + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + group, err := s.groupRepo.GetByID(ctx, *req.GroupID) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + + if !s.canUserBindGroup(ctx, user, group) { + return nil, ErrGroupNotAllowed + } + + apiKey.GroupID = req.GroupID + } + + if req.Status != nil { + apiKey.Status = *req.Status + // 如果状态改变,清除Redis缓存 + if s.cache != nil { + _ = s.cache.DeleteCreateAttemptCount(ctx, apiKey.UserID) + } + } + + // 更新 IP 限制(空数组会清空设置) + apiKey.IPWhitelist = req.IPWhitelist + apiKey.IPBlacklist = req.IPBlacklist + + if err := s.apiKeyRepo.Update(ctx, apiKey); err != nil { + return nil, fmt.Errorf("update api key: %w", err) + } + + s.InvalidateAuthCacheByKey(ctx, apiKey.Key) + + return apiKey, nil +} + +// Delete 删除API Key +func (s *APIKeyService) Delete(ctx context.Context, id int64, userID int64) error { + key, ownerID, err := s.apiKeyRepo.GetKeyAndOwnerID(ctx, id) + if err != nil { + return fmt.Errorf("get api key: %w", err) + } + + // 验证当前用户是否为该 API Key 的所有者 + if ownerID != userID { + return ErrInsufficientPerms + } + + // 清除Redis缓存(使用 userID 而非 apiKey.UserID) + if s.cache != nil { + _ = s.cache.DeleteCreateAttemptCount(ctx, userID) + } + s.InvalidateAuthCacheByKey(ctx, key) + + if err := s.apiKeyRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete api key: %w", err) + } + + return nil +} + +// ValidateKey 验证API Key是否有效(用于认证中间件) +func (s *APIKeyService) ValidateKey(ctx context.Context, key string) (*APIKey, *User, error) { + // 获取API Key + apiKey, err := s.GetByKey(ctx, key) + if err != nil { + return nil, nil, err + } + + // 检查API Key状态 + if !apiKey.IsActive() { + return nil, nil, infraerrors.Unauthorized("API_KEY_INACTIVE", "api key is not active") + } + + // 获取用户信息 + user, err := s.userRepo.GetByID(ctx, apiKey.UserID) + if err != nil { + return nil, nil, fmt.Errorf("get user: %w", err) + } + 
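+	// Note: the user record is re-read from the repository here, so the
+	// status check below always sees fresh data even when the API key
+	// itself was served from the auth cache.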
+ // 检查用户状态 + if !user.IsActive() { + return nil, nil, ErrUserNotActive + } + + return apiKey, user, nil +} + +// IncrementUsage 增加API Key使用次数(可选:用于统计) +func (s *APIKeyService) IncrementUsage(ctx context.Context, keyID int64) error { + // 使用Redis计数器 + if s.cache != nil { + cacheKey := fmt.Sprintf("apikey:usage:%d:%s", keyID, timezone.Now().Format("2006-01-02")) + if err := s.cache.IncrementDailyUsage(ctx, cacheKey); err != nil { + return fmt.Errorf("increment usage: %w", err) + } + // 设置24小时过期 + _ = s.cache.SetDailyUsageExpiry(ctx, cacheKey, 24*time.Hour) + } + return nil +} + +// GetAvailableGroups 获取用户有权限绑定的分组列表 +// 返回用户可以选择的分组: +// - 标准类型分组:公开的(非专属)或用户被明确允许的 +// - 订阅类型分组:用户有有效订阅的 +func (s *APIKeyService) GetAvailableGroups(ctx context.Context, userID int64) ([]Group, error) { + // 获取用户信息 + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + + // 获取所有活跃分组 + allGroups, err := s.groupRepo.ListActive(ctx) + if err != nil { + return nil, fmt.Errorf("list active groups: %w", err) + } + + // 获取用户的所有有效订阅 + activeSubscriptions, err := s.userSubRepo.ListActiveByUserID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("list active subscriptions: %w", err) + } + + // 构建订阅分组 ID 集合 + subscribedGroupIDs := make(map[int64]bool) + for _, sub := range activeSubscriptions { + subscribedGroupIDs[sub.GroupID] = true + } + + // 过滤出用户有权限的分组 + availableGroups := make([]Group, 0) + for _, group := range allGroups { + if s.canUserBindGroupInternal(user, &group, subscribedGroupIDs) { + availableGroups = append(availableGroups, group) + } + } + + return availableGroups, nil +} + +// canUserBindGroupInternal 内部方法,检查用户是否可以绑定分组(使用预加载的订阅数据) +func (s *APIKeyService) canUserBindGroupInternal(user *User, group *Group, subscribedGroupIDs map[int64]bool) bool { + // 订阅类型分组:需要有效订阅 + if group.IsSubscriptionType() { + return subscribedGroupIDs[group.ID] + } + // 标准类型分组:使用原有逻辑 + return user.CanBindGroup(group.ID, group.IsExclusive) +} + +func (s *APIKeyService) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) { + keys, err := s.apiKeyRepo.SearchAPIKeys(ctx, userID, keyword, limit) + if err != nil { + return nil, fmt.Errorf("search api keys: %w", err) + } + return keys, nil +} diff --git a/backend/internal/service/api_key_service_cache_test.go b/backend/internal/service/api_key_service_cache_test.go new file mode 100644 index 00000000..3314ca8d --- /dev/null +++ b/backend/internal/service/api_key_service_cache_test.go @@ -0,0 +1,417 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +type authRepoStub struct { + getByKeyForAuth func(ctx context.Context, key string) (*APIKey, error) + listKeysByUserID func(ctx context.Context, userID int64) ([]string, error) + listKeysByGroupID func(ctx context.Context, groupID int64) ([]string, error) +} + +func (s *authRepoStub) Create(ctx context.Context, key *APIKey) error { + panic("unexpected Create call") +} + +func (s *authRepoStub) GetByID(ctx context.Context, id int64) (*APIKey, error) { + panic("unexpected GetByID call") +} + +func (s *authRepoStub) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + panic("unexpected GetKeyAndOwnerID call") +} + +func (s *authRepoStub) GetByKey(ctx context.Context, 
key string) (*APIKey, error) { + panic("unexpected GetByKey call") +} + +func (s *authRepoStub) GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) { + if s.getByKeyForAuth == nil { + panic("unexpected GetByKeyForAuth call") + } + return s.getByKeyForAuth(ctx, key) +} + +func (s *authRepoStub) Update(ctx context.Context, key *APIKey) error { + panic("unexpected Update call") +} + +func (s *authRepoStub) Delete(ctx context.Context, id int64) error { + panic("unexpected Delete call") +} + +func (s *authRepoStub) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) { + panic("unexpected ListByUserID call") +} + +func (s *authRepoStub) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + panic("unexpected VerifyOwnership call") +} + +func (s *authRepoStub) CountByUserID(ctx context.Context, userID int64) (int64, error) { + panic("unexpected CountByUserID call") +} + +func (s *authRepoStub) ExistsByKey(ctx context.Context, key string) (bool, error) { + panic("unexpected ExistsByKey call") +} + +func (s *authRepoStub) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) { + panic("unexpected ListByGroupID call") +} + +func (s *authRepoStub) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) { + panic("unexpected SearchAPIKeys call") +} + +func (s *authRepoStub) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected ClearGroupIDByGroupID call") +} + +func (s *authRepoStub) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected CountByGroupID call") +} + +func (s *authRepoStub) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + if s.listKeysByUserID == nil { + panic("unexpected ListKeysByUserID call") + } + return s.listKeysByUserID(ctx, userID) +} + +func (s *authRepoStub) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + if s.listKeysByGroupID == nil { + panic("unexpected ListKeysByGroupID call") + } + return s.listKeysByGroupID(ctx, groupID) +} + +type authCacheStub struct { + getAuthCache func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) + setAuthKeys []string + deleteAuthKeys []string +} + +func (s *authCacheStub) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) { + return 0, nil +} + +func (s *authCacheStub) IncrementCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (s *authCacheStub) DeleteCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (s *authCacheStub) IncrementDailyUsage(ctx context.Context, apiKey string) error { + return nil +} + +func (s *authCacheStub) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error { + return nil +} + +func (s *authCacheStub) GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + if s.getAuthCache == nil { + return nil, redis.Nil + } + return s.getAuthCache(ctx, key) +} + +func (s *authCacheStub) SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error { + s.setAuthKeys = append(s.setAuthKeys, key) + return nil +} + +func (s *authCacheStub) DeleteAuthCache(ctx context.Context, key string) error { + s.deleteAuthKeys = append(s.deleteAuthKeys, key) + return nil +} + +func 
TestAPIKeyService_GetByKey_UsesL2Cache(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, errors.New("unexpected repo call") + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + groupID := int64(9) + cacheEntry := &APIKeyAuthCacheEntry{ + Snapshot: &APIKeyAuthSnapshot{ + APIKeyID: 1, + UserID: 2, + GroupID: &groupID, + Status: StatusActive, + User: APIKeyAuthUserSnapshot{ + ID: 2, + Status: StatusActive, + Role: RoleUser, + Balance: 10, + Concurrency: 3, + }, + Group: &APIKeyAuthGroupSnapshot{ + ID: groupID, + Name: "g", + Platform: PlatformAnthropic, + Status: StatusActive, + SubscriptionType: SubscriptionTypeStandard, + RateMultiplier: 1, + }, + }, + } + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return cacheEntry, nil + } + + apiKey, err := svc.GetByKey(context.Background(), "k1") + require.NoError(t, err) + require.Equal(t, int64(1), apiKey.ID) + require.Equal(t, int64(2), apiKey.User.ID) + require.Equal(t, groupID, apiKey.Group.ID) +} + +func TestAPIKeyService_GetByKey_NegativeCache(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, errors.New("unexpected repo call") + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return &APIKeyAuthCacheEntry{NotFound: true}, nil + } + + _, err := svc.GetByKey(context.Background(), "missing") + require.ErrorIs(t, err, ErrAPIKeyNotFound) +} + +func TestAPIKeyService_GetByKey_CacheMissStoresL2(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return &APIKey{ + ID: 5, + UserID: 7, + Status: StatusActive, + User: &User{ + ID: 7, + Status: StatusActive, + Role: RoleUser, + Balance: 12, + Concurrency: 2, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return nil, redis.Nil + } + + apiKey, err := svc.GetByKey(context.Background(), "k2") + require.NoError(t, err) + require.Equal(t, int64(5), apiKey.ID) + require.Len(t, cache.setAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_UsesL1Cache(t *testing.T) { + var calls int32 + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + atomic.AddInt32(&calls, 1) + return &APIKey{ + ID: 21, + UserID: 3, + Status: StatusActive, + User: &User{ + ID: 3, + Status: StatusActive, + Role: RoleUser, + Balance: 5, + Concurrency: 2, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L1Size: 1000, + L1TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + require.NotNil(t, svc.authCacheL1) + + _, err := svc.GetByKey(context.Background(), "k-l1") + require.NoError(t, err) + svc.authCacheL1.Wait() + 
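+	// ristretto applies Set through internal buffers asynchronously; the
+	// Wait() above flushes them so the Get below observes the entry
+	// deterministically instead of racing the admission pipeline.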
cacheKey := svc.authCacheKey("k-l1") + _, ok := svc.authCacheL1.Get(cacheKey) + require.True(t, ok) + _, err = svc.GetByKey(context.Background(), "k-l1") + require.NoError(t, err) + require.Equal(t, int32(1), atomic.LoadInt32(&calls)) +} + +func TestAPIKeyService_InvalidateAuthCacheByUserID(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + listKeysByUserID: func(ctx context.Context, userID int64) ([]string, error) { + return []string{"k1", "k2"}, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByUserID(context.Background(), 7) + require.Len(t, cache.deleteAuthKeys, 2) +} + +func TestAPIKeyService_InvalidateAuthCacheByGroupID(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + listKeysByGroupID: func(ctx context.Context, groupID int64) ([]string, error) { + return []string{"k1", "k2"}, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByGroupID(context.Background(), 9) + require.Len(t, cache.deleteAuthKeys, 2) +} + +func TestAPIKeyService_InvalidateAuthCacheByKey(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + listKeysByUserID: func(ctx context.Context, userID int64) ([]string, error) { + return nil, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByKey(context.Background(), "k1") + require.Len(t, cache.deleteAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_CachesNegativeOnRepoMiss(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, ErrAPIKeyNotFound + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return nil, redis.Nil + } + + _, err := svc.GetByKey(context.Background(), "missing") + require.ErrorIs(t, err, ErrAPIKeyNotFound) + require.Len(t, cache.setAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_SingleflightCollapses(t *testing.T) { + var calls int32 + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + atomic.AddInt32(&calls, 1) + time.Sleep(50 * time.Millisecond) + return &APIKey{ + ID: 11, + UserID: 2, + Status: StatusActive, + User: &User{ + ID: 2, + Status: StatusActive, + Role: RoleUser, + Balance: 1, + Concurrency: 1, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + Singleflight: true, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + start := make(chan struct{}) + wg := sync.WaitGroup{} + errs := make([]error, 5) + for i := 0; i < 5; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + <-start + _, err := svc.GetByKey(context.Background(), "k1") + errs[idx] = err + }(i) + } + close(start) + wg.Wait() + + for _, err := range errs { + require.NoError(t, err) + } + require.Equal(t, int32(1), atomic.LoadInt32(&calls)) +} diff --git 
a/backend/internal/service/api_key_service_delete_test.go b/backend/internal/service/api_key_service_delete_test.go
new file mode 100644
index 00000000..32ae884e
--- /dev/null
+++ b/backend/internal/service/api_key_service_delete_test.go
@@ -0,0 +1,252 @@
+//go:build unit
+
+// Unit tests for the API key service's Delete method.
+// They cover APIKeyService.Delete across scenarios, including ownership
+// checks, cache invalidation, and error handling.
+
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/stretchr/testify/require"
+)
+
+// apiKeyRepoStub is a test stub for the APIKeyRepository interface.
+// It isolates APIKeyService.Delete from a real database.
+//
+// Design notes:
+//   - apiKey/getByIDErr: the record and error returned by GetKeyAndOwnerID
+//   - deleteErr: the error returned by Delete
+//   - deletedIDs: records the API key IDs passed to Delete, for assertions
+type apiKeyRepoStub struct {
+	apiKey     *APIKey // return value of GetKeyAndOwnerID
+	getByIDErr error   // error return of GetKeyAndOwnerID
+	deleteErr  error   // error return of Delete
+	deletedIDs []int64 // IDs of API keys whose deletion was requested
+}
+
+// The following methods must not be called in these tests; they panic so a
+// failing test points straight at the unexpected call.
+
+func (s *apiKeyRepoStub) Create(ctx context.Context, key *APIKey) error {
+	panic("unexpected Create call")
+}
+
+func (s *apiKeyRepoStub) GetByID(ctx context.Context, id int64) (*APIKey, error) {
+	if s.getByIDErr != nil {
+		return nil, s.getByIDErr
+	}
+	if s.apiKey != nil {
+		clone := *s.apiKey
+		return &clone, nil
+	}
+	panic("unexpected GetByID call")
+}
+
+func (s *apiKeyRepoStub) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) {
+	if s.getByIDErr != nil {
+		return "", 0, s.getByIDErr
+	}
+	if s.apiKey != nil {
+		return s.apiKey.Key, s.apiKey.UserID, nil
+	}
+	return "", 0, ErrAPIKeyNotFound
+}
+
+func (s *apiKeyRepoStub) GetByKey(ctx context.Context, key string) (*APIKey, error) {
+	panic("unexpected GetByKey call")
+}
+
+func (s *apiKeyRepoStub) GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) {
+	panic("unexpected GetByKeyForAuth call")
+}
+
+func (s *apiKeyRepoStub) Update(ctx context.Context, key *APIKey) error {
+	panic("unexpected Update call")
+}
+
+// Delete records the deleted API key ID and returns the preset error.
+// deletedIDs lets tests verify that deletion was actually invoked.
+func (s *apiKeyRepoStub) Delete(ctx context.Context, id int64) error {
+	s.deletedIDs = append(s.deletedIDs, id)
+	return s.deleteErr
+}
+
+// The methods below are required by the interface but irrelevant here.
+
+func (s *apiKeyRepoStub) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) {
+	panic("unexpected ListByUserID call")
+}
+
+func (s *apiKeyRepoStub) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) {
+	panic("unexpected VerifyOwnership call")
+}
+
+func (s *apiKeyRepoStub) CountByUserID(ctx context.Context, userID int64) (int64, error) {
+	panic("unexpected CountByUserID call")
+}
+
+func (s *apiKeyRepoStub) ExistsByKey(ctx context.Context, key string) (bool, error) {
+	panic("unexpected ExistsByKey call")
+}
+
+func (s *apiKeyRepoStub) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) {
+	panic("unexpected ListByGroupID call")
+}
+
+func (s *apiKeyRepoStub) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) {
+	panic("unexpected SearchAPIKeys call")
+}
+
+func (s *apiKeyRepoStub) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) {
+	panic("unexpected ClearGroupIDByGroupID call")
+}
+
+func (s *apiKeyRepoStub) CountByGroupID(ctx context.Context, groupID int64) (int64, error) {
+	panic("unexpected CountByGroupID call")
+}
+
+func (s *apiKeyRepoStub) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) {
+	panic("unexpected ListKeysByUserID call")
+}
+
+func (s *apiKeyRepoStub) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) {
+	panic("unexpected ListKeysByGroupID call")
+}
+
+// apiKeyCacheStub is a test stub for the APIKeyCache interface.
+// It verifies that cache cleanup is invoked correctly on delete.
+//
+// Design notes:
+//   - invalidated: the user IDs whose cache entries were cleared
+type apiKeyCacheStub struct {
+	invalidated    []int64  // user IDs passed to DeleteCreateAttemptCount
+	deleteAuthKeys []string // cache keys passed to DeleteAuthCache
+}
+
+// GetCreateAttemptCount returns 0: the user is under the creation limit.
+func (s *apiKeyCacheStub) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) {
+	return 0, nil
+}
+
+// IncrementCreateAttemptCount is a no-op; not asserted in these tests.
+func (s *apiKeyCacheStub) IncrementCreateAttemptCount(ctx context.Context, userID int64) error {
+	return nil
+}
+
+// DeleteCreateAttemptCount records the user ID whose cache was cleared.
+// Deleting an API key clears the user's create-attempt counter cache.
+func (s *apiKeyCacheStub) DeleteCreateAttemptCount(ctx context.Context, userID int64) error {
+	s.invalidated = append(s.invalidated, userID)
+	return nil
+}
+
+// IncrementDailyUsage is a no-op; not asserted in these tests.
+func (s *apiKeyCacheStub) IncrementDailyUsage(ctx context.Context, apiKey string) error {
+	return nil
+}
+
+// SetDailyUsageExpiry is a no-op; not asserted in these tests.
+func (s *apiKeyCacheStub) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error {
+	return nil
+}
+
+func (s *apiKeyCacheStub) GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) {
+	return nil, nil
+}
+
+func (s *apiKeyCacheStub) SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error {
+	return nil
+}
+
+func (s *apiKeyCacheStub) DeleteAuthCache(ctx context.Context, key string) error {
+	s.deleteAuthKeys = append(s.deleteAuthKeys, key)
+	return nil
+}
+
+// TestApiKeyService_Delete_OwnerMismatch verifies that a non-owner gets a
+// permission error. Expected behavior:
+//   - GetKeyAndOwnerID returns owner ID 1
+//   - the caller's userID is 2 (mismatch)
+//   - ErrInsufficientPerms is returned
+//   - Delete is never called
+//   - no cache is invalidated
func TestApiKeyService_Delete_OwnerMismatch(t *testing.T) {
+	repo := &apiKeyRepoStub{
+		apiKey: &APIKey{ID: 10, UserID: 1, Key: "k"},
+	}
+	cache := &apiKeyCacheStub{}
+	svc := &APIKeyService{apiKeyRepo: repo, cache: cache}
+
+	err := svc.Delete(context.Background(), 10, 2) // API key ID=10, caller userID=2
+	require.ErrorIs(t, err, ErrInsufficientPerms)
+	require.Empty(t, repo.deletedIDs)   // deletion must not have run
+	require.Empty(t, cache.invalidated) // cache must stay intact
+	require.Empty(t, cache.deleteAuthKeys)
+}
+
+// TestApiKeyService_Delete_Success verifies that the owner can delete an
+// API key. Expected behavior:
+//   - GetKeyAndOwnerID returns owner ID 7
+//   - the caller's userID is 7 (match)
+//   - Delete succeeds
+//   - caches are invalidated correctly (keyed by ownerID)
+//   - a nil error is returned
+func TestApiKeyService_Delete_Success(t *testing.T) {
+	repo := &apiKeyRepoStub{
+		apiKey: &APIKey{ID: 42, UserID: 7, Key: "k"},
+	}
+	cache := &apiKeyCacheStub{}
+	svc := &APIKeyService{apiKeyRepo: repo, cache: cache}
+
+	err := svc.Delete(context.Background(), 42, 7) // API key ID=42, caller userID=7
+	require.NoError(t, err)
+	require.Equal(t, []int64{42}, repo.deletedIDs)  // the right API key was deleted
+	require.Equal(t, []int64{7}, cache.invalidated) // the owner's counter cache was cleared
+	require.Equal(t, []string{svc.authCacheKey("k")}, cache.deleteAuthKeys)
+}
+
+// TestApiKeyService_Delete_NotFound verifies that deleting a missing API
+// key returns the right error. Expected behavior:
+//   - GetKeyAndOwnerID returns ErrAPIKeyNotFound
+//   - ErrAPIKeyNotFound is returned (wrapped by fmt.Errorf)
+//   - Delete is never called
+//   - no cache is invalidated
+func TestApiKeyService_Delete_NotFound(t *testing.T) {
+	repo := &apiKeyRepoStub{getByIDErr: ErrAPIKeyNotFound}
+	cache := &apiKeyCacheStub{}
+	svc := &APIKeyService{apiKeyRepo: repo, cache: cache}
+
+	err := svc.Delete(context.Background(), 99, 1)
+	require.ErrorIs(t, err, ErrAPIKeyNotFound)
+	require.Empty(t, repo.deletedIDs)
+	require.Empty(t, cache.invalidated)
+	require.Empty(t, cache.deleteAuthKeys)
+}
+
+// TestApiKeyService_Delete_DeleteFails verifies error handling when the
+// delete itself fails. Expected behavior:
+//   - GetKeyAndOwnerID returns the correct owner ID
+//   - the ownership check passes
+//   - caches are invalidated (before the delete)
+//   - Delete is called but returns an error
+//   - the returned error contains "delete api key"
+func TestApiKeyService_Delete_DeleteFails(t *testing.T) {
+	repo := &apiKeyRepoStub{
+		apiKey:    &APIKey{ID: 42, UserID: 3, Key: "k"},
+		deleteErr: errors.New("delete failed"),
+	}
+	cache := &apiKeyCacheStub{}
+	svc := &APIKeyService{apiKeyRepo: repo, cache: cache}
+
+	err := svc.Delete(context.Background(), 3, 3) // API key ID=3, caller userID=3
+	require.Error(t, err)
+	require.ErrorContains(t, err, "delete api key")
+	require.Equal(t, []int64{3}, repo.deletedIDs)   // deletion was attempted
+	require.Equal(t, []int64{3}, cache.invalidated) // cache cleared even though the delete failed
+	require.Equal(t, []string{svc.authCacheKey("k")}, cache.deleteAuthKeys)
+}
diff --git a/backend/internal/service/auth_cache_invalidation_test.go b/backend/internal/service/auth_cache_invalidation_test.go
new file mode 100644
index 00000000..b6e56177
--- /dev/null
+++ b/backend/internal/service/auth_cache_invalidation_test.go
@@ -0,0 +1,33 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestUsageService_InvalidateUsageCaches(t *testing.T) {
+	invalidator := &authCacheInvalidatorStub{}
+	svc := &UsageService{authCacheInvalidator: invalidator}
+
+	svc.invalidateUsageCaches(context.Background(), 7, false)
+	require.Empty(t, invalidator.userIDs)
+
+	svc.invalidateUsageCaches(context.Background(), 7, true)
+	require.Equal(t, []int64{7}, invalidator.userIDs)
+}
+
+func TestRedeemService_InvalidateRedeemCaches_AuthCache(t *testing.T) {
+	invalidator := &authCacheInvalidatorStub{}
+	svc := &RedeemService{authCacheInvalidator: invalidator}
+
+	svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeBalance})
+	svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeConcurrency})
+	groupID := int64(3)
+	svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeSubscription, GroupID: &groupID})
+
+	require.Equal(t, []int64{11, 11, 11}, invalidator.userIDs)
+}
diff --git a/backend/internal/service/auth_service.go b/backend/internal/service/auth_service.go
new file mode 100644
index 00000000..386b43fc
--- /dev/null
+++ b/backend/internal/service/auth_service.go
@@ -0,0 +1,582 @@
+package service
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"log"
+	"net/mail"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+
+	"github.com/golang-jwt/jwt/v5"
+	"golang.org/x/crypto/bcrypt"
+)
+
+var (
+	ErrInvalidCredentials = infraerrors.Unauthorized("INVALID_CREDENTIALS", "invalid email or password")
+	ErrUserNotActive      = infraerrors.Forbidden("USER_NOT_ACTIVE", "user is not active")
+	ErrEmailExists        = infraerrors.Conflict("EMAIL_EXISTS", "email already exists")
+	ErrEmailReserved      = infraerrors.BadRequest("EMAIL_RESERVED", "email is reserved")
+	ErrInvalidToken       =
infraerrors.Unauthorized("INVALID_TOKEN", "invalid token") + ErrTokenExpired = infraerrors.Unauthorized("TOKEN_EXPIRED", "token has expired") + ErrTokenTooLarge = infraerrors.BadRequest("TOKEN_TOO_LARGE", "token too large") + ErrTokenRevoked = infraerrors.Unauthorized("TOKEN_REVOKED", "token has been revoked") + ErrEmailVerifyRequired = infraerrors.BadRequest("EMAIL_VERIFY_REQUIRED", "email verification is required") + ErrRegDisabled = infraerrors.Forbidden("REGISTRATION_DISABLED", "registration is currently disabled") + ErrServiceUnavailable = infraerrors.ServiceUnavailable("SERVICE_UNAVAILABLE", "service temporarily unavailable") +) + +// maxTokenLength 限制 token 大小,避免超长 header 触发解析时的异常内存分配。 +const maxTokenLength = 8192 + +// JWTClaims JWT载荷数据 +type JWTClaims struct { + UserID int64 `json:"user_id"` + Email string `json:"email"` + Role string `json:"role"` + TokenVersion int64 `json:"token_version"` // Used to invalidate tokens on password change + jwt.RegisteredClaims +} + +// AuthService 认证服务 +type AuthService struct { + userRepo UserRepository + cfg *config.Config + settingService *SettingService + emailService *EmailService + turnstileService *TurnstileService + emailQueueService *EmailQueueService + promoService *PromoService +} + +// NewAuthService 创建认证服务实例 +func NewAuthService( + userRepo UserRepository, + cfg *config.Config, + settingService *SettingService, + emailService *EmailService, + turnstileService *TurnstileService, + emailQueueService *EmailQueueService, + promoService *PromoService, +) *AuthService { + return &AuthService{ + userRepo: userRepo, + cfg: cfg, + settingService: settingService, + emailService: emailService, + turnstileService: turnstileService, + emailQueueService: emailQueueService, + promoService: promoService, + } +} + +// Register 用户注册,返回token和用户 +func (s *AuthService) Register(ctx context.Context, email, password string) (string, *User, error) { + return s.RegisterWithVerification(ctx, email, password, "", "") +} + +// RegisterWithVerification 用户注册(支持邮件验证和优惠码),返回token和用户 +func (s *AuthService) RegisterWithVerification(ctx context.Context, email, password, verifyCode, promoCode string) (string, *User, error) { + // 检查是否开放注册(默认关闭:settingService 未配置时不允许注册) + if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { + return "", nil, ErrRegDisabled + } + + // 防止用户注册 LinuxDo OAuth 合成邮箱,避免第三方登录与本地账号发生碰撞。 + if isReservedEmail(email) { + return "", nil, ErrEmailReserved + } + + // 检查是否需要邮件验证 + if s.settingService != nil && s.settingService.IsEmailVerifyEnabled(ctx) { + // 如果邮件验证已开启但邮件服务未配置,拒绝注册 + // 这是一个配置错误,不应该允许绕过验证 + if s.emailService == nil { + log.Println("[Auth] Email verification enabled but email service not configured, rejecting registration") + return "", nil, ErrServiceUnavailable + } + if verifyCode == "" { + return "", nil, ErrEmailVerifyRequired + } + // 验证邮箱验证码 + if err := s.emailService.VerifyCode(ctx, email, verifyCode); err != nil { + return "", nil, fmt.Errorf("verify code: %w", err) + } + } + + // 检查邮箱是否已存在 + existsEmail, err := s.userRepo.ExistsByEmail(ctx, email) + if err != nil { + log.Printf("[Auth] Database error checking email exists: %v", err) + return "", nil, ErrServiceUnavailable + } + if existsEmail { + return "", nil, ErrEmailExists + } + + // 密码哈希 + hashedPassword, err := s.HashPassword(password) + if err != nil { + return "", nil, fmt.Errorf("hash password: %w", err) + } + + // 获取默认配置 + defaultBalance := s.cfg.Default.UserBalance + defaultConcurrency := s.cfg.Default.UserConcurrency + if s.settingService != 
nil { + defaultBalance = s.settingService.GetDefaultBalance(ctx) + defaultConcurrency = s.settingService.GetDefaultConcurrency(ctx) + } + + // 创建用户 + user := &User{ + Email: email, + PasswordHash: hashedPassword, + Role: RoleUser, + Balance: defaultBalance, + Concurrency: defaultConcurrency, + Status: StatusActive, + } + + if err := s.userRepo.Create(ctx, user); err != nil { + // 优先检查邮箱冲突错误(竞态条件下可能发生) + if errors.Is(err, ErrEmailExists) { + return "", nil, ErrEmailExists + } + log.Printf("[Auth] Database error creating user: %v", err) + return "", nil, ErrServiceUnavailable + } + + // 应用优惠码(如果提供) + if promoCode != "" && s.promoService != nil { + if err := s.promoService.ApplyPromoCode(ctx, user.ID, promoCode); err != nil { + // 优惠码应用失败不影响注册,只记录日志 + log.Printf("[Auth] Failed to apply promo code for user %d: %v", user.ID, err) + } else { + // 重新获取用户信息以获取更新后的余额 + if updatedUser, err := s.userRepo.GetByID(ctx, user.ID); err == nil { + user = updatedUser + } + } + } + + // 生成token + token, err := s.GenerateToken(user) + if err != nil { + return "", nil, fmt.Errorf("generate token: %w", err) + } + + return token, user, nil +} + +// SendVerifyCodeResult 发送验证码返回结果 +type SendVerifyCodeResult struct { + Countdown int `json:"countdown"` // 倒计时秒数 +} + +// SendVerifyCode 发送邮箱验证码(同步方式) +func (s *AuthService) SendVerifyCode(ctx context.Context, email string) error { + // 检查是否开放注册(默认关闭) + if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { + return ErrRegDisabled + } + + if isReservedEmail(email) { + return ErrEmailReserved + } + + // 检查邮箱是否已存在 + existsEmail, err := s.userRepo.ExistsByEmail(ctx, email) + if err != nil { + log.Printf("[Auth] Database error checking email exists: %v", err) + return ErrServiceUnavailable + } + if existsEmail { + return ErrEmailExists + } + + // 发送验证码 + if s.emailService == nil { + return errors.New("email service not configured") + } + + // 获取网站名称 + siteName := "Sub2API" + if s.settingService != nil { + siteName = s.settingService.GetSiteName(ctx) + } + + return s.emailService.SendVerifyCode(ctx, email, siteName) +} + +// SendVerifyCodeAsync 异步发送邮箱验证码并返回倒计时 +func (s *AuthService) SendVerifyCodeAsync(ctx context.Context, email string) (*SendVerifyCodeResult, error) { + log.Printf("[Auth] SendVerifyCodeAsync called for email: %s", email) + + // 检查是否开放注册(默认关闭) + if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { + log.Println("[Auth] Registration is disabled") + return nil, ErrRegDisabled + } + + if isReservedEmail(email) { + return nil, ErrEmailReserved + } + + // 检查邮箱是否已存在 + existsEmail, err := s.userRepo.ExistsByEmail(ctx, email) + if err != nil { + log.Printf("[Auth] Database error checking email exists: %v", err) + return nil, ErrServiceUnavailable + } + if existsEmail { + log.Printf("[Auth] Email already exists: %s", email) + return nil, ErrEmailExists + } + + // 检查邮件队列服务是否配置 + if s.emailQueueService == nil { + log.Println("[Auth] Email queue service not configured") + return nil, errors.New("email queue service not configured") + } + + // 获取网站名称 + siteName := "Sub2API" + if s.settingService != nil { + siteName = s.settingService.GetSiteName(ctx) + } + + // 异步发送 + log.Printf("[Auth] Enqueueing verify code for: %s", email) + if err := s.emailQueueService.EnqueueVerifyCode(email, siteName); err != nil { + log.Printf("[Auth] Failed to enqueue: %v", err) + return nil, fmt.Errorf("enqueue verify code: %w", err) + } + + log.Printf("[Auth] Verify code enqueued successfully for: %s", email) + return &SendVerifyCodeResult{ + 
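+		// Advisory value for the client (e.g. to disable the resend button
+		// for 60 seconds); this method itself does not enforce the interval.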
Countdown: 60, // 60秒倒计时 + }, nil +} + +// VerifyTurnstile 验证Turnstile token +func (s *AuthService) VerifyTurnstile(ctx context.Context, token string, remoteIP string) error { + required := s.cfg != nil && s.cfg.Server.Mode == "release" && s.cfg.Turnstile.Required + + if required { + if s.settingService == nil { + log.Println("[Auth] Turnstile required but settings service is not configured") + return ErrTurnstileNotConfigured + } + enabled := s.settingService.IsTurnstileEnabled(ctx) + secretConfigured := s.settingService.GetTurnstileSecretKey(ctx) != "" + if !enabled || !secretConfigured { + log.Printf("[Auth] Turnstile required but not configured (enabled=%v, secret_configured=%v)", enabled, secretConfigured) + return ErrTurnstileNotConfigured + } + } + + if s.turnstileService == nil { + if required { + log.Println("[Auth] Turnstile required but service not configured") + return ErrTurnstileNotConfigured + } + return nil // 服务未配置则跳过验证 + } + + if !required && s.settingService != nil && s.settingService.IsTurnstileEnabled(ctx) && s.settingService.GetTurnstileSecretKey(ctx) == "" { + log.Println("[Auth] Turnstile enabled but secret key not configured") + } + + return s.turnstileService.VerifyToken(ctx, token, remoteIP) +} + +// IsTurnstileEnabled 检查是否启用Turnstile验证 +func (s *AuthService) IsTurnstileEnabled(ctx context.Context) bool { + if s.turnstileService == nil { + return false + } + return s.turnstileService.IsEnabled(ctx) +} + +// IsRegistrationEnabled 检查是否开放注册 +func (s *AuthService) IsRegistrationEnabled(ctx context.Context) bool { + if s.settingService == nil { + return false // 安全默认:settingService 未配置时关闭注册 + } + return s.settingService.IsRegistrationEnabled(ctx) +} + +// IsEmailVerifyEnabled 检查是否开启邮件验证 +func (s *AuthService) IsEmailVerifyEnabled(ctx context.Context) bool { + if s.settingService == nil { + return false + } + return s.settingService.IsEmailVerifyEnabled(ctx) +} + +// Login 用户登录,返回JWT token +func (s *AuthService) Login(ctx context.Context, email, password string) (string, *User, error) { + // 查找用户 + user, err := s.userRepo.GetByEmail(ctx, email) + if err != nil { + if errors.Is(err, ErrUserNotFound) { + return "", nil, ErrInvalidCredentials + } + // 记录数据库错误但不暴露给用户 + log.Printf("[Auth] Database error during login: %v", err) + return "", nil, ErrServiceUnavailable + } + + // 验证密码 + if !s.CheckPassword(password, user.PasswordHash) { + return "", nil, ErrInvalidCredentials + } + + // 检查用户状态 + if !user.IsActive() { + return "", nil, ErrUserNotActive + } + + // 生成JWT token + token, err := s.GenerateToken(user) + if err != nil { + return "", nil, fmt.Errorf("generate token: %w", err) + } + + return token, user, nil +} + +// LoginOrRegisterOAuth 用于第三方 OAuth/SSO 登录: +// - 如果邮箱已存在:直接登录(不需要本地密码) +// - 如果邮箱不存在:创建新用户并登录 +// +// 注意:该函数用于 LinuxDo OAuth 登录场景(不同于上游账号的 OAuth,例如 Claude/OpenAI/Gemini)。 +// 为了满足现有数据库约束(需要密码哈希),新用户会生成随机密码并进行哈希保存。 +func (s *AuthService) LoginOrRegisterOAuth(ctx context.Context, email, username string) (string, *User, error) { + email = strings.TrimSpace(email) + if email == "" || len(email) > 255 { + return "", nil, infraerrors.BadRequest("INVALID_EMAIL", "invalid email") + } + if _, err := mail.ParseAddress(email); err != nil { + return "", nil, infraerrors.BadRequest("INVALID_EMAIL", "invalid email") + } + + username = strings.TrimSpace(username) + if len([]rune(username)) > 100 { + username = string([]rune(username)[:100]) + } + + user, err := s.userRepo.GetByEmail(ctx, email) + if err != nil { + if errors.Is(err, ErrUserNotFound) { + // OAuth 
首次登录视为注册(fail-close:settingService 未配置时不允许注册) + if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { + return "", nil, ErrRegDisabled + } + + randomPassword, err := randomHexString(32) + if err != nil { + log.Printf("[Auth] Failed to generate random password for oauth signup: %v", err) + return "", nil, ErrServiceUnavailable + } + hashedPassword, err := s.HashPassword(randomPassword) + if err != nil { + return "", nil, fmt.Errorf("hash password: %w", err) + } + + // 新用户默认值。 + defaultBalance := s.cfg.Default.UserBalance + defaultConcurrency := s.cfg.Default.UserConcurrency + if s.settingService != nil { + defaultBalance = s.settingService.GetDefaultBalance(ctx) + defaultConcurrency = s.settingService.GetDefaultConcurrency(ctx) + } + + newUser := &User{ + Email: email, + Username: username, + PasswordHash: hashedPassword, + Role: RoleUser, + Balance: defaultBalance, + Concurrency: defaultConcurrency, + Status: StatusActive, + } + + if err := s.userRepo.Create(ctx, newUser); err != nil { + if errors.Is(err, ErrEmailExists) { + // 并发场景:GetByEmail 与 Create 之间用户被创建。 + user, err = s.userRepo.GetByEmail(ctx, email) + if err != nil { + log.Printf("[Auth] Database error getting user after conflict: %v", err) + return "", nil, ErrServiceUnavailable + } + } else { + log.Printf("[Auth] Database error creating oauth user: %v", err) + return "", nil, ErrServiceUnavailable + } + } else { + user = newUser + } + } else { + log.Printf("[Auth] Database error during oauth login: %v", err) + return "", nil, ErrServiceUnavailable + } + } + + if !user.IsActive() { + return "", nil, ErrUserNotActive + } + + // 尽力补全:当用户名为空时,使用第三方返回的用户名回填。 + if user.Username == "" && username != "" { + user.Username = username + if err := s.userRepo.Update(ctx, user); err != nil { + log.Printf("[Auth] Failed to update username after oauth login: %v", err) + } + } + + token, err := s.GenerateToken(user) + if err != nil { + return "", nil, fmt.Errorf("generate token: %w", err) + } + return token, user, nil +} + +// ValidateToken 验证JWT token并返回用户声明 +func (s *AuthService) ValidateToken(tokenString string) (*JWTClaims, error) { + // 先做长度校验,尽早拒绝异常超长 token,降低 DoS 风险。 + if len(tokenString) > maxTokenLength { + return nil, ErrTokenTooLarge + } + + // 使用解析器并限制可接受的签名算法,防止算法混淆。 + parser := jwt.NewParser(jwt.WithValidMethods([]string{ + jwt.SigningMethodHS256.Name, + jwt.SigningMethodHS384.Name, + jwt.SigningMethodHS512.Name, + })) + + // 保留默认 claims 校验(exp/nbf),避免放行过期或未生效的 token。 + token, err := parser.ParseWithClaims(tokenString, &JWTClaims{}, func(token *jwt.Token) (any, error) { + // 验证签名方法 + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return []byte(s.cfg.JWT.Secret), nil + }) + + if err != nil { + if errors.Is(err, jwt.ErrTokenExpired) { + // token 过期但仍返回 claims(用于 RefreshToken 等场景) + // jwt-go 在解析时即使遇到过期错误,token.Claims 仍会被填充 + if claims, ok := token.Claims.(*JWTClaims); ok { + return claims, ErrTokenExpired + } + return nil, ErrTokenExpired + } + return nil, ErrInvalidToken + } + + if claims, ok := token.Claims.(*JWTClaims); ok && token.Valid { + return claims, nil + } + + return nil, ErrInvalidToken +} + +func randomHexString(byteLength int) (string, error) { + if byteLength <= 0 { + byteLength = 16 + } + buf := make([]byte, byteLength) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return hex.EncodeToString(buf), nil +} + +func isReservedEmail(email string) bool { + normalized := 
+
+// GenerateToken issues a JWT token
+func (s *AuthService) GenerateToken(user *User) (string, error) {
+	now := time.Now()
+	expiresAt := now.Add(time.Duration(s.cfg.JWT.ExpireHour) * time.Hour)
+
+	claims := &JWTClaims{
+		UserID:       user.ID,
+		Email:        user.Email,
+		Role:         user.Role,
+		TokenVersion: user.TokenVersion,
+		RegisteredClaims: jwt.RegisteredClaims{
+			ExpiresAt: jwt.NewNumericDate(expiresAt),
+			IssuedAt:  jwt.NewNumericDate(now),
+			NotBefore: jwt.NewNumericDate(now),
+		},
+	}
+
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
+	tokenString, err := token.SignedString([]byte(s.cfg.JWT.Secret))
+	if err != nil {
+		return "", fmt.Errorf("sign token: %w", err)
+	}
+
+	return tokenString, nil
+}
+
+// HashPassword hashes a password with bcrypt
+func (s *AuthService) HashPassword(password string) (string, error) {
+	hashedBytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return "", err
+	}
+	return string(hashedBytes), nil
+}
+
+// CheckPassword reports whether a password matches the stored hash
+func (s *AuthService) CheckPassword(password, hashedPassword string) bool {
+	err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(password))
+	return err == nil
+}
+
+// RefreshToken refreshes a token
+func (s *AuthService) RefreshToken(ctx context.Context, oldTokenString string) (string, error) {
+	// Validate the old token (an expired token is allowed here, for refresh)
+	claims, err := s.ValidateToken(oldTokenString)
+	if err != nil && !errors.Is(err, ErrTokenExpired) {
+		return "", err
+	}
+
+	// Fetch the latest user record
+	user, err := s.userRepo.GetByID(ctx, claims.UserID)
+	if err != nil {
+		if errors.Is(err, ErrUserNotFound) {
+			return "", ErrInvalidToken
+		}
+		log.Printf("[Auth] Database error refreshing token: %v", err)
+		return "", ErrServiceUnavailable
+	}
+
+	// Check user status
+	if !user.IsActive() {
+		return "", ErrUserNotActive
+	}
+
+	// Security: Check TokenVersion to prevent refreshing revoked tokens
+	// This ensures tokens issued before a password change cannot be refreshed
+	if claims.TokenVersion != user.TokenVersion {
+		return "", ErrTokenRevoked
+	}
+
+	// Issue a new token
+	return s.GenerateToken(user)
+}
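The TokenVersion check above implies a revocation flow along these lines: bump the version whenever the password changes, and every previously issued token stops refreshing. A minimal sketch; the changePassword helper is hypothetical, while GetByID/Update are the repository methods already used above.

// Hypothetical password-change flow illustrating token revocation.
func changePassword(ctx context.Context, s *AuthService, repo UserRepository, userID int64, newPassword string) error {
	user, err := repo.GetByID(ctx, userID)
	if err != nil {
		return err
	}
	hash, err := s.HashPassword(newPassword)
	if err != nil {
		return err
	}
	user.PasswordHash = hash
	user.TokenVersion++ // old JWTs now fail the claims.TokenVersion == user.TokenVersion check
	return repo.Update(ctx, user)
}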
diff --git a/backend/internal/service/auth_service_register_test.go b/backend/internal/service/auth_service_register_test.go
new file mode 100644
index 00000000..bc8f6f68
--- /dev/null
+++ b/backend/internal/service/auth_service_register_test.go
@@ -0,0 +1,295 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+type settingRepoStub struct {
+	values map[string]string
+	err    error
+}
+
+func (s *settingRepoStub) Get(ctx context.Context, key string) (*Setting, error) {
+	panic("unexpected Get call")
+}
+
+func (s *settingRepoStub) GetValue(ctx context.Context, key string) (string, error) {
+	if s.err != nil {
+		return "", s.err
+	}
+	if v, ok := s.values[key]; ok {
+		return v, nil
+	}
+	return "", ErrSettingNotFound
+}
+
+func (s *settingRepoStub) Set(ctx context.Context, key, value string) error {
+	panic("unexpected Set call")
+}
+
+func (s *settingRepoStub) GetMultiple(ctx context.Context, keys []string) (map[string]string, error) {
+	panic("unexpected GetMultiple call")
+}
+
+func (s *settingRepoStub) SetMultiple(ctx context.Context, settings map[string]string) error {
+	panic("unexpected SetMultiple call")
+}
+
+func (s *settingRepoStub) GetAll(ctx context.Context) (map[string]string, error) {
+	panic("unexpected GetAll call")
+}
+
+func (s *settingRepoStub) Delete(ctx context.Context, key string) error {
+	panic("unexpected Delete call")
+}
+
+type emailCacheStub struct {
+	data *VerificationCodeData
+	err  error
+}
+
+func (s *emailCacheStub) GetVerificationCode(ctx context.Context, email string) (*VerificationCodeData, error) {
+	if s.err != nil {
+		return nil, s.err
+	}
+	return s.data, nil
+}
+
+func (s *emailCacheStub) SetVerificationCode(ctx context.Context, email string, data *VerificationCodeData, ttl time.Duration) error {
+	return nil
+}
+
+func (s *emailCacheStub) DeleteVerificationCode(ctx context.Context, email string) error {
+	return nil
+}
+
+func newAuthService(repo *userRepoStub, settings map[string]string, emailCache EmailCache) *AuthService {
+	cfg := &config.Config{
+		JWT: config.JWTConfig{
+			Secret:     "test-secret",
+			ExpireHour: 1,
+		},
+		Default: config.DefaultConfig{
+			UserBalance:     3.5,
+			UserConcurrency: 2,
+		},
+	}
+
+	var settingService *SettingService
+	if settings != nil {
+		settingService = NewSettingService(&settingRepoStub{values: settings}, cfg)
+	}
+
+	var emailService *EmailService
+	if emailCache != nil {
+		emailService = NewEmailService(&settingRepoStub{values: settings}, emailCache)
+	}
+
+	return NewAuthService(
+		repo,
+		cfg,
+		settingService,
+		emailService,
+		nil,
+		nil,
+		nil, // promoService
+	)
+}
+
+func TestAuthService_Register_Disabled(t *testing.T) {
+	repo := &userRepoStub{}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "false",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrRegDisabled)
+}
+
+func TestAuthService_Register_DisabledByDefault(t *testing.T) {
+	// With nil settings (no setting entries), registration should default to closed
+	repo := &userRepoStub{}
+	service := newAuthService(repo, nil, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrRegDisabled)
+}
+
+func TestAuthService_Register_EmailVerifyEnabledButServiceNotConfigured(t *testing.T) {
+	repo := &userRepoStub{}
+	// Email verification is on, but emailCache is nil (emailService not configured)
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+		SettingKeyEmailVerifyEnabled:  "true",
+	}, nil)
+
+	// Should return a service-unavailable error rather than allow bypassing verification
+	_, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "any-code", "")
+	require.ErrorIs(t, err, ErrServiceUnavailable)
+}
+
+func TestAuthService_Register_EmailVerifyRequired(t *testing.T) {
+	repo := &userRepoStub{}
+	cache := &emailCacheStub{} // configures emailService
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+		SettingKeyEmailVerifyEnabled:  "true",
+	}, cache)
+
+	_, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "", "")
+	require.ErrorIs(t, err, ErrEmailVerifyRequired)
+}
+
+func TestAuthService_Register_EmailVerifyInvalid(t *testing.T) {
+	repo := &userRepoStub{}
+	cache := &emailCacheStub{
+		data: &VerificationCodeData{Code: "expected", Attempts: 0},
+	}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+		SettingKeyEmailVerifyEnabled:  "true",
+	}, cache)
+
+	_, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "wrong", "")
+	require.ErrorIs(t, err, ErrInvalidVerifyCode)
+	require.ErrorContains(t, err, "verify code")
+}
+
+func TestAuthService_Register_EmailExists(t *testing.T) {
+	repo := &userRepoStub{exists: true}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrEmailExists)
+}
+
+func TestAuthService_Register_CheckEmailError(t *testing.T) {
+	repo := &userRepoStub{existsErr: errors.New("db down")}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrServiceUnavailable)
+}
+
+func TestAuthService_Register_ReservedEmail(t *testing.T) {
+	repo := &userRepoStub{}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "linuxdo-123@linuxdo-connect.invalid", "password")
+	require.ErrorIs(t, err, ErrEmailReserved)
+}
+
+func TestAuthService_Register_CreateError(t *testing.T) {
+	repo := &userRepoStub{createErr: errors.New("create failed")}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrServiceUnavailable)
+}
+
+func TestAuthService_Register_CreateEmailExistsRace(t *testing.T) {
+	// Simulate a race: ExistsByEmail returns false, but Create fails on the unique constraint
+	repo := &userRepoStub{createErr: ErrEmailExists}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	_, _, err := service.Register(context.Background(), "user@test.com", "password")
+	require.ErrorIs(t, err, ErrEmailExists)
+}
+
+func TestAuthService_Register_Success(t *testing.T) {
+	repo := &userRepoStub{nextID: 5}
+	service := newAuthService(repo, map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+	}, nil)
+
+	token, user, err := service.Register(context.Background(), "user@test.com", "password")
+	require.NoError(t, err)
+	require.NotEmpty(t, token)
+	require.NotNil(t, user)
+	require.Equal(t, int64(5), user.ID)
+	require.Equal(t, "user@test.com", user.Email)
+	require.Equal(t, RoleUser, user.Role)
+	require.Equal(t, StatusActive, user.Status)
+	require.Equal(t, 3.5, user.Balance)
+	require.Equal(t, 2, user.Concurrency)
+	require.Len(t, repo.created, 1)
+	require.True(t, user.CheckPassword("password"))
+}
+
+func TestAuthService_ValidateToken_ExpiredReturnsClaimsWithError(t *testing.T) {
+	repo := &userRepoStub{}
+	service := newAuthService(repo, nil, nil)
+
+	// Create a user and issue a token
+	user := &User{
+		ID:           1,
+		Email:        "test@test.com",
+		Role:         RoleUser,
+		Status:       StatusActive,
+		TokenVersion: 1,
+	}
+	token, err := service.GenerateToken(user)
+	require.NoError(t, err)
+
+	// A valid token verifies
+	claims, err := service.ValidateToken(token)
+	require.NoError(t, err)
+	require.NotNil(t, claims)
+	require.Equal(t, int64(1), claims.UserID)
+
+	// Simulate an expired token (by issuing one that is already expired)
+	service.cfg.JWT.ExpireHour = -1 // a negative value makes the token expire immediately
+	expiredToken, err := service.GenerateToken(user)
+	require.NoError(t, err)
+	service.cfg.JWT.ExpireHour = 1 // restore
+
+	// An expired token should return the claims together with ErrTokenExpired
+	claims, err = service.ValidateToken(expiredToken)
+	require.ErrorIs(t, err, ErrTokenExpired)
+	require.NotNil(t, claims, "claims should not be nil when token is expired")
+	require.Equal(t, int64(1), claims.UserID)
+	require.Equal(t, "test@test.com", claims.Email)
+}
+
+func TestAuthService_RefreshToken_ExpiredTokenNoPanic(t *testing.T) {
+	user := &User{
+		ID:           1,
+		Email:        "test@test.com",
+		Role:         RoleUser,
+		Status:       StatusActive,
+		TokenVersion: 1,
+	}
+	repo := &userRepoStub{user: user}
+	service := newAuthService(repo, nil, nil)
+
+	// Issue an expired token
+	service.cfg.JWT.ExpireHour = -1
+	expiredToken, err := service.GenerateToken(user)
+	require.NoError(t, err)
+	service.cfg.JWT.ExpireHour = 1
+
+	// RefreshToken with an expired token must not panic
+	require.NotPanics(t, func() {
+		newToken, err := service.RefreshToken(context.Background(), expiredToken)
+		require.NoError(t, err)
+		require.NotEmpty(t, newToken)
+	})
+}
diff --git a/backend/internal/service/billing_cache_port.go b/backend/internal/service/billing_cache_port.go
new file mode 100644
index 00000000..00bb43da
--- /dev/null
+++ b/backend/internal/service/billing_cache_port.go
@@ -0,0 +1,15 @@
+package service
+
+import (
+	"time"
+)
+
+// SubscriptionCacheData represents cached subscription data
+type SubscriptionCacheData struct {
+	Status       string
+	ExpiresAt    time.Time
+	DailyUsage   float64
+	WeeklyUsage  float64
+	MonthlyUsage float64
+	Version      int64
+}
diff --git a/backend/internal/service/billing_cache_service.go b/backend/internal/service/billing_cache_service.go
new file mode 100644
index 00000000..c09cafb9
--- /dev/null
+++ b/backend/internal/service/billing_cache_service.go
@@ -0,0 +1,661 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// Error definitions
+// Note: ErrInsufficientBalance is defined in redeem_service.go
+// Note: ErrDailyLimitExceeded/ErrWeeklyLimitExceeded/ErrMonthlyLimitExceeded are defined in subscription_service.go
+var (
+	ErrSubscriptionInvalid       = infraerrors.Forbidden("SUBSCRIPTION_INVALID", "subscription is invalid or expired")
+	ErrBillingServiceUnavailable = infraerrors.ServiceUnavailable("BILLING_SERVICE_ERROR", "Billing service temporarily unavailable. Please retry later.")
+)
+
+// subscriptionCacheData is the cached subscription payload (internal use)
+type subscriptionCacheData struct {
+	Status       string
+	ExpiresAt    time.Time
+	DailyUsage   float64
+	WeeklyUsage  float64
+	MonthlyUsage float64
+	Version      int64
+}
+
+// Cache write task kinds
+type cacheWriteKind int
+
+const (
+	cacheWriteSetBalance cacheWriteKind = iota
+	cacheWriteSetSubscription
+	cacheWriteUpdateSubscriptionUsage
+	cacheWriteDeductBalance
+)
+
+// Async cache-write worker pool configuration
+//
+// Performance notes:
+// The original implementation spawned a goroutine on the request hot path to
+// update the cache asynchronously, which had several problems:
+// 1. A new goroutine per request creates many short-lived goroutines under high concurrency
+// 2. Unbounded concurrency can exhaust Redis connections
+// 3. Goroutine creation/teardown adds overhead
+//
+// The new implementation uses a fixed-size worker pool:
+// 1. 10 worker goroutines are pre-created, avoiding constant creation/teardown
+// 2. A buffered channel (1000) serves as the task queue and smooths write peaks
+// 3. Enqueueing is non-blocking: when the queue is full, critical tasks fall
+//    back to synchronous writes and non-critical tasks are dropped with a warning
+// 4. A uniform timeout keeps slow operations from stalling the pool
+const (
+	cacheWriteWorkerCount     = 10              // number of worker goroutines
+	cacheWriteBufferSize      = 1000            // task queue buffer size
+	cacheWriteTimeout         = 2 * time.Second // timeout for a single write operation
+	cacheWriteDropLogInterval = 5 * time.Second // throttle interval for drop logs
+)
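A generic miniature of the pattern the comment above describes (fixed workers plus a buffered queue with non-blocking submit); an illustrative sketch, independent of the billing types that follow, assuming only the standard library.

// pool is a minimal fixed-size worker pool with a bounded task queue.
type pool struct {
	tasks chan func()
	wg    sync.WaitGroup
}

func newPool(workers, buffer int) *pool {
	p := &pool{tasks: make(chan func(), buffer)}
	for i := 0; i < workers; i++ {
		p.wg.Add(1)
		go func() {
			defer p.wg.Done()
			for task := range p.tasks {
				task() // workers drain the queue until it is closed
			}
		}()
	}
	return p
}

// trySubmit enqueues without blocking; callers decide what to do on false.
func (p *pool) trySubmit(task func()) bool {
	select {
	case p.tasks <- task:
		return true
	default:
		return false // queue full
	}
}

func (p *pool) stop() { close(p.tasks); p.wg.Wait() }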
+
+// cacheWriteTask is a single cache write task
+type cacheWriteTask struct {
+	kind             cacheWriteKind
+	userID           int64
+	groupID          int64
+	balance          float64
+	amount           float64
+	subscriptionData *subscriptionCacheData
+}
+
+// BillingCacheService is the billing cache service.
+// It manages balance and subscription caches and provides fast
+// billing-eligibility checks.
+type BillingCacheService struct {
+	cache          BillingCache
+	userRepo       UserRepository
+	subRepo        UserSubscriptionRepository
+	cfg            *config.Config
+	circuitBreaker *billingCircuitBreaker
+
+	cacheWriteChan     chan cacheWriteTask
+	cacheWriteWg       sync.WaitGroup
+	cacheWriteStopOnce sync.Once
+	// Throttled drop counters (reduce log noise under high load)
+	cacheWriteDropFullCount     uint64
+	cacheWriteDropFullLastLog   int64
+	cacheWriteDropClosedCount   uint64
+	cacheWriteDropClosedLastLog int64
+}
+
+// NewBillingCacheService creates the billing cache service
+func NewBillingCacheService(cache BillingCache, userRepo UserRepository, subRepo UserSubscriptionRepository, cfg *config.Config) *BillingCacheService {
+	svc := &BillingCacheService{
+		cache:    cache,
+		userRepo: userRepo,
+		subRepo:  subRepo,
+		cfg:      cfg,
+	}
+	svc.circuitBreaker = newBillingCircuitBreaker(cfg.Billing.CircuitBreaker)
+	svc.startCacheWriteWorkers()
+	return svc
+}
+
+// Stop shuts down the cache-write worker pool
+func (s *BillingCacheService) Stop() {
+	s.cacheWriteStopOnce.Do(func() {
+		if s.cacheWriteChan == nil {
+			return
+		}
+		close(s.cacheWriteChan)
+		s.cacheWriteWg.Wait()
+		s.cacheWriteChan = nil
+	})
+}
+
+func (s *BillingCacheService) startCacheWriteWorkers() {
+	s.cacheWriteChan = make(chan cacheWriteTask, cacheWriteBufferSize)
+	for i := 0; i < cacheWriteWorkerCount; i++ {
+		s.cacheWriteWg.Add(1)
+		go s.cacheWriteWorker()
+	}
+}
+
+// enqueueCacheWrite tries to enqueue a task; it returns false (and logs a
+// warning) when the queue is full.
+func (s *BillingCacheService) enqueueCacheWrite(task cacheWriteTask) (enqueued bool) {
+	if s.cacheWriteChan == nil {
+		return false
+	}
+	defer func() {
+		if recovered := recover(); recovered != nil {
+			// Sending on a closed queue panics; log it and fail quietly.
+			s.logCacheWriteDrop(task, "closed")
+			enqueued = false
+		}
+	}()
+	select {
+	case s.cacheWriteChan <- task:
+		return true
+	default:
+		// Never block the main flow when the queue is full; the caller
+		// decides whether to fall back synchronously.
+		s.logCacheWriteDrop(task, "full")
+		return false
+	}
+}
+
+func (s *BillingCacheService) cacheWriteWorker() {
+	defer s.cacheWriteWg.Done()
+	for task := range s.cacheWriteChan {
+		ctx, cancel := context.WithTimeout(context.Background(), cacheWriteTimeout)
+		switch task.kind {
+		case cacheWriteSetBalance:
+			s.setBalanceCache(ctx, task.userID, task.balance)
+		case cacheWriteSetSubscription:
+			s.setSubscriptionCache(ctx, task.userID, task.groupID, task.subscriptionData)
+		case cacheWriteUpdateSubscriptionUsage:
+			if s.cache != nil {
+				if err := s.cache.UpdateSubscriptionUsage(ctx, task.userID, task.groupID, task.amount); err != nil {
+					log.Printf("Warning: update subscription cache failed for user %d group %d: %v", task.userID, task.groupID, err)
+				}
+			}
+		case cacheWriteDeductBalance:
+			if s.cache != nil {
+				if err := s.cache.DeductUserBalance(ctx, task.userID, task.amount); err != nil {
+					log.Printf("Warning: deduct balance cache failed for user %d: %v", task.userID, err)
+				}
+			}
+		}
+		cancel()
+	}
+}
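A standalone sketch of why enqueueCacheWrite recovers: a send on a closed channel panics in Go, so an enqueue racing with Stop must be absorbed rather than crash the caller. Illustrative only, using a bare int channel instead of the task type above.

// trySend demonstrates the non-blocking send + recover idiom.
func trySend(ch chan int, v int) (ok bool) {
	defer func() {
		if recover() != nil {
			ok = false // the channel was closed concurrently
		}
	}()
	select {
	case ch <- v:
		return true
	default:
		return false // queue full
	}
}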
"update_subscription_usage" + case cacheWriteDeductBalance: + return "deduct_balance" + default: + return "unknown" + } +} + +// logCacheWriteDrop 使用节流方式记录丢弃情况,并汇总丢弃数量。 +func (s *BillingCacheService) logCacheWriteDrop(task cacheWriteTask, reason string) { + var ( + countPtr *uint64 + lastPtr *int64 + ) + switch reason { + case "full": + countPtr = &s.cacheWriteDropFullCount + lastPtr = &s.cacheWriteDropFullLastLog + case "closed": + countPtr = &s.cacheWriteDropClosedCount + lastPtr = &s.cacheWriteDropClosedLastLog + default: + return + } + + atomic.AddUint64(countPtr, 1) + now := time.Now().UnixNano() + last := atomic.LoadInt64(lastPtr) + if now-last < int64(cacheWriteDropLogInterval) { + return + } + if !atomic.CompareAndSwapInt64(lastPtr, last, now) { + return + } + dropped := atomic.SwapUint64(countPtr, 0) + if dropped == 0 { + return + } + log.Printf("Warning: cache write queue %s, dropped %d tasks in last %s (latest kind=%s user %d group %d)", + reason, + dropped, + cacheWriteDropLogInterval, + cacheWriteKindName(task.kind), + task.userID, + task.groupID, + ) +} + +// ============================================ +// 余额缓存方法 +// ============================================ + +// GetUserBalance 获取用户余额(优先从缓存读取) +func (s *BillingCacheService) GetUserBalance(ctx context.Context, userID int64) (float64, error) { + if s.cache == nil { + // Redis不可用,直接查询数据库 + return s.getUserBalanceFromDB(ctx, userID) + } + + // 尝试从缓存读取 + balance, err := s.cache.GetUserBalance(ctx, userID) + if err == nil { + return balance, nil + } + + // 缓存未命中,从数据库读取 + balance, err = s.getUserBalanceFromDB(ctx, userID) + if err != nil { + return 0, err + } + + // 异步建立缓存 + _ = s.enqueueCacheWrite(cacheWriteTask{ + kind: cacheWriteSetBalance, + userID: userID, + balance: balance, + }) + + return balance, nil +} + +// getUserBalanceFromDB 从数据库获取用户余额 +func (s *BillingCacheService) getUserBalanceFromDB(ctx context.Context, userID int64) (float64, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return 0, fmt.Errorf("get user balance: %w", err) + } + return user.Balance, nil +} + +// setBalanceCache 设置余额缓存 +func (s *BillingCacheService) setBalanceCache(ctx context.Context, userID int64, balance float64) { + if s.cache == nil { + return + } + if err := s.cache.SetUserBalance(ctx, userID, balance); err != nil { + log.Printf("Warning: set balance cache failed for user %d: %v", userID, err) + } +} + +// DeductBalanceCache 扣减余额缓存(同步调用) +func (s *BillingCacheService) DeductBalanceCache(ctx context.Context, userID int64, amount float64) error { + if s.cache == nil { + return nil + } + return s.cache.DeductUserBalance(ctx, userID, amount) +} + +// QueueDeductBalance 异步扣减余额缓存 +func (s *BillingCacheService) QueueDeductBalance(userID int64, amount float64) { + if s.cache == nil { + return + } + // 队列满时同步回退,避免关键扣减被静默丢弃。 + if s.enqueueCacheWrite(cacheWriteTask{ + kind: cacheWriteDeductBalance, + userID: userID, + amount: amount, + }) { + return + } + ctx, cancel := context.WithTimeout(context.Background(), cacheWriteTimeout) + defer cancel() + if err := s.DeductBalanceCache(ctx, userID, amount); err != nil { + log.Printf("Warning: deduct balance cache fallback failed for user %d: %v", userID, err) + } +} + +// InvalidateUserBalance 失效用户余额缓存 +func (s *BillingCacheService) InvalidateUserBalance(ctx context.Context, userID int64) error { + if s.cache == nil { + return nil + } + if err := s.cache.InvalidateUserBalance(ctx, userID); err != nil { + log.Printf("Warning: invalidate balance cache failed for user %d: %v", 
+		return err
+	}
+	return nil
+}
+
+// ============================================
+// Subscription cache methods
+// ============================================
+
+// GetSubscriptionStatus returns the subscription status (cache first)
+func (s *BillingCacheService) GetSubscriptionStatus(ctx context.Context, userID, groupID int64) (*subscriptionCacheData, error) {
+	if s.cache == nil {
+		return s.getSubscriptionFromDB(ctx, userID, groupID)
+	}
+
+	// Try the cache first
+	cacheData, err := s.cache.GetSubscriptionCache(ctx, userID, groupID)
+	if err == nil && cacheData != nil {
+		return s.convertFromPortsData(cacheData), nil
+	}
+
+	// Cache miss; read from the database
+	data, err := s.getSubscriptionFromDB(ctx, userID, groupID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Populate the cache asynchronously
+	_ = s.enqueueCacheWrite(cacheWriteTask{
+		kind:             cacheWriteSetSubscription,
+		userID:           userID,
+		groupID:          groupID,
+		subscriptionData: data,
+	})
+
+	return data, nil
+}
+
+func (s *BillingCacheService) convertFromPortsData(data *SubscriptionCacheData) *subscriptionCacheData {
+	return &subscriptionCacheData{
+		Status:       data.Status,
+		ExpiresAt:    data.ExpiresAt,
+		DailyUsage:   data.DailyUsage,
+		WeeklyUsage:  data.WeeklyUsage,
+		MonthlyUsage: data.MonthlyUsage,
+		Version:      data.Version,
+	}
+}
+
+func (s *BillingCacheService) convertToPortsData(data *subscriptionCacheData) *SubscriptionCacheData {
+	return &SubscriptionCacheData{
+		Status:       data.Status,
+		ExpiresAt:    data.ExpiresAt,
+		DailyUsage:   data.DailyUsage,
+		WeeklyUsage:  data.WeeklyUsage,
+		MonthlyUsage: data.MonthlyUsage,
+		Version:      data.Version,
+	}
+}
+
+// getSubscriptionFromDB reads subscription data from the database
+func (s *BillingCacheService) getSubscriptionFromDB(ctx context.Context, userID, groupID int64) (*subscriptionCacheData, error) {
+	sub, err := s.subRepo.GetActiveByUserIDAndGroupID(ctx, userID, groupID)
+	if err != nil {
+		return nil, fmt.Errorf("get subscription: %w", err)
+	}
+
+	return &subscriptionCacheData{
+		Status:       sub.Status,
+		ExpiresAt:    sub.ExpiresAt,
+		DailyUsage:   sub.DailyUsageUSD,
+		WeeklyUsage:  sub.WeeklyUsageUSD,
+		MonthlyUsage: sub.MonthlyUsageUSD,
+		Version:      sub.UpdatedAt.Unix(),
+	}, nil
+}
+
+// setSubscriptionCache writes the subscription cache
+func (s *BillingCacheService) setSubscriptionCache(ctx context.Context, userID, groupID int64, data *subscriptionCacheData) {
+	if s.cache == nil || data == nil {
+		return
+	}
+	if err := s.cache.SetSubscriptionCache(ctx, userID, groupID, s.convertToPortsData(data)); err != nil {
+		log.Printf("Warning: set subscription cache failed for user %d group %d: %v", userID, groupID, err)
+	}
+}
+
+// UpdateSubscriptionUsage updates the subscription usage cache (synchronous)
+func (s *BillingCacheService) UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, costUSD float64) error {
+	if s.cache == nil {
+		return nil
+	}
+	return s.cache.UpdateSubscriptionUsage(ctx, userID, groupID, costUSD)
+}
+
+// QueueUpdateSubscriptionUsage updates the subscription usage cache asynchronously
+func (s *BillingCacheService) QueueUpdateSubscriptionUsage(userID, groupID int64, costUSD float64) {
+	if s.cache == nil {
+		return
+	}
+	// Fall back to a synchronous write when the queue is full so usage
+	// stays up to date.
+	if s.enqueueCacheWrite(cacheWriteTask{
+		kind:    cacheWriteUpdateSubscriptionUsage,
+		userID:  userID,
+		groupID: groupID,
+		amount:  costUSD,
+	}) {
+		return
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), cacheWriteTimeout)
+	defer cancel()
+	if err := s.UpdateSubscriptionUsage(ctx, userID, groupID, costUSD); err != nil {
+		log.Printf("Warning: update subscription cache fallback failed for user %d group %d: %v", userID, groupID, err)
+	}
+}
+
+// InvalidateSubscription invalidates the cache entry for one subscription
+func (s *BillingCacheService) InvalidateSubscription(ctx context.Context, userID, groupID int64) error {
+	if s.cache == nil {
+		return nil
+	}
+	if err := s.cache.InvalidateSubscriptionCache(ctx, userID, groupID); err != nil {
+		log.Printf("Warning: invalidate subscription cache failed for user %d group %d: %v", userID, groupID, err)
+		return err
+	}
+	return nil
+}
+
+// ============================================
+// Unified eligibility checks
+// ============================================
+
+// CheckBillingEligibility checks whether the user may issue a request.
+// Balance mode: the cached balance must be > 0.
+// Subscription mode: cached usage must be within the limits (group limits are passed in).
+func (s *BillingCacheService) CheckBillingEligibility(ctx context.Context, user *User, apiKey *APIKey, group *Group, subscription *UserSubscription) error {
+	// Simple mode: skip all billing checks
+	if s.cfg.RunMode == config.RunModeSimple {
+		return nil
+	}
+	if s.circuitBreaker != nil && !s.circuitBreaker.Allow() {
+		return ErrBillingServiceUnavailable
+	}
+
+	// Determine the billing mode
+	isSubscriptionMode := group != nil && group.IsSubscriptionType() && subscription != nil
+
+	if isSubscriptionMode {
+		return s.checkSubscriptionEligibility(ctx, user.ID, group, subscription)
+	}
+
+	return s.checkBalanceEligibility(ctx, user.ID)
+}
+
+// checkBalanceEligibility checks eligibility in balance mode
+func (s *BillingCacheService) checkBalanceEligibility(ctx context.Context, userID int64) error {
+	balance, err := s.GetUserBalance(ctx, userID)
+	if err != nil {
+		if s.circuitBreaker != nil {
+			s.circuitBreaker.OnFailure(err)
+		}
+		log.Printf("ALERT: billing balance check failed for user %d: %v", userID, err)
+		return ErrBillingServiceUnavailable.WithCause(err)
+	}
+	if s.circuitBreaker != nil {
+		s.circuitBreaker.OnSuccess()
+	}
+
+	if balance <= 0 {
+		return ErrInsufficientBalance
+	}
+
+	return nil
+}
+
+// checkSubscriptionEligibility checks eligibility in subscription mode
+func (s *BillingCacheService) checkSubscriptionEligibility(ctx context.Context, userID int64, group *Group, subscription *UserSubscription) error {
+	// Fetch cached subscription data
+	subData, err := s.GetSubscriptionStatus(ctx, userID, group.ID)
+	if err != nil {
+		if s.circuitBreaker != nil {
+			s.circuitBreaker.OnFailure(err)
+		}
+		log.Printf("ALERT: billing subscription check failed for user %d group %d: %v", userID, group.ID, err)
+		return ErrBillingServiceUnavailable.WithCause(err)
+	}
+	if s.circuitBreaker != nil {
+		s.circuitBreaker.OnSuccess()
+	}
+
+	// Check subscription status
+	if subData.Status != SubscriptionStatusActive {
+		return ErrSubscriptionInvalid
+	}
+
+	// Check expiry
+	if time.Now().After(subData.ExpiresAt) {
+		return ErrSubscriptionInvalid
+	}
+
+	// Check limits (using the group's limit configuration passed in)
+	if group.HasDailyLimit() && subData.DailyUsage >= *group.DailyLimitUSD {
+		return ErrDailyLimitExceeded
+	}
+
+	if group.HasWeeklyLimit() && subData.WeeklyUsage >= *group.WeeklyLimitUSD {
+		return ErrWeeklyLimitExceeded
+	}
+
+	if group.HasMonthlyLimit() && subData.MonthlyUsage >= *group.MonthlyLimitUSD {
+		return ErrMonthlyLimitExceeded
+	}
+
+	return nil
+}
+
+type billingCircuitBreakerState int
+
+const (
+	billingCircuitClosed billingCircuitBreakerState = iota
+	billingCircuitOpen
+	billingCircuitHalfOpen
+)
+
+type billingCircuitBreaker struct {
+	mu               sync.Mutex
+	state            billingCircuitBreakerState
+	failures         int
+	openedAt         time.Time
+	failureThreshold int
+	resetTimeout     time.Duration
+	halfOpenRequests int
+	halfOpenRemaining int
+}
+
+func newBillingCircuitBreaker(cfg config.CircuitBreakerConfig) *billingCircuitBreaker {
+	if !cfg.Enabled {
+		return nil
+	}
+	resetTimeout := time.Duration(cfg.ResetTimeoutSeconds) * time.Second
+	if resetTimeout <= 0 {
+		resetTimeout = 30 * time.Second
+	}
+	halfOpen := cfg.HalfOpenRequests
+	if halfOpen <= 0 {
+		halfOpen = 1
+	}
+	threshold := cfg.FailureThreshold
+	if threshold <= 0 {
+		threshold = 5
+	}
+	return &billingCircuitBreaker{
+		state:            billingCircuitClosed,
+		failureThreshold: threshold,
+		resetTimeout:     resetTimeout,
+		halfOpenRequests: halfOpen,
+	}
+}
+
+func (b *billingCircuitBreaker) Allow() bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	switch b.state {
+	case billingCircuitClosed:
+		return true
+	case billingCircuitOpen:
+		if time.Since(b.openedAt) < b.resetTimeout {
+			return false
+		}
+		b.state = billingCircuitHalfOpen
+		b.halfOpenRemaining = b.halfOpenRequests
+		log.Printf("ALERT: billing circuit breaker entering half-open state")
+		fallthrough
+	case billingCircuitHalfOpen:
+		if b.halfOpenRemaining <= 0 {
+			return false
+		}
+		b.halfOpenRemaining--
+		return true
+	default:
+		return false
+	}
+}
+
+func (b *billingCircuitBreaker) OnFailure(err error) {
+	if b == nil {
+		return
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	switch b.state {
+	case billingCircuitOpen:
+		return
+	case billingCircuitHalfOpen:
+		b.state = billingCircuitOpen
+		b.openedAt = time.Now()
+		b.halfOpenRemaining = 0
+		log.Printf("ALERT: billing circuit breaker opened after half-open failure: %v", err)
+		return
+	default:
+		b.failures++
+		if b.failures >= b.failureThreshold {
+			b.state = billingCircuitOpen
+			b.openedAt = time.Now()
+			b.halfOpenRemaining = 0
+			log.Printf("ALERT: billing circuit breaker opened after %d failures: %v", b.failures, err)
+		}
+	}
+}
+
+func (b *billingCircuitBreaker) OnSuccess() {
+	if b == nil {
+		return
+	}
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	previousState := b.state
+	previousFailures := b.failures
+
+	b.state = billingCircuitClosed
+	b.failures = 0
+	b.halfOpenRemaining = 0
+
+	// Only log when the state actually changed
+	if previousState != billingCircuitClosed {
+		log.Printf("ALERT: billing circuit breaker closed (was %s)", circuitStateString(previousState))
+	} else if previousFailures > 0 {
+		log.Printf("INFO: billing circuit breaker failures reset from %d", previousFailures)
+	}
+}
+
+func circuitStateString(state billingCircuitBreakerState) string {
+	switch state {
+	case billingCircuitClosed:
+		return "closed"
+	case billingCircuitOpen:
+		return "open"
+	case billingCircuitHalfOpen:
+		return "half-open"
+	default:
+		return "unknown"
+	}
+}
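A usage sketch for the breaker above, mirroring how CheckBillingEligibility drives it: gate each call with Allow and report the outcome so the breaker can move between closed, open, and half-open. The wrapper function is hypothetical.

// callWithBreaker wraps an arbitrary operation with the breaker's protocol.
func callWithBreaker(b *billingCircuitBreaker, do func() error) error {
	if b != nil && !b.Allow() {
		return ErrBillingServiceUnavailable // fail fast while open
	}
	err := do()
	if b != nil {
		if err != nil {
			b.OnFailure(err)
		} else {
			b.OnSuccess()
		}
	}
	return err
}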
diff --git a/backend/internal/service/billing_cache_service_test.go b/backend/internal/service/billing_cache_service_test.go
new file mode 100644
index 00000000..445d5319
--- /dev/null
+++ b/backend/internal/service/billing_cache_service_test.go
@@ -0,0 +1,75 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"sync/atomic"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+type billingCacheWorkerStub struct {
+	balanceUpdates      int64
+	subscriptionUpdates int64
+}
+
+func (b *billingCacheWorkerStub) GetUserBalance(ctx context.Context, userID int64) (float64, error) {
+	return 0, errors.New("not implemented")
+}
+
+func (b *billingCacheWorkerStub) SetUserBalance(ctx context.Context, userID int64, balance float64) error {
+	atomic.AddInt64(&b.balanceUpdates, 1)
+	return nil
+}
+
+func (b *billingCacheWorkerStub) DeductUserBalance(ctx context.Context, userID int64, amount float64) error {
+	atomic.AddInt64(&b.balanceUpdates, 1)
+	return nil
+}
+
+func (b *billingCacheWorkerStub) InvalidateUserBalance(ctx context.Context, userID int64) error {
+	return nil
+}
+
+func (b *billingCacheWorkerStub) GetSubscriptionCache(ctx context.Context, userID, groupID int64) (*SubscriptionCacheData, error) {
+	return nil, errors.New("not implemented")
+}
+
+func (b *billingCacheWorkerStub) SetSubscriptionCache(ctx context.Context, userID, groupID int64, data *SubscriptionCacheData) error {
+	atomic.AddInt64(&b.subscriptionUpdates, 1)
+	return nil
+}
+
+func (b *billingCacheWorkerStub) UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, cost float64) error {
+	atomic.AddInt64(&b.subscriptionUpdates, 1)
+	return nil
+}
+
+func (b *billingCacheWorkerStub) InvalidateSubscriptionCache(ctx context.Context, userID, groupID int64) error {
+	return nil
+}
+
+func TestBillingCacheServiceQueueHighLoad(t *testing.T) {
+	cache := &billingCacheWorkerStub{}
+	svc := NewBillingCacheService(cache, nil, nil, &config.Config{})
+	t.Cleanup(svc.Stop)
+
+	start := time.Now()
+	for i := 0; i < cacheWriteBufferSize*2; i++ {
+		svc.QueueDeductBalance(1, 1)
+	}
+	require.Less(t, time.Since(start), 2*time.Second)
+
+	svc.QueueUpdateSubscriptionUsage(1, 2, 1.5)
+
+	require.Eventually(t, func() bool {
+		return atomic.LoadInt64(&cache.balanceUpdates) > 0
+	}, 2*time.Second, 10*time.Millisecond)
+
+	require.Eventually(t, func() bool {
+		return atomic.LoadInt64(&cache.subscriptionUpdates) > 0
+	}, 2*time.Second, 10*time.Millisecond)
+}
diff --git a/backend/internal/service/billing_service.go b/backend/internal/service/billing_service.go
new file mode 100644
index 00000000..f2afc343
--- /dev/null
+++ b/backend/internal/service/billing_service.go
@@ -0,0 +1,382 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+)
+
+// BillingCache defines cache operations for billing service
+type BillingCache interface {
+	// Balance operations
+	GetUserBalance(ctx context.Context, userID int64) (float64, error)
+	SetUserBalance(ctx context.Context, userID int64, balance float64) error
+	DeductUserBalance(ctx context.Context, userID int64, amount float64) error
+	InvalidateUserBalance(ctx context.Context, userID int64) error
+
+	// Subscription operations
+	GetSubscriptionCache(ctx context.Context, userID, groupID int64) (*SubscriptionCacheData, error)
+	SetSubscriptionCache(ctx context.Context, userID, groupID int64, data *SubscriptionCacheData) error
+	UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, cost float64) error
+	InvalidateSubscriptionCache(ctx context.Context, userID, groupID int64) error
+}
+
+// ModelPricing holds model pricing (per-token prices, matching the LiteLLM format)
+type ModelPricing struct {
+	InputPricePerToken         float64 // input price per token (USD)
+	OutputPricePerToken        float64 // output price per token (USD)
+	CacheCreationPricePerToken float64 // cache creation price per token (USD)
+	CacheReadPricePerToken     float64 // cache read price per token (USD)
+	CacheCreation5mPrice       float64 // 5-minute cache creation price (per million tokens), hardcoded fallback only
+	CacheCreation1hPrice       float64 // 1-hour cache creation price (per million tokens), hardcoded fallback only
+	SupportsCacheBreakdown     bool    // whether a detailed cache breakdown is supported
+}
+
+// UsageTokens holds consumed token counts
+type UsageTokens struct {
+	InputTokens           int
+	OutputTokens          int
+	CacheCreationTokens   int
+	CacheReadTokens       int
+	CacheCreation5mTokens int
+	CacheCreation1hTokens int
+}
+
+// CostBreakdown itemizes the cost
+type CostBreakdown struct {
+	InputCost         float64
+	OutputCost        float64
+	CacheCreationCost float64
+	CacheReadCost     float64
+	TotalCost         float64
+	ActualCost        float64 // actual cost after applying the rate multiplier
+}
+
+// BillingService is the billing service
+type BillingService struct {
+	cfg            *config.Config
+	pricingService *PricingService
+	fallbackPrices map[string]*ModelPricing // hardcoded fallback prices
+}
+
+// NewBillingService creates a billing service instance
+func NewBillingService(cfg *config.Config, pricingService *PricingService) *BillingService {
+	s := &BillingService{
+		cfg:            cfg,
+		pricingService: pricingService,
+		fallbackPrices: make(map[string]*ModelPricing),
+	}
+
+	// Seed the hardcoded fallback prices (used when dynamic pricing is unavailable)
+	s.initFallbackPricing()
+
+	return s
+}
+
+// initFallbackPricing seeds the hardcoded fallback prices (used when dynamic
+// pricing is unavailable). Prices are USD per token (matching the LiteLLM format).
+func (s *BillingService) initFallbackPricing() {
+	// Claude 4.5 Opus
+	s.fallbackPrices["claude-opus-4.5"] = &ModelPricing{
+		InputPricePerToken:         5e-6,    // $5 per MTok
+		OutputPricePerToken:        25e-6,   // $25 per MTok
+		CacheCreationPricePerToken: 6.25e-6, // $6.25 per MTok
+		CacheReadPricePerToken:     0.5e-6,  // $0.50 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+
+	// Claude 4 Sonnet
+	s.fallbackPrices["claude-sonnet-4"] = &ModelPricing{
+		InputPricePerToken:         3e-6,    // $3 per MTok
+		OutputPricePerToken:        15e-6,   // $15 per MTok
+		CacheCreationPricePerToken: 3.75e-6, // $3.75 per MTok
+		CacheReadPricePerToken:     0.3e-6,  // $0.30 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+
+	// Claude 3.5 Sonnet
+	s.fallbackPrices["claude-3-5-sonnet"] = &ModelPricing{
+		InputPricePerToken:         3e-6,    // $3 per MTok
+		OutputPricePerToken:        15e-6,   // $15 per MTok
+		CacheCreationPricePerToken: 3.75e-6, // $3.75 per MTok
+		CacheReadPricePerToken:     0.3e-6,  // $0.30 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+
+	// Claude 3.5 Haiku
+	s.fallbackPrices["claude-3-5-haiku"] = &ModelPricing{
+		InputPricePerToken:         1e-6,    // $1 per MTok
+		OutputPricePerToken:        5e-6,    // $5 per MTok
+		CacheCreationPricePerToken: 1.25e-6, // $1.25 per MTok
+		CacheReadPricePerToken:     0.1e-6,  // $0.10 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+
+	// Claude 3 Opus
+	s.fallbackPrices["claude-3-opus"] = &ModelPricing{
+		InputPricePerToken:         15e-6,    // $15 per MTok
+		OutputPricePerToken:        75e-6,    // $75 per MTok
+		CacheCreationPricePerToken: 18.75e-6, // $18.75 per MTok
+		CacheReadPricePerToken:     1.5e-6,   // $1.50 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+
+	// Claude 3 Haiku
+	s.fallbackPrices["claude-3-haiku"] = &ModelPricing{
+		InputPricePerToken:         0.25e-6, // $0.25 per MTok
+		OutputPricePerToken:        1.25e-6, // $1.25 per MTok
+		CacheCreationPricePerToken: 0.3e-6,  // $0.30 per MTok
+		CacheReadPricePerToken:     0.03e-6, // $0.03 per MTok
+		SupportsCacheBreakdown:     false,
+	}
+}
+
+// getFallbackPricing returns fallback pricing by model family
+func (s *BillingService) getFallbackPricing(model string) *ModelPricing {
+	modelLower := strings.ToLower(model)
+
+	// Match by model family
+	if strings.Contains(modelLower, "opus") {
+		if strings.Contains(modelLower, "4.5") || strings.Contains(modelLower, "4-5") {
+			return s.fallbackPrices["claude-opus-4.5"]
+		}
+		return s.fallbackPrices["claude-3-opus"]
+	}
+	if strings.Contains(modelLower, "sonnet") {
+		if strings.Contains(modelLower, "4") && !strings.Contains(modelLower, "3") {
+			return s.fallbackPrices["claude-sonnet-4"]
+		}
+		return s.fallbackPrices["claude-3-5-sonnet"]
+	}
+	if strings.Contains(modelLower, "haiku") {
+		if strings.Contains(modelLower, "3-5") || strings.Contains(modelLower, "3.5") {
+			return s.fallbackPrices["claude-3-5-haiku"]
+		}
+		return s.fallbackPrices["claude-3-haiku"]
+	}
+
+	// Default to Sonnet pricing
+	return s.fallbackPrices["claude-sonnet-4"]
+}
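A worked example of the per-token arithmetic with the fallback table above (claude-sonnet-4: $3/MTok input, $15/MTok output); the function and token counts are illustrative only.

// exampleSonnetCost prices 10,000 input and 2,000 output tokens.
func exampleSonnetCost() float64 {
	const inPerTok, outPerTok = 3e-6, 15e-6
	// 10_000 * 3e-6 = $0.03; 2_000 * 15e-6 = $0.03; total = $0.06
	return 10_000*inPerTok + 2_000*outPerTok
}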
+
+// GetModelPricing returns the pricing configuration for a model
+func (s *BillingService) GetModelPricing(model string) (*ModelPricing, error) {
+	// Normalize the model name (lowercase)
+	model = strings.ToLower(model)
+
+	// 1. Prefer the dynamic pricing service
+	if s.pricingService != nil {
+		litellmPricing := s.pricingService.GetModelPricing(model)
+		if litellmPricing != nil {
+			return &ModelPricing{
+				InputPricePerToken:         litellmPricing.InputCostPerToken,
+				OutputPricePerToken:        litellmPricing.OutputCostPerToken,
+				CacheCreationPricePerToken: litellmPricing.CacheCreationInputTokenCost,
+				CacheReadPricePerToken:     litellmPricing.CacheReadInputTokenCost,
+				SupportsCacheBreakdown:     false,
+			}, nil
+		}
+	}
+
+	// 2. Fall back to the hardcoded prices
+	fallback := s.getFallbackPricing(model)
+	if fallback != nil {
+		log.Printf("[Billing] Using fallback pricing for model: %s", model)
+		return fallback, nil
+	}
+
+	return nil, fmt.Errorf("pricing not found for model: %s", model)
+}
+
+// CalculateCost computes the cost of usage
+func (s *BillingService) CalculateCost(model string, tokens UsageTokens, rateMultiplier float64) (*CostBreakdown, error) {
+	pricing, err := s.GetModelPricing(model)
+	if err != nil {
+		return nil, err
+	}
+
+	breakdown := &CostBreakdown{}
+
+	// Input token cost (per-token price)
+	breakdown.InputCost = float64(tokens.InputTokens) * pricing.InputPricePerToken
+
+	// Output token cost
+	breakdown.OutputCost = float64(tokens.OutputTokens) * pricing.OutputPricePerToken
+
+	// Cache cost
+	if pricing.SupportsCacheBreakdown && (pricing.CacheCreation5mPrice > 0 || pricing.CacheCreation1hPrice > 0) {
+		// Models with a detailed cache breakdown (5-minute/1-hour cache)
+		breakdown.CacheCreationCost = float64(tokens.CacheCreation5mTokens)/1_000_000*pricing.CacheCreation5mPrice +
+			float64(tokens.CacheCreation1hTokens)/1_000_000*pricing.CacheCreation1hPrice
+	} else {
+		// Standard cache creation price (per-token)
+		breakdown.CacheCreationCost = float64(tokens.CacheCreationTokens) * pricing.CacheCreationPricePerToken
+	}
+
+	breakdown.CacheReadCost = float64(tokens.CacheReadTokens) * pricing.CacheReadPricePerToken
+
+	// Total cost
+	breakdown.TotalCost = breakdown.InputCost + breakdown.OutputCost +
+		breakdown.CacheCreationCost + breakdown.CacheReadCost
+
+	// Apply the multiplier for the actual cost
+	if rateMultiplier <= 0 {
+		rateMultiplier = 1.0
+	}
+	breakdown.ActualCost = breakdown.TotalCost * rateMultiplier
+
+	return breakdown, nil
+}
+
+// CalculateCostWithConfig computes the cost using the default multiplier from config
+func (s *BillingService) CalculateCostWithConfig(model string, tokens UsageTokens) (*CostBreakdown, error) {
+	multiplier := s.cfg.Default.RateMultiplier
+	if multiplier <= 0 {
+		multiplier = 1.0
+	}
+	return s.CalculateCost(model, tokens, multiplier)
+}
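A usage sketch for CalculateCost: pricing a request that includes cache reads at a 1.2x multiplier. The token counts and the helper function are made up; the call itself matches the signature above.

// exampleCalculateCost prices one hypothetical request.
func exampleCalculateCost(b *BillingService) {
	breakdown, err := b.CalculateCost("claude-3-5-sonnet", UsageTokens{
		InputTokens:     1200,
		OutputTokens:    400,
		CacheReadTokens: 8000,
	}, 1.2)
	if err != nil {
		log.Printf("pricing unavailable: %v", err)
		return
	}
	log.Printf("total=%.6f actual=%.6f", breakdown.TotalCost, breakdown.ActualCost)
}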
+
+// ListSupportedModels lists the model families covered by fallback pricing
+func (s *BillingService) ListSupportedModels() []string {
+	models := make([]string, 0)
+	// Return the model families backed by fallback prices
+	for model := range s.fallbackPrices {
+		models = append(models, model)
+	}
+	return models
+}
+
+// IsModelSupported reports whether a model is supported (effectively always
+// true for Claude models, thanks to the fuzzy fallback matching)
+func (s *BillingService) IsModelSupported(model string) bool {
+	// All Claude models are covered by fallback pricing
+	modelLower := strings.ToLower(model)
+	return strings.Contains(modelLower, "claude") ||
+		strings.Contains(modelLower, "opus") ||
+		strings.Contains(modelLower, "sonnet") ||
+		strings.Contains(modelLower, "haiku")
+}
+
+// GetEstimatedCost estimates the cost (for frontend display)
+func (s *BillingService) GetEstimatedCost(model string, estimatedInputTokens, estimatedOutputTokens int) (float64, error) {
+	tokens := UsageTokens{
+		InputTokens:  estimatedInputTokens,
+		OutputTokens: estimatedOutputTokens,
+	}
+
+	breakdown, err := s.CalculateCostWithConfig(model, tokens)
+	if err != nil {
+		return 0, err
+	}
+
+	return breakdown.ActualCost, nil
+}
+
+// GetPricingServiceStatus returns the pricing service status
+func (s *BillingService) GetPricingServiceStatus() map[string]any {
+	if s.pricingService != nil {
+		return s.pricingService.GetStatus()
+	}
+	return map[string]any{
+		"model_count":  len(s.fallbackPrices),
+		"last_updated": "using fallback",
+		"local_hash":   "N/A",
+	}
+}
+
+// ForceUpdatePricing forces a pricing data refresh
+func (s *BillingService) ForceUpdatePricing() error {
+	if s.pricingService != nil {
+		return s.pricingService.ForceUpdate()
+	}
+	return fmt.Errorf("pricing service not initialized")
+}
+
+// ImagePriceConfig holds image billing configuration
+type ImagePriceConfig struct {
+	Price1K *float64 // price for 1K size (nil means use the default)
+	Price2K *float64 // price for 2K size (nil means use the default)
+	Price4K *float64 // price for 4K size (nil means use the default)
+}
+
+// CalculateImageCost computes the cost of image generation
+// model: requested model name (used to look up the LiteLLM default price)
+// imageSize: image size, "1K", "2K", or "4K"
+// imageCount: number of generated images
+// groupConfig: group-configured prices (may be nil, meaning use defaults)
+// rateMultiplier: rate multiplier
+func (s *BillingService) CalculateImageCost(model string, imageSize string, imageCount int, groupConfig *ImagePriceConfig, rateMultiplier float64) *CostBreakdown {
+	if imageCount <= 0 {
+		return &CostBreakdown{}
+	}
+
+	// Unit price
+	unitPrice := s.getImageUnitPrice(model, imageSize, groupConfig)
+
+	// Total cost
+	totalCost := unitPrice * float64(imageCount)
+
+	// Apply the multiplier
+	if rateMultiplier <= 0 {
+		rateMultiplier = 1.0
+	}
+	actualCost := totalCost * rateMultiplier
+
+	return &CostBreakdown{
+		TotalCost:  totalCost,
+		ActualCost: actualCost,
+	}
+}
+
+// getImageUnitPrice returns the image unit price
+func (s *BillingService) getImageUnitPrice(model string, imageSize string, groupConfig *ImagePriceConfig) float64 {
+	// Prefer group-configured prices
+	if groupConfig != nil {
+		switch imageSize {
+		case "1K":
+			if groupConfig.Price1K != nil {
+				return *groupConfig.Price1K
+			}
+		case "2K":
+			if groupConfig.Price2K != nil {
+				return *groupConfig.Price2K
+			}
+		case "4K":
+			if groupConfig.Price4K != nil {
+				return *groupConfig.Price4K
+			}
+		}
+	}
+
+	// Fall back to the LiteLLM default price
+	return s.getDefaultImagePrice(model, imageSize)
+}
+
+// getDefaultImagePrice returns the LiteLLM default image price
+func (s *BillingService) getDefaultImagePrice(model string, imageSize string) float64 {
+	basePrice := 0.0
+
+	// Read output_cost_per_image from the PricingService
+	if s.pricingService != nil {
+		pricing := s.pricingService.GetModelPricing(model)
+		if pricing != nil && pricing.OutputCostPerImage > 0 {
+			basePrice = pricing.OutputCostPerImage
+		}
+	}
+
+	// If no price is found, use the hardcoded default ($0.134, from gemini-3-pro-image-preview)
+	if basePrice <= 0 {
+		basePrice = 0.134
+	}
+
+	// 4K size doubles the price
+	if imageSize == "4K" {
+		return basePrice * 2
+	}
+
+	return basePrice
+}
diff --git a/backend/internal/service/billing_service_image_test.go b/backend/internal/service/billing_service_image_test.go
new file mode 100644
index 00000000..18a6b74d
--- /dev/null
+++ b/backend/internal/service/billing_service_image_test.go
@@ -0,0 +1,149 @@
+//go:build unit
+
+package service
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestCalculateImageCost_DefaultPricing verifies the default price is used when there is no group config
+func TestCalculateImageCost_DefaultPricing(t *testing.T) {
+	svc := &BillingService{} // pricingService is nil, so hardcoded defaults apply
+
+	// 2K size, default price $0.134
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.0)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001)
+	require.InDelta(t, 0.134, cost.ActualCost, 0.0001)
+
+	// Multiple images
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 3, nil, 1.0)
+	require.InDelta(t, 0.402, cost.TotalCost, 0.0001)
+}
+
+// TestCalculateImageCost_GroupCustomPricing verifies group-specific prices
+func TestCalculateImageCost_GroupCustomPricing(t *testing.T) {
+	svc := &BillingService{}
+
+	price1K := 0.10
+	price2K := 0.15
+	price4K := 0.30
+	groupConfig := &ImagePriceConfig{
+		Price1K: &price1K,
+		Price2K: &price2K,
+		Price4K: &price4K,
+	}
+
+	// 1K uses the group price
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "1K", 2, groupConfig, 1.0)
+	require.InDelta(t, 0.20, cost.TotalCost, 0.0001)
+
+	// 2K uses the group price
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.15, cost.TotalCost, 0.0001)
+
+	// 4K uses the group price
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "4K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.30, cost.TotalCost, 0.0001)
+}
+
+// TestCalculateImageCost_4KDoublePrice verifies that the default 4K price doubles
+func TestCalculateImageCost_4KDoublePrice(t *testing.T) {
+	svc := &BillingService{}
+
+	// 4K size, default price doubled: $0.134 * 2 = $0.268
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "4K", 1, nil, 1.0)
+	require.InDelta(t, 0.268, cost.TotalCost, 0.0001)
+}
+
+// TestCalculateImageCost_RateMultiplier verifies the rate multiplier
+func TestCalculateImageCost_RateMultiplier(t *testing.T) {
+	svc := &BillingService{}
+
+	// 1.5x multiplier
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.5)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001) // TotalCost unchanged
+	require.InDelta(t, 0.201, cost.ActualCost, 0.0001) // ActualCost = 0.134 * 1.5
+
+	// 2.0x multiplier
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 2, nil, 2.0)
+	require.InDelta(t, 0.268, cost.TotalCost, 0.0001)
+	require.InDelta(t, 0.536, cost.ActualCost, 0.0001)
+}
+
+// TestCalculateImageCost_ZeroCount verifies imageCount=0
+func TestCalculateImageCost_ZeroCount(t *testing.T) {
+	svc := &BillingService{}
+
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 0, nil, 1.0)
+	require.Equal(t, 0.0, cost.TotalCost)
+	require.Equal(t, 0.0, cost.ActualCost)
+}
+
+// TestCalculateImageCost_NegativeCount verifies imageCount=-1
+func TestCalculateImageCost_NegativeCount(t *testing.T) {
+	svc := &BillingService{}
+
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", -1, nil, 1.0)
+	require.Equal(t, 0.0, cost.TotalCost)
+	require.Equal(t, 0.0, cost.ActualCost)
+}
+
+// TestCalculateImageCost_ZeroRateMultiplier verifies a zero multiplier defaults to 1.0
+func TestCalculateImageCost_ZeroRateMultiplier(t *testing.T) {
+	svc := &BillingService{}
+
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 0)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001)
+	require.InDelta(t, 0.134, cost.ActualCost, 0.0001) // a zero multiplier is treated as 1.0
+}
+
+// TestGetImageUnitPrice_GroupPriorityOverDefault verifies group prices win over defaults
+func TestGetImageUnitPrice_GroupPriorityOverDefault(t *testing.T) {
+	svc := &BillingService{}
+
+	price2K := 0.20
+	groupConfig := &ImagePriceConfig{
+		Price2K: &price2K,
+	}
+
+	// With a group 2K price configured, it should win over the default $0.134
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.20, cost.TotalCost, 0.0001)
+}
+
+// TestGetImageUnitPrice_PartialGroupConfig verifies fallback to defaults for partially configured groups
+func TestGetImageUnitPrice_PartialGroupConfig(t *testing.T) {
+	svc := &BillingService{}
+
+	// Only the 1K price is configured
+	price1K := 0.10
+	groupConfig := &ImagePriceConfig{
+		Price1K: &price1K,
+	}
+
+	// 1K uses the group price
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "1K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.10, cost.TotalCost, 0.0001)
+
+	// 2K falls back to the default price $0.134
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001)
+
+	// 4K falls back to the default price $0.268 (doubled)
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "4K", 1, groupConfig, 1.0)
+	require.InDelta(t, 0.268, cost.TotalCost, 0.0001)
+}
+
+// TestGetDefaultImagePrice_FallbackHardcoded verifies the hardcoded default when PricingService has no data
+func TestGetDefaultImagePrice_FallbackHardcoded(t *testing.T) {
+	svc := &BillingService{} // pricingService is nil
+
+	// 1K and 2K share the same default price $0.134
+	cost := svc.CalculateImageCost("gemini-3-pro-image", "1K", 1, nil, 1.0)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001)
+
+	cost = svc.CalculateImageCost("gemini-3-pro-image", "2K", 1, nil, 1.0)
+	require.InDelta(t, 0.134, cost.TotalCost, 0.0001)
+}
diff --git a/backend/internal/service/claude_code_validator.go b/backend/internal/service/claude_code_validator.go
new file mode 100644
index 00000000..ab86f1e8
--- /dev/null
+++ b/backend/internal/service/claude_code_validator.go
@@ -0,0 +1,265 @@
+package service
+
+import (
+	"context"
+	"net/http"
+	"regexp"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
+)
+
+// ClaudeCodeValidator verifies that a request comes from the Claude Code client.
+// The validation logic closely follows the claude-relay-service project.
+type ClaudeCodeValidator struct{}
+
+var (
+	// User-Agent match: claude-cli/x.x.x (official CLI only, case-insensitive)
+	claudeCodeUAPattern = regexp.MustCompile(`(?i)^claude-cli/\d+\.\d+\.\d+`)
+
+	// metadata.user_id format: user_{64-char hex}_account__session_{uuid}
+	userIDPattern = regexp.MustCompile(`^user_[a-fA-F0-9]{64}_account__session_[\w-]+$`)
+
+	// System prompt similarity threshold (default 0.5, same as claude-relay-service)
+	systemPromptThreshold = 0.5
+)
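Illustrative matches for the two patterns above; the user_id value is synthetic (64 hex characters plus a UUID-shaped session suffix), not a real identifier.

// examplePatternMatches returns (true, true) for these sample inputs.
func examplePatternMatches() (bool, bool) {
	uaOK := claudeCodeUAPattern.MatchString("claude-cli/1.0.33 (external, cli)")
	id := "user_" + strings.Repeat("ab", 32) + // 64 hex chars
		"_account__session_550e8400-e29b-41d4-a716-446655440000"
	return uaOK, userIDPattern.MatchString(id)
}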
+
+// Official Claude Code system prompt templates
+// Extracted from claude-relay-service/src/utils/contents.js
+var claudeCodeSystemPrompts = []string{
+	// claudeOtherSystemPrompt1 - Primary
+	"You are Claude Code, Anthropic's official CLI for Claude.",
+
+	// claudeOtherSystemPrompt3 - Agent SDK
+	"You are a Claude agent, built on Anthropic's Claude Agent SDK.",
+
+	// claudeOtherSystemPrompt4 - Compact Agent SDK
+	"You are Claude Code, Anthropic's official CLI for Claude, running within the Claude Agent SDK.",
+
+	// exploreAgentSystemPrompt
+	"You are a file search specialist for Claude Code, Anthropic's official CLI for Claude.",
+
+	// claudeOtherSystemPromptCompact - Compact (used for conversation summaries)
+	"You are a helpful AI assistant tasked with summarizing conversations.",
+
+	// claudeOtherSystemPrompt2 - Secondary (the key part of the long prompt)
+	"You are an interactive CLI tool that helps users",
+}
+
+// NewClaudeCodeValidator creates a validator instance
+func NewClaudeCodeValidator() *ClaudeCodeValidator {
+	return &ClaudeCodeValidator{}
+}
+
+// Validate verifies that a request comes from the Claude Code CLI.
+// The strategy is identical to claude-relay-service:
+//
+// Step 1: User-Agent check (required) - must be claude-cli/x.x.x
+// Step 2: For non-messages paths, a matching UA is sufficient
+// Step 3: For messages paths, apply strict validation:
+//   - System prompt similarity check
+//   - X-App header check
+//   - anthropic-beta header check
+//   - anthropic-version header check
+//   - metadata.user_id format validation
+func (v *ClaudeCodeValidator) Validate(r *http.Request, body map[string]any) bool {
+	// Step 1: User-Agent check
+	ua := r.Header.Get("User-Agent")
+	if !claudeCodeUAPattern.MatchString(ua) {
+		return false
+	}
+
+	// Step 2: non-messages paths pass on a UA match alone
+	path := r.URL.Path
+	if !strings.Contains(path, "messages") {
+		return true
+	}
+
+	// Step 3: messages paths get strict validation
+
+	// 3.1 System prompt similarity
+	if !v.hasClaudeCodeSystemPrompt(body) {
+		return false
+	}
+
+	// 3.2 Required headers (any non-empty value)
+	xApp := r.Header.Get("X-App")
+	if xApp == "" {
+		return false
+	}
+
+	anthropicBeta := r.Header.Get("anthropic-beta")
+	if anthropicBeta == "" {
+		return false
+	}
+
+	anthropicVersion := r.Header.Get("anthropic-version")
+	if anthropicVersion == "" {
+		return false
+	}
+
+	// 3.3 Validate metadata.user_id
+	if body == nil {
+		return false
+	}
+
+	metadata, ok := body["metadata"].(map[string]any)
+	if !ok {
+		return false
+	}
+
+	userID, ok := metadata["user_id"].(string)
+	if !ok || userID == "" {
+		return false
+	}
+
+	if !userIDPattern.MatchString(userID) {
+		return false
+	}
+
+	return true
+}
+
+// hasClaudeCodeSystemPrompt checks whether the request carries a Claude Code
+// system prompt. Matching uses string similarity (Dice coefficient).
+func (v *ClaudeCodeValidator) hasClaudeCodeSystemPrompt(body map[string]any) bool {
+	if body == nil {
+		return false
+	}
+
+	// Check the model field
+	if _, ok := body["model"].(string); !ok {
+		return false
+	}
+
+	// Get the system field
+	systemEntries, ok := body["system"].([]any)
+	if !ok {
+		return false
+	}
+
+	// Check each system entry
+	for _, entry := range systemEntries {
+		entryMap, ok := entry.(map[string]any)
+		if !ok {
+			continue
+		}
+
+		text, ok := entryMap["text"].(string)
+		if !ok || text == "" {
+			continue
+		}
+
+		// Best similarity against all templates
+		bestScore := v.bestSimilarityScore(text)
+		if bestScore >= systemPromptThreshold {
+			return true
+		}
+	}
+
+	return false
+}
+
+// bestSimilarityScore computes the best similarity between the text and all
+// Claude Code templates
+func (v *ClaudeCodeValidator) bestSimilarityScore(text string) float64 {
+	normalizedText := normalizePrompt(text)
+	bestScore := 0.0
+
+	for _, template := range claudeCodeSystemPrompts {
+		normalizedTemplate := normalizePrompt(template)
+		score := diceCoefficient(normalizedText, normalizedTemplate)
+		if score > bestScore {
+			bestScore = score
+		}
+	}
+
+	return bestScore
+}
+
+// normalizePrompt normalizes prompt text (collapses extra whitespace)
+func normalizePrompt(text string) string {
+	// Replace all whitespace runs with single spaces and trim
+	return strings.Join(strings.Fields(text), " ")
+}
+
+// diceCoefficient computes the Sørensen–Dice coefficient of two strings.
+// This is the algorithm used by the string-similarity library.
+// Formula: 2 * |intersection| / (|bigrams(a)| + |bigrams(b)|)
+func diceCoefficient(a, b string) float64 {
+	if a == b {
+		return 1.0
+	}
+
+	if len(a) < 2 || len(b) < 2 {
+		return 0.0
+	}
+
+	// Build bigrams
+	bigramsA := getBigrams(a)
+	bigramsB := getBigrams(b)
+
+	if len(bigramsA) == 0 || len(bigramsB) == 0 {
+		return 0.0
+	}
+
+	// Intersection size
+	intersection := 0
+	for bigram, countA := range bigramsA {
+		if countB, exists := bigramsB[bigram]; exists {
+			if countA < countB {
+				intersection += countA
+			} else {
+				intersection += countB
+			}
+		}
+	}
+
+	// Total bigram counts
+	totalA := 0
+	for _, count := range bigramsA {
+		totalA += count
+	}
+	totalB := 0
+	for _, count := range bigramsB {
+		totalB += count
+	}
+
+	return float64(2*intersection) / float64(totalA+totalB)
+}
+
+// getBigrams returns all bigrams (adjacent character pairs) of a string
+func getBigrams(s string) map[string]int {
+	bigrams := make(map[string]int)
+	runes := []rune(strings.ToLower(s))
+
+	for i := 0; i < len(runes)-1; i++ {
+		bigram := string(runes[i : i+2])
+		bigrams[bigram]++
+	}
+
+	return bigrams
+}
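A worked Dice example: "night" and "nacht" yield bigrams {ni, ig, gh, ht} and {na, ac, ch, ht}, sharing only "ht", so the score is 2*1/(4+4) = 0.25. The function below is illustrative only and just exercises diceCoefficient as defined above.

// exampleDice returns 0.25 for this classic pair.
func exampleDice() float64 {
	return diceCoefficient("night", "nacht")
}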
+
+// ValidateUserAgent validates only the User-Agent (for cases where the
+// request body is not parsed)
+func (v *ClaudeCodeValidator) ValidateUserAgent(ua string) bool {
+	return claudeCodeUAPattern.MatchString(ua)
+}
+
+// IncludesClaudeCodeSystemPrompt checks whether the request body contains a
+// Claude Code system prompt. It returns true whenever a matching prompt
+// exists (lenient detection).
+func (v *ClaudeCodeValidator) IncludesClaudeCodeSystemPrompt(body map[string]any) bool {
+	return v.hasClaudeCodeSystemPrompt(body)
+}
+
+// IsClaudeCodeClient reads the Claude Code client flag from the context
+func IsClaudeCodeClient(ctx context.Context) bool {
+	if v, ok := ctx.Value(ctxkey.IsClaudeCodeClient).(bool); ok {
+		return v
+	}
+	return false
+}
+
+// SetClaudeCodeClient stores the Claude Code client flag in the context
+func SetClaudeCodeClient(ctx context.Context, isClaudeCode bool) context.Context {
+	return context.WithValue(ctx, ctxkey.IsClaudeCodeClient, isClaudeCode)
+}
diff --git a/backend/internal/service/concurrency_service.go b/backend/internal/service/concurrency_service.go
new file mode 100644
index 00000000..65ef16db
--- /dev/null
+++ b/backend/internal/service/concurrency_service.go
@@ -0,0 +1,314 @@
+package service
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"log"
+	"time"
+)
+
+// ConcurrencyCache defines the cache interface for concurrency control.
+// Slots are stored in sorted sets; expired entries are cleaned up by timestamp.
+type ConcurrencyCache interface {
+	// Account slot management
+	// Key format: concurrency:account:{accountID} (sorted set, members are requestIDs)
+	AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error)
+	ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error
+	GetAccountConcurrency(ctx context.Context, accountID int64) (int, error)
+
+	// Account-level wait queue
+	IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error)
+	DecrementAccountWaitCount(ctx context.Context, accountID int64) error
+	GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error)
+
+	// User slot management
+	// Key format: concurrency:user:{userID} (sorted set, members are requestIDs)
+	AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error)
+	ReleaseUserSlot(ctx context.Context, userID int64, requestID string) error
+	GetUserConcurrency(ctx context.Context, userID int64) (int, error)
+
+	// Wait queue counters (TTL is set only on first creation)
+	IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error)
+	DecrementWaitCount(ctx context.Context, userID int64) error
+
+	// Batch load query (read-only)
+	GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error)
+
+	// Clean up expired slots (background task)
+	CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error
+}
+
+// generateRequestID generates a unique request ID for concurrency slot tracking
+// Uses 8 random bytes (16 hex chars) for uniqueness
+func generateRequestID() string {
+	b := make([]byte, 8)
+	if _, err := rand.Read(b); err != nil {
+		// Fallback to nanosecond timestamp (extremely rare case)
+		return fmt.Sprintf("%x", time.Now().UnixNano())
+	}
+	return hex.EncodeToString(b)
+}
+
+const (
+	// Default extra wait slots beyond concurrency limit
+	defaultExtraWaitSlots = 20
+)
+
+// ConcurrencyService manages concurrent request limiting for accounts and users
+type ConcurrencyService struct {
+	cache ConcurrencyCache
+}
+
+// NewConcurrencyService creates a new ConcurrencyService
+func NewConcurrencyService(cache ConcurrencyCache) *ConcurrencyService {
+	return &ConcurrencyService{cache: cache}
+}
+
+// AcquireResult represents the result of acquiring a concurrency slot
+type AcquireResult struct {
+	Acquired    bool
+	ReleaseFunc func() // Must be called when done (typically via defer)
+}
+
+type AccountWithConcurrency struct {
+	ID             int64
+	MaxConcurrency int
+}
+
+type AccountLoadInfo struct {
+	AccountID          int64
+	CurrentConcurrency int
+	WaitingCount       int
+	LoadRate           int // 0-100+ (percent)
+}
+
+// AcquireAccountSlot attempts to acquire a concurrency slot for an account.
+// If the account is already at max concurrency, the returned result has
+// Acquired=false; the caller decides whether to queue, retry, or fail.
+// Returns a release function that MUST be called when the request completes.
+func (s *ConcurrencyService) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) {
+	// If maxConcurrency is 0 or negative, no limit
+	if maxConcurrency <= 0 {
+		return &AcquireResult{
+			Acquired:    true,
+			ReleaseFunc: func() {}, // no-op
+		}, nil
+	}
+
+	// Generate unique request ID for this slot
+	requestID := generateRequestID()
+
+	acquired, err := s.cache.AcquireAccountSlot(ctx, accountID, maxConcurrency, requestID)
+	if err != nil {
+		return nil, err
+	}
+
+	if acquired {
+		return &AcquireResult{
+			Acquired: true,
+			ReleaseFunc: func() {
+				bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+				defer cancel()
+				if err := s.cache.ReleaseAccountSlot(bgCtx, accountID, requestID); err != nil {
+					log.Printf("Warning: failed to release account slot for %d (req=%s): %v", accountID, requestID, err)
+				}
+			},
+		}, nil
+	}
+
+	// Not acquired: ReleaseFunc stays nil, so callers must check Acquired first
+	return &AcquireResult{
+		Acquired:    false,
+		ReleaseFunc: nil,
+	}, nil
+}
+
+// AcquireUserSlot attempts to acquire a concurrency slot for a user.
+// Like AcquireAccountSlot it does not block: at max concurrency it returns
+// immediately with Acquired=false.
+// On success it returns a release function that MUST be called when the request completes.
+func (s *ConcurrencyService) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int) (*AcquireResult, error) {
+	// If maxConcurrency is 0 or negative, no limit
+	if maxConcurrency <= 0 {
+		return &AcquireResult{
+			Acquired:    true,
+			ReleaseFunc: func() {}, // no-op
+		}, nil
+	}
+
+	// Generate unique request ID for this slot
+	requestID := generateRequestID()
+
+	acquired, err := s.cache.AcquireUserSlot(ctx, userID, maxConcurrency, requestID)
+	if err != nil {
+		return nil, err
+	}
+
+	if acquired {
+		return &AcquireResult{
+			Acquired: true,
+			ReleaseFunc: func() {
+				bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+				defer cancel()
+				if err := s.cache.ReleaseUserSlot(bgCtx, userID, requestID); err != nil {
+					log.Printf("Warning: failed to release user slot for %d (req=%s): %v", userID, requestID, err)
+				}
+			},
+		}, nil
+	}
+
+	// Not acquired: ReleaseFunc stays nil, so callers must check Acquired first
+	return &AcquireResult{
+		Acquired:    false,
+		ReleaseFunc: nil,
+	}, nil
+}
+
+// ============================================
+// Wait Queue Count Methods
+// ============================================
+
+// IncrementWaitCount attempts to increment the wait queue counter for a user.
+// Returns true if successful, false if the wait queue is full.
+// maxWait should be user.Concurrency + defaultExtraWaitSlots
+func (s *ConcurrencyService) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) {
+	if s.cache == nil {
+		// Redis not available, allow request
+		return true, nil
+	}
+
+	result, err := s.cache.IncrementWaitCount(ctx, userID, maxWait)
+	if err != nil {
+		// On error, allow the request to proceed (fail open)
+		log.Printf("Warning: increment wait count failed for user %d: %v", userID, err)
+		return true, nil
+	}
+	return result, nil
+}
+
+// DecrementWaitCount decrements the wait queue counter for a user.
+// Should be called when a request completes or exits the wait queue.
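+//
+// Typical pairing (illustrative sketch; errQueueFull is a hypothetical
+// sentinel for a 429-style rejection):
+//
+//	ok, _ := svc.IncrementWaitCount(ctx, userID, CalculateMaxWait(user.Concurrency))
+//	if !ok {
+//		return errQueueFull
+//	}
+//	defer svc.DecrementWaitCount(ctx, userID)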
+func (s *ConcurrencyService) DecrementWaitCount(ctx context.Context, userID int64) { + if s.cache == nil { + return + } + + // Use background context to ensure decrement even if original context is cancelled + bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := s.cache.DecrementWaitCount(bgCtx, userID); err != nil { + log.Printf("Warning: decrement wait count failed for user %d: %v", userID, err) + } +} + +// IncrementAccountWaitCount increments the wait queue counter for an account. +func (s *ConcurrencyService) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + if s.cache == nil { + return true, nil + } + + result, err := s.cache.IncrementAccountWaitCount(ctx, accountID, maxWait) + if err != nil { + log.Printf("Warning: increment wait count failed for account %d: %v", accountID, err) + return true, nil + } + return result, nil +} + +// DecrementAccountWaitCount decrements the wait queue counter for an account. +func (s *ConcurrencyService) DecrementAccountWaitCount(ctx context.Context, accountID int64) { + if s.cache == nil { + return + } + + bgCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := s.cache.DecrementAccountWaitCount(bgCtx, accountID); err != nil { + log.Printf("Warning: decrement wait count failed for account %d: %v", accountID, err) + } +} + +// GetAccountWaitingCount gets current wait queue count for an account. +func (s *ConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if s.cache == nil { + return 0, nil + } + return s.cache.GetAccountWaitingCount(ctx, accountID) +} + +// CalculateMaxWait calculates the maximum wait queue size for a user +// maxWait = userConcurrency + defaultExtraWaitSlots +func CalculateMaxWait(userConcurrency int) int { + if userConcurrency <= 0 { + userConcurrency = 1 + } + return userConcurrency + defaultExtraWaitSlots +} + +// GetAccountsLoadBatch returns load info for multiple accounts. +func (s *ConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + if s.cache == nil { + return map[int64]*AccountLoadInfo{}, nil + } + return s.cache.GetAccountsLoadBatch(ctx, accounts) +} + +// CleanupExpiredAccountSlots removes expired slots for one account (background task). +func (s *ConcurrencyService) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { + if s.cache == nil { + return nil + } + return s.cache.CleanupExpiredAccountSlots(ctx, accountID) +} + +// StartSlotCleanupWorker starts a background cleanup worker for expired account slots. 
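+//
+// Illustrative wiring at startup (the interval here is a deployment choice,
+// not a documented default):
+//
+//	concurrencySvc.StartSlotCleanupWorker(accountRepo, time.Minute)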
+func (s *ConcurrencyService) StartSlotCleanupWorker(accountRepo AccountRepository, interval time.Duration) {
+	if s == nil || s.cache == nil || accountRepo == nil || interval <= 0 {
+		return
+	}
+
+	runCleanup := func() {
+		listCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		accounts, err := accountRepo.ListSchedulable(listCtx)
+		cancel()
+		if err != nil {
+			log.Printf("Warning: list schedulable accounts failed: %v", err)
+			return
+		}
+		for _, account := range accounts {
+			accountCtx, accountCancel := context.WithTimeout(context.Background(), 2*time.Second)
+			err := s.cache.CleanupExpiredAccountSlots(accountCtx, account.ID)
+			accountCancel()
+			if err != nil {
+				log.Printf("Warning: cleanup expired slots failed for account %d: %v", account.ID, err)
+			}
+		}
+	}
+
+	go func() {
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+
+		runCleanup()
+		for range ticker.C {
+			runCleanup()
+		}
+	}()
+}
+
+// GetAccountConcurrencyBatch gets current concurrency counts for multiple accounts
+// Returns a map of accountID -> current concurrency count
+func (s *ConcurrencyService) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) {
+	result := make(map[int64]int)
+	if s.cache == nil {
+		// Redis not available: report zero concurrency for every account
+		return result, nil
+	}
+
+	for _, accountID := range accountIDs {
+		count, err := s.cache.GetAccountConcurrency(ctx, accountID)
+		if err != nil {
+			// If key doesn't exist in Redis, count is 0
+			count = 0
+		}
+		result[accountID] = count
+	}
+
+	return result, nil
+}
diff --git a/backend/internal/service/crs_sync_service.go b/backend/internal/service/crs_sync_service.go
new file mode 100644
index 00000000..a6ccb967
--- /dev/null
+++ b/backend/internal/service/crs_sync_service.go
@@ -0,0 +1,1255 @@
+package service
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/httpclient"
+	"github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
+)
+
+type CRSSyncService struct {
+	accountRepo        AccountRepository
+	proxyRepo          ProxyRepository
+	oauthService       *OAuthService
+	openaiOAuthService *OpenAIOAuthService
+	geminiOAuthService *GeminiOAuthService
+	cfg                *config.Config
+}
+
+func NewCRSSyncService(
+	accountRepo AccountRepository,
+	proxyRepo ProxyRepository,
+	oauthService *OAuthService,
+	openaiOAuthService *OpenAIOAuthService,
+	geminiOAuthService *GeminiOAuthService,
+	cfg *config.Config,
+) *CRSSyncService {
+	return &CRSSyncService{
+		accountRepo:        accountRepo,
+		proxyRepo:          proxyRepo,
+		oauthService:       oauthService,
+		openaiOAuthService: openaiOAuthService,
+		geminiOAuthService: geminiOAuthService,
+		cfg:                cfg,
+	}
+}
+
+type SyncFromCRSInput struct {
+	BaseURL     string
+	Username    string
+	Password    string
+	SyncProxies bool
+}
+
+type SyncFromCRSItemResult struct {
+	CRSAccountID string `json:"crs_account_id"`
+	Kind         string `json:"kind"`
+	Name         string `json:"name"`
+	Action       string `json:"action"` // created/updated/failed/skipped
+	Error        string `json:"error,omitempty"`
+}
+
+type SyncFromCRSResult struct {
+	Created int                     `json:"created"`
+	Updated int                     `json:"updated"`
+	Skipped int                     `json:"skipped"`
+	Failed  int                     `json:"failed"`
+	Items   []SyncFromCRSItemResult `json:"items"`
+}
+
+type crsLoginResponse struct {
+	Success  bool   `json:"success"`
+	Token    string `json:"token"`
+	Message  string `json:"message"`
+	Error    string `json:"error"`
+	Username string `json:"username"`
+}
+
+type crsExportResponse struct {
+	Success bool `json:"success"`
+	Error
string `json:"error"` + Message string `json:"message"` + Data struct { + ExportedAt string `json:"exportedAt"` + ClaudeAccounts []crsClaudeAccount `json:"claudeAccounts"` + ClaudeConsoleAccounts []crsConsoleAccount `json:"claudeConsoleAccounts"` + OpenAIOAuthAccounts []crsOpenAIOAuthAccount `json:"openaiOAuthAccounts"` + OpenAIResponsesAccounts []crsOpenAIResponsesAccount `json:"openaiResponsesAccounts"` + GeminiOAuthAccounts []crsGeminiOAuthAccount `json:"geminiOAuthAccounts"` + GeminiAPIKeyAccounts []crsGeminiAPIKeyAccount `json:"geminiApiKeyAccounts"` + } `json:"data"` +} + +type crsProxy struct { + Protocol string `json:"protocol"` + Host string `json:"host"` + Port int `json:"port"` + Username string `json:"username"` + Password string `json:"password"` +} + +type crsClaudeAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + AuthType string `json:"authType"` // oauth/setup-token + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + Proxy *crsProxy `json:"proxy"` + Credentials map[string]any `json:"credentials"` + Extra map[string]any `json:"extra"` +} + +type crsConsoleAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + MaxConcurrentTasks int `json:"maxConcurrentTasks"` + Proxy *crsProxy `json:"proxy"` + Credentials map[string]any `json:"credentials"` +} + +type crsOpenAIResponsesAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + Proxy *crsProxy `json:"proxy"` + Credentials map[string]any `json:"credentials"` +} + +type crsOpenAIOAuthAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + AuthType string `json:"authType"` // oauth + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + Proxy *crsProxy `json:"proxy"` + Credentials map[string]any `json:"credentials"` + Extra map[string]any `json:"extra"` +} + +type crsGeminiOAuthAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + AuthType string `json:"authType"` // oauth + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + Proxy *crsProxy `json:"proxy"` + Credentials map[string]any `json:"credentials"` + Extra map[string]any `json:"extra"` +} + +type crsGeminiAPIKeyAccount struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + Platform string `json:"platform"` + IsActive bool `json:"isActive"` + Schedulable bool `json:"schedulable"` + Priority int `json:"priority"` + Status string `json:"status"` + Proxy *crsProxy 
`json:"proxy"` + Credentials map[string]any `json:"credentials"` + Extra map[string]any `json:"extra"` +} + +func (s *CRSSyncService) SyncFromCRS(ctx context.Context, input SyncFromCRSInput) (*SyncFromCRSResult, error) { + if s.cfg == nil { + return nil, errors.New("config is not available") + } + baseURL := strings.TrimSpace(input.BaseURL) + if s.cfg.Security.URLAllowlist.Enabled { + normalized, err := normalizeBaseURL(baseURL, s.cfg.Security.URLAllowlist.CRSHosts, s.cfg.Security.URLAllowlist.AllowPrivateHosts) + if err != nil { + return nil, err + } + baseURL = normalized + } else { + normalized, err := urlvalidator.ValidateURLFormat(baseURL, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + if err != nil { + return nil, fmt.Errorf("invalid base_url: %w", err) + } + baseURL = normalized + } + if strings.TrimSpace(input.Username) == "" || strings.TrimSpace(input.Password) == "" { + return nil, errors.New("username and password are required") + } + + client, err := httpclient.GetClient(httpclient.Options{ + Timeout: 20 * time.Second, + ValidateResolvedIP: s.cfg.Security.URLAllowlist.Enabled, + AllowPrivateHosts: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + client = &http.Client{Timeout: 20 * time.Second} + } + + adminToken, err := crsLogin(ctx, client, baseURL, input.Username, input.Password) + if err != nil { + return nil, err + } + + exported, err := crsExportAccounts(ctx, client, baseURL, adminToken) + if err != nil { + return nil, err + } + + now := time.Now().UTC().Format(time.RFC3339) + + result := &SyncFromCRSResult{ + Items: make( + []SyncFromCRSItemResult, + 0, + len(exported.Data.ClaudeAccounts)+len(exported.Data.ClaudeConsoleAccounts)+len(exported.Data.OpenAIOAuthAccounts)+len(exported.Data.OpenAIResponsesAccounts)+len(exported.Data.GeminiOAuthAccounts)+len(exported.Data.GeminiAPIKeyAccounts), + ), + } + + var proxies []Proxy + if input.SyncProxies { + proxies, _ = s.proxyRepo.ListActive(ctx) + } + + // Claude OAuth / Setup Token -> sub2api anthropic oauth/setup-token + for _, src := range exported.Data.ClaudeAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + targetType := strings.TrimSpace(src.AuthType) + if targetType == "" { + targetType = "oauth" + } + if targetType != AccountTypeOAuth && targetType != AccountTypeSetupToken { + item.Action = "skipped" + item.Error = "unsupported authType: " + targetType + result.Skipped++ + result.Items = append(result.Items, item) + continue + } + + accessToken, _ := src.Credentials["access_token"].(string) + if strings.TrimSpace(accessToken) == "" { + item.Action = "failed" + item.Error = "missing access_token" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + proxyID, err := s.mapOrCreateProxy(ctx, input.SyncProxies, &proxies, src.Proxy, fmt.Sprintf("crs-%s", src.Name)) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + // 🔧 Remove /v1 suffix from base_url for Claude accounts + cleanBaseURL(credentials, "/v1") + // 🔧 Convert expires_at from ISO string to Unix timestamp + if expiresAtStr, ok := credentials["expires_at"].(string); ok && expiresAtStr != "" { + if t, err := time.Parse(time.RFC3339, expiresAtStr); err == nil { + credentials["expires_at"] = t.Unix() + } + } + // 🔧 Add intercept_warmup_requests if not present (defaults to false) + if 
_, exists := credentials["intercept_warmup_requests"]; !exists { + credentials["intercept_warmup_requests"] = false + } + priority := clampPriority(src.Priority) + concurrency := 3 + status := mapCRSStatus(src.IsActive, src.Status) + + // 🔧 Preserve all CRS extra fields and add sync metadata + extra := make(map[string]any) + if src.Extra != nil { + for k, v := range src.Extra { + extra[k] = v + } + } + extra["crs_account_id"] = src.ID + extra["crs_kind"] = src.Kind + extra["crs_synced_at"] = now + // Extract org_uuid and account_uuid from CRS credentials to extra + if orgUUID, ok := src.Credentials["org_uuid"]; ok { + extra["org_uuid"] = orgUUID + } + if accountUUID, ok := src.Credentials["account_uuid"]; ok { + extra["account_uuid"] = accountUUID + } + + existing, err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformAnthropic, + Type: targetType, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: concurrency, + Priority: priority, + Status: status, + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + // 🔄 Refresh OAuth token after creation + if targetType == AccountTypeOAuth { + if refreshedCreds := s.refreshOAuthToken(ctx, account); refreshedCreds != nil { + account.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, account) + } + } + item.Action = "created" + result.Created++ + result.Items = append(result.Items, item) + continue + } + + // Update existing + existing.Extra = mergeMap(existing.Extra, extra) + existing.Name = defaultName(src.Name, src.ID) + existing.Platform = PlatformAnthropic + existing.Type = targetType + existing.Credentials = mergeMap(existing.Credentials, credentials) + if proxyID != nil { + existing.ProxyID = proxyID + } + existing.Concurrency = concurrency + existing.Priority = priority + existing.Status = status + existing.Schedulable = src.Schedulable + + if err := s.accountRepo.Update(ctx, existing); err != nil { + item.Action = "failed" + item.Error = "update failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + // 🔄 Refresh OAuth token after update + if targetType == AccountTypeOAuth { + if refreshedCreds := s.refreshOAuthToken(ctx, existing); refreshedCreds != nil { + existing.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, existing) + } + } + + item.Action = "updated" + result.Updated++ + result.Items = append(result.Items, item) + } + + // Claude Console API Key -> sub2api anthropic apikey + for _, src := range exported.Data.ClaudeConsoleAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + apiKey, _ := src.Credentials["api_key"].(string) + if strings.TrimSpace(apiKey) == "" { + item.Action = "failed" + item.Error = "missing api_key" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + proxyID, err := s.mapOrCreateProxy(ctx, input.SyncProxies, &proxies, src.Proxy, fmt.Sprintf("crs-%s", src.Name)) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + 
result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + priority := clampPriority(src.Priority) + concurrency := 3 + if src.MaxConcurrentTasks > 0 { + concurrency = src.MaxConcurrentTasks + } + status := mapCRSStatus(src.IsActive, src.Status) + + extra := map[string]any{ + "crs_account_id": src.ID, + "crs_kind": src.Kind, + "crs_synced_at": now, + } + + existing, err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformAnthropic, + Type: AccountTypeAPIKey, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: concurrency, + Priority: priority, + Status: status, + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + item.Action = "created" + result.Created++ + result.Items = append(result.Items, item) + continue + } + + existing.Extra = mergeMap(existing.Extra, extra) + existing.Name = defaultName(src.Name, src.ID) + existing.Platform = PlatformAnthropic + existing.Type = AccountTypeAPIKey + existing.Credentials = mergeMap(existing.Credentials, credentials) + if proxyID != nil { + existing.ProxyID = proxyID + } + existing.Concurrency = concurrency + existing.Priority = priority + existing.Status = status + existing.Schedulable = src.Schedulable + + if err := s.accountRepo.Update(ctx, existing); err != nil { + item.Action = "failed" + item.Error = "update failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + item.Action = "updated" + result.Updated++ + result.Items = append(result.Items, item) + } + + // OpenAI OAuth -> sub2api openai oauth + for _, src := range exported.Data.OpenAIOAuthAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + accessToken, _ := src.Credentials["access_token"].(string) + if strings.TrimSpace(accessToken) == "" { + item.Action = "failed" + item.Error = "missing access_token" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + proxyID, err := s.mapOrCreateProxy( + ctx, + input.SyncProxies, + &proxies, + src.Proxy, + fmt.Sprintf("crs-%s", src.Name), + ) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + // Normalize token_type + if v, ok := credentials["token_type"].(string); !ok || strings.TrimSpace(v) == "" { + credentials["token_type"] = "Bearer" + } + // 🔧 Convert expires_at from ISO string to Unix timestamp + if expiresAtStr, ok := credentials["expires_at"].(string); ok && expiresAtStr != "" { + if t, err := time.Parse(time.RFC3339, expiresAtStr); err == nil { + credentials["expires_at"] = t.Unix() + } + } + priority := clampPriority(src.Priority) + concurrency := 3 + status := mapCRSStatus(src.IsActive, src.Status) + + // 🔧 Preserve all CRS extra fields and add sync metadata + extra := make(map[string]any) + if src.Extra != nil { + for k, v := range src.Extra { + extra[k] = v + } + } + 
extra["crs_account_id"] = src.ID + extra["crs_kind"] = src.Kind + extra["crs_synced_at"] = now + // Extract email from CRS extra (crs_email -> email) + if crsEmail, ok := src.Extra["crs_email"]; ok { + extra["email"] = crsEmail + } + + existing, err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: concurrency, + Priority: priority, + Status: status, + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + // 🔄 Refresh OAuth token after creation + if refreshedCreds := s.refreshOAuthToken(ctx, account); refreshedCreds != nil { + account.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, account) + } + item.Action = "created" + result.Created++ + result.Items = append(result.Items, item) + continue + } + + existing.Extra = mergeMap(existing.Extra, extra) + existing.Name = defaultName(src.Name, src.ID) + existing.Platform = PlatformOpenAI + existing.Type = AccountTypeOAuth + existing.Credentials = mergeMap(existing.Credentials, credentials) + if proxyID != nil { + existing.ProxyID = proxyID + } + existing.Concurrency = concurrency + existing.Priority = priority + existing.Status = status + existing.Schedulable = src.Schedulable + + if err := s.accountRepo.Update(ctx, existing); err != nil { + item.Action = "failed" + item.Error = "update failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + // 🔄 Refresh OAuth token after update + if refreshedCreds := s.refreshOAuthToken(ctx, existing); refreshedCreds != nil { + existing.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, existing) + } + + item.Action = "updated" + result.Updated++ + result.Items = append(result.Items, item) + } + + // OpenAI Responses API Key -> sub2api openai apikey + for _, src := range exported.Data.OpenAIResponsesAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + apiKey, _ := src.Credentials["api_key"].(string) + if strings.TrimSpace(apiKey) == "" { + item.Action = "failed" + item.Error = "missing api_key" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if baseURL, ok := src.Credentials["base_url"].(string); !ok || strings.TrimSpace(baseURL) == "" { + src.Credentials["base_url"] = "https://api.openai.com" + } + // 🔧 Remove /v1 suffix from base_url for OpenAI accounts + cleanBaseURL(src.Credentials, "/v1") + + proxyID, err := s.mapOrCreateProxy( + ctx, + input.SyncProxies, + &proxies, + src.Proxy, + fmt.Sprintf("crs-%s", src.Name), + ) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + priority := clampPriority(src.Priority) + concurrency := 3 + status := mapCRSStatus(src.IsActive, src.Status) + + extra := map[string]any{ + "crs_account_id": src.ID, + "crs_kind": src.Kind, + "crs_synced_at": now, + } + + existing, 
err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: concurrency, + Priority: priority, + Status: status, + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + item.Action = "created" + result.Created++ + result.Items = append(result.Items, item) + continue + } + + existing.Extra = mergeMap(existing.Extra, extra) + existing.Name = defaultName(src.Name, src.ID) + existing.Platform = PlatformOpenAI + existing.Type = AccountTypeAPIKey + existing.Credentials = mergeMap(existing.Credentials, credentials) + if proxyID != nil { + existing.ProxyID = proxyID + } + existing.Concurrency = concurrency + existing.Priority = priority + existing.Status = status + existing.Schedulable = src.Schedulable + + if err := s.accountRepo.Update(ctx, existing); err != nil { + item.Action = "failed" + item.Error = "update failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + item.Action = "updated" + result.Updated++ + result.Items = append(result.Items, item) + } + + // Gemini OAuth -> sub2api gemini oauth + for _, src := range exported.Data.GeminiOAuthAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + refreshToken, _ := src.Credentials["refresh_token"].(string) + if strings.TrimSpace(refreshToken) == "" { + item.Action = "failed" + item.Error = "missing refresh_token" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + proxyID, err := s.mapOrCreateProxy(ctx, input.SyncProxies, &proxies, src.Proxy, fmt.Sprintf("crs-%s", src.Name)) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + if v, ok := credentials["token_type"].(string); !ok || strings.TrimSpace(v) == "" { + credentials["token_type"] = "Bearer" + } + // Convert expires_at from RFC3339 to Unix seconds string (recommended to keep consistent with GetCredential()) + if expiresAtStr, ok := credentials["expires_at"].(string); ok && strings.TrimSpace(expiresAtStr) != "" { + if t, err := time.Parse(time.RFC3339, expiresAtStr); err == nil { + credentials["expires_at"] = strconv.FormatInt(t.Unix(), 10) + } + } + + extra := make(map[string]any) + if src.Extra != nil { + for k, v := range src.Extra { + extra[k] = v + } + } + extra["crs_account_id"] = src.ID + extra["crs_kind"] = src.Kind + extra["crs_synced_at"] = now + + existing, err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformGemini, + Type: AccountTypeOAuth, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: 3, + Priority: 
clampPriority(src.Priority), + Status: mapCRSStatus(src.IsActive, src.Status), + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + if refreshedCreds := s.refreshOAuthToken(ctx, account); refreshedCreds != nil { + account.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, account) + } + item.Action = "created" + result.Created++ + result.Items = append(result.Items, item) + continue + } + + existing.Extra = mergeMap(existing.Extra, extra) + existing.Name = defaultName(src.Name, src.ID) + existing.Platform = PlatformGemini + existing.Type = AccountTypeOAuth + existing.Credentials = mergeMap(existing.Credentials, credentials) + if proxyID != nil { + existing.ProxyID = proxyID + } + existing.Concurrency = 3 + existing.Priority = clampPriority(src.Priority) + existing.Status = mapCRSStatus(src.IsActive, src.Status) + existing.Schedulable = src.Schedulable + + if err := s.accountRepo.Update(ctx, existing); err != nil { + item.Action = "failed" + item.Error = "update failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if refreshedCreds := s.refreshOAuthToken(ctx, existing); refreshedCreds != nil { + existing.Credentials = refreshedCreds + _ = s.accountRepo.Update(ctx, existing) + } + + item.Action = "updated" + result.Updated++ + result.Items = append(result.Items, item) + } + + // Gemini API Key -> sub2api gemini apikey + for _, src := range exported.Data.GeminiAPIKeyAccounts { + item := SyncFromCRSItemResult{ + CRSAccountID: src.ID, + Kind: src.Kind, + Name: src.Name, + } + + apiKey, _ := src.Credentials["api_key"].(string) + if strings.TrimSpace(apiKey) == "" { + item.Action = "failed" + item.Error = "missing api_key" + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + proxyID, err := s.mapOrCreateProxy(ctx, input.SyncProxies, &proxies, src.Proxy, fmt.Sprintf("crs-%s", src.Name)) + if err != nil { + item.Action = "failed" + item.Error = "proxy sync failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + credentials := sanitizeCredentialsMap(src.Credentials) + if baseURL, ok := credentials["base_url"].(string); !ok || strings.TrimSpace(baseURL) == "" { + credentials["base_url"] = "https://generativelanguage.googleapis.com" + } + + extra := make(map[string]any) + if src.Extra != nil { + for k, v := range src.Extra { + extra[k] = v + } + } + extra["crs_account_id"] = src.ID + extra["crs_kind"] = src.Kind + extra["crs_synced_at"] = now + + existing, err := s.accountRepo.GetByCRSAccountID(ctx, src.ID) + if err != nil { + item.Action = "failed" + item.Error = "db lookup failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + + if existing == nil { + account := &Account{ + Name: defaultName(src.Name, src.ID), + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + Credentials: credentials, + Extra: extra, + ProxyID: proxyID, + Concurrency: 3, + Priority: clampPriority(src.Priority), + Status: mapCRSStatus(src.IsActive, src.Status), + Schedulable: src.Schedulable, + } + if err := s.accountRepo.Create(ctx, account); err != nil { + item.Action = "failed" + item.Error = "create failed: " + err.Error() + result.Failed++ + result.Items = append(result.Items, item) + continue + } + item.Action = "created" + result.Created++ + 
result.Items = append(result.Items, item)
+			continue
+		}
+
+		existing.Extra = mergeMap(existing.Extra, extra)
+		existing.Name = defaultName(src.Name, src.ID)
+		existing.Platform = PlatformGemini
+		existing.Type = AccountTypeAPIKey
+		existing.Credentials = mergeMap(existing.Credentials, credentials)
+		if proxyID != nil {
+			existing.ProxyID = proxyID
+		}
+		existing.Concurrency = 3
+		existing.Priority = clampPriority(src.Priority)
+		existing.Status = mapCRSStatus(src.IsActive, src.Status)
+		existing.Schedulable = src.Schedulable
+
+		if err := s.accountRepo.Update(ctx, existing); err != nil {
+			item.Action = "failed"
+			item.Error = "update failed: " + err.Error()
+			result.Failed++
+			result.Items = append(result.Items, item)
+			continue
+		}
+
+		item.Action = "updated"
+		result.Updated++
+		result.Items = append(result.Items, item)
+	}
+
+	return result, nil
+}
+
+// mergeMap returns a copy of existing overlaid with updates; on key conflicts
+// the value from updates wins.
+func mergeMap(existing map[string]any, updates map[string]any) map[string]any {
+	out := make(map[string]any, len(existing)+len(updates))
+	for k, v := range existing {
+		out[k] = v
+	}
+	for k, v := range updates {
+		out[k] = v
+	}
+	return out
+}
+
+func (s *CRSSyncService) mapOrCreateProxy(ctx context.Context, enabled bool, cached *[]Proxy, src *crsProxy, defaultName string) (*int64, error) {
+	if !enabled || src == nil {
+		return nil, nil
+	}
+	protocol := strings.ToLower(strings.TrimSpace(src.Protocol))
+	switch protocol {
+	case "socks", "socks5h":
+		protocol = "socks5"
+	}
+	host := strings.TrimSpace(src.Host)
+	port := src.Port
+	username := strings.TrimSpace(src.Username)
+	password := strings.TrimSpace(src.Password)
+
+	if protocol == "" || host == "" || port <= 0 {
+		return nil, nil
+	}
+	if protocol != "http" && protocol != "https" && protocol != "socks5" {
+		return nil, nil
+	}
+
+	// Find existing proxy (active only).
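+	// Matching below is exact on (protocol, host, port, username, password):
+	// e.g. an existing socks5 proxy at proxy.example.com:1080 is reused only
+	// if the credentials also match; any field mismatch creates a new Proxy
+	// row instead. (Illustrative host.)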
+	for _, p := range *cached {
+		if strings.EqualFold(p.Protocol, protocol) &&
+			p.Host == host &&
+			p.Port == port &&
+			p.Username == username &&
+			p.Password == password {
+			id := p.ID
+			return &id, nil
+		}
+	}
+
+	// Create new proxy
+	proxy := &Proxy{
+		Name:     defaultProxyName(defaultName, protocol, host, port),
+		Protocol: protocol,
+		Host:     host,
+		Port:     port,
+		Username: username,
+		Password: password,
+		Status:   StatusActive,
+	}
+	if err := s.proxyRepo.Create(ctx, proxy); err != nil {
+		return nil, err
+	}
+
+	*cached = append(*cached, *proxy)
+	id := proxy.ID
+	return &id, nil
+}
+
+func defaultProxyName(base, protocol, host string, port int) string {
+	base = strings.TrimSpace(base)
+	if base == "" {
+		base = "crs"
+	}
+	return fmt.Sprintf("%s (%s://%s:%d)", base, protocol, host, port)
+}
+
+func defaultName(name, id string) string {
+	if strings.TrimSpace(name) != "" {
+		return strings.TrimSpace(name)
+	}
+	return "CRS " + id
+}
+
+func clampPriority(priority int) int {
+	if priority < 1 || priority > 100 {
+		return 50
+	}
+	return priority
+}
+
+func sanitizeCredentialsMap(input map[string]any) map[string]any {
+	if input == nil {
+		return map[string]any{}
+	}
+	out := make(map[string]any, len(input))
+	for k, v := range input {
+		// Avoid nil values to keep JSONB cleaner
+		if v != nil {
+			out[k] = v
+		}
+	}
+	return out
+}
+
+func mapCRSStatus(isActive bool, status string) string {
+	if !isActive {
+		return "inactive"
+	}
+	if strings.EqualFold(strings.TrimSpace(status), "error") {
+		return "error"
+	}
+	return "active"
+}
+
+func normalizeBaseURL(raw string, allowlist []string, allowPrivate bool) (string, error) {
+	// When the allowlist is empty, do not require it (only basic URL and
+	// SSRF validation is performed)
+	requireAllowlist := len(allowlist) > 0
+	normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{
+		AllowedHosts:     allowlist,
+		RequireAllowlist: requireAllowlist,
+		AllowPrivate:     allowPrivate,
+	})
+	if err != nil {
+		return "", fmt.Errorf("invalid base_url: %w", err)
+	}
+	return normalized, nil
+}
+
+// cleanBaseURL removes trailing suffix from base_url in credentials
+// Used for both Claude and OpenAI accounts to remove /v1
+func cleanBaseURL(credentials map[string]any, suffixToRemove string) {
+	if baseURL, ok := credentials["base_url"].(string); ok && baseURL != "" {
+		trimmed := strings.TrimSpace(baseURL)
+		if strings.HasSuffix(trimmed, suffixToRemove) {
+			credentials["base_url"] = strings.TrimSuffix(trimmed, suffixToRemove)
+		}
+	}
+}
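+
+// For example (illustrative values), cleanBaseURL(creds, "/v1") rewrites a
+// base_url of "https://api.anthropic.com/v1" to "https://api.anthropic.com"
+// and leaves URLs without the suffix untouched.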
"unknown error" + } + return "", errors.New("crs login failed: " + msg) + } + return parsed.Token, nil +} + +func crsExportAccounts(ctx context.Context, client *http.Client, baseURL, adminToken string) (*crsExportResponse, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, baseURL+"/admin/sync/export-accounts?include_secrets=true", nil) + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+adminToken) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + raw, _ := io.ReadAll(io.LimitReader(resp.Body, 5<<20)) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return nil, fmt.Errorf("crs export failed: status=%d body=%s", resp.StatusCode, string(raw)) + } + + var parsed crsExportResponse + if err := json.Unmarshal(raw, &parsed); err != nil { + return nil, fmt.Errorf("crs export parse failed: %w", err) + } + if !parsed.Success { + msg := parsed.Message + if msg == "" { + msg = parsed.Error + } + if msg == "" { + msg = "unknown error" + } + return nil, errors.New("crs export failed: " + msg) + } + return &parsed, nil +} + +// refreshOAuthToken attempts to refresh OAuth token for a synced account +// Returns updated credentials or nil if refresh failed/not applicable +func (s *CRSSyncService) refreshOAuthToken(ctx context.Context, account *Account) map[string]any { + if account.Type != AccountTypeOAuth { + return nil + } + + var newCredentials map[string]any + var err error + + switch account.Platform { + case PlatformAnthropic: + if s.oauthService == nil { + return nil + } + tokenInfo, refreshErr := s.oauthService.RefreshAccountToken(ctx, account) + if refreshErr != nil { + err = refreshErr + } else { + // Preserve existing credentials + newCredentials = make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + // Update token fields + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_in"] = tokenInfo.ExpiresIn + newCredentials["expires_at"] = tokenInfo.ExpiresAt + if tokenInfo.RefreshToken != "" { + newCredentials["refresh_token"] = tokenInfo.RefreshToken + } + if tokenInfo.Scope != "" { + newCredentials["scope"] = tokenInfo.Scope + } + } + case PlatformOpenAI: + if s.openaiOAuthService == nil { + return nil + } + tokenInfo, refreshErr := s.openaiOAuthService.RefreshAccountToken(ctx, account) + if refreshErr != nil { + err = refreshErr + } else { + newCredentials = s.openaiOAuthService.BuildAccountCredentials(tokenInfo) + // Preserve non-token settings from existing credentials + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } + case PlatformGemini: + if s.geminiOAuthService == nil { + return nil + } + tokenInfo, refreshErr := s.geminiOAuthService.RefreshAccountToken(ctx, account) + if refreshErr != nil { + err = refreshErr + } else { + newCredentials = s.geminiOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + } + default: + return nil + } + + if err != nil { + // Log but don't fail the sync - token might still be valid or refreshable later + return nil + } + + return newCredentials +} diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go new file mode 100644 index 00000000..da5c0e7d --- 
diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go
new file mode 100644
index 00000000..da5c0e7d
--- /dev/null
+++ b/backend/internal/service/dashboard_aggregation_service.go
@@ -0,0 +1,258 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"log"
+	"sync/atomic"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+)
+
+const (
+	defaultDashboardAggregationTimeout         = 2 * time.Minute
+	defaultDashboardAggregationBackfillTimeout = 30 * time.Minute
+	dashboardAggregationRetentionInterval      = 6 * time.Hour
+)
+
+var (
+	// ErrDashboardBackfillDisabled is returned when backfill is disabled by configuration.
+	ErrDashboardBackfillDisabled = errors.New("dashboard aggregation backfill is disabled")
+	// ErrDashboardBackfillTooLarge is returned when the backfill range exceeds the limit.
+	ErrDashboardBackfillTooLarge = errors.New("backfill time range too large")
+)
+
+// DashboardAggregationRepository defines the repository interface for dashboard pre-aggregation.
+type DashboardAggregationRepository interface {
+	AggregateRange(ctx context.Context, start, end time.Time) error
+	GetAggregationWatermark(ctx context.Context) (time.Time, error)
+	UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error
+	CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error
+	CleanupUsageLogs(ctx context.Context, cutoff time.Time) error
+	EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error
+}
+
+// DashboardAggregationService runs the scheduled aggregation and backfill jobs.
+type DashboardAggregationService struct {
+	repo                 DashboardAggregationRepository
+	timingWheel          *TimingWheelService
+	cfg                  config.DashboardAggregationConfig
+	running              int32
+	lastRetentionCleanup atomic.Value // time.Time
+}
+
+// NewDashboardAggregationService creates the aggregation service.
+func NewDashboardAggregationService(repo DashboardAggregationRepository, timingWheel *TimingWheelService, cfg *config.Config) *DashboardAggregationService {
+	var aggCfg config.DashboardAggregationConfig
+	if cfg != nil {
+		aggCfg = cfg.DashboardAgg
+	}
+	return &DashboardAggregationService{
+		repo:        repo,
+		timingWheel: timingWheel,
+		cfg:         aggCfg,
+	}
+}
+
+// Start launches the scheduled aggregation job (config changes take effect on restart).
+func (s *DashboardAggregationService) Start() {
+	if s == nil || s.repo == nil || s.timingWheel == nil {
+		return
+	}
+	if !s.cfg.Enabled {
+		log.Printf("[DashboardAggregation] aggregation job disabled")
+		return
+	}
+
+	interval := time.Duration(s.cfg.IntervalSeconds) * time.Second
+	if interval <= 0 {
+		interval = time.Minute
+	}
+
+	if s.cfg.RecomputeDays > 0 {
+		go s.recomputeRecentDays()
+	}
+
+	s.timingWheel.ScheduleRecurring("dashboard:aggregation", interval, func() {
+		s.runScheduledAggregation()
+	})
+	log.Printf("[DashboardAggregation] aggregation job started (interval=%v, lookback=%ds)", interval, s.cfg.LookbackSeconds)
+	if !s.cfg.BackfillEnabled {
+		log.Printf("[DashboardAggregation] backfill disabled; run a manual backfill to cover history beyond the retention window")
+	}
+}
+
+// TriggerBackfill starts a backfill asynchronously.
+func (s *DashboardAggregationService) TriggerBackfill(start, end time.Time) error {
+	if s == nil || s.repo == nil {
+		return errors.New("aggregation service not initialized")
+	}
+	if !s.cfg.BackfillEnabled {
+		log.Printf("[DashboardAggregation] backfill rejected: backfill_enabled=false")
+		return ErrDashboardBackfillDisabled
+	}
+	if !end.After(start) {
+		return errors.New("invalid backfill time range")
+	}
+	if s.cfg.BackfillMaxDays > 0 {
+		maxRange := time.Duration(s.cfg.BackfillMaxDays) * 24 * time.Hour
+		if end.Sub(start) > maxRange {
+			return ErrDashboardBackfillTooLarge
+		}
+	}
+
+	go func() {
+		ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout)
+		defer cancel()
+		if err := s.backfillRange(ctx, start, end); err != nil {
+			log.Printf("[DashboardAggregation] backfill failed: %v", err)
+		}
+	}()
+	return nil
+}
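+
+// Illustrative trigger from an admin handler (assumes a wired service):
+//
+//	end := time.Now().UTC()
+//	err := aggSvc.TriggerBackfill(end.AddDate(0, 0, -1), end) // last 24h
+//	// err may be ErrDashboardBackfillDisabled or ErrDashboardBackfillTooLarge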
+
+func (s *DashboardAggregationService) recomputeRecentDays() {
+	days := s.cfg.RecomputeDays
+	if days <= 0 {
+		return
+	}
+	now := time.Now().UTC()
+	start := now.AddDate(0, 0, -days)
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout)
+	defer cancel()
+	if err := s.backfillRange(ctx, start, now); err != nil {
+		log.Printf("[DashboardAggregation] startup recompute failed: %v", err)
+		return
+	}
+}
+
+func (s *DashboardAggregationService) runScheduledAggregation() {
+	if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
+		return
+	}
+	defer atomic.StoreInt32(&s.running, 0)
+
+	jobStart := time.Now().UTC()
+	ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationTimeout)
+	defer cancel()
+
+	now := time.Now().UTC()
+	last, err := s.repo.GetAggregationWatermark(ctx)
+	if err != nil {
+		log.Printf("[DashboardAggregation] failed to read watermark: %v", err)
+		last = time.Unix(0, 0).UTC()
+	}
+
+	lookback := time.Duration(s.cfg.LookbackSeconds) * time.Second
+	epoch := time.Unix(0, 0).UTC()
+	start := last.Add(-lookback)
+	if !last.After(epoch) {
+		retentionDays := s.cfg.Retention.UsageLogsDays
+		if retentionDays <= 0 {
+			retentionDays = 1
+		}
+		start = truncateToDayUTC(now.AddDate(0, 0, -retentionDays))
+	} else if start.After(now) {
+		start = now.Add(-lookback)
+	}
+
+	if err := s.aggregateRange(ctx, start, now); err != nil {
+		log.Printf("[DashboardAggregation] aggregation failed: %v", err)
+		return
+	}
+
+	updateErr := s.repo.UpdateAggregationWatermark(ctx, now)
+	if updateErr != nil {
+		log.Printf("[DashboardAggregation] failed to update watermark: %v", updateErr)
+	}
+	log.Printf("[DashboardAggregation] aggregation finished (start=%s end=%s duration=%s watermark_updated=%t)",
+		start.Format(time.RFC3339),
+		now.Format(time.RFC3339),
+		time.Since(jobStart).String(),
+		updateErr == nil,
+	)
+
+	s.maybeCleanupRetention(ctx, now)
+}
+
+func (s *DashboardAggregationService) backfillRange(ctx context.Context, start, end time.Time) error {
+	if !atomic.CompareAndSwapInt32(&s.running, 0, 1) {
+		return errors.New("aggregation job already running")
+	}
+	defer atomic.StoreInt32(&s.running, 0)
+
+	jobStart := time.Now().UTC()
+	startUTC := start.UTC()
+	endUTC := end.UTC()
+	if !endUTC.After(startUTC) {
+		return errors.New("invalid backfill time range")
+	}
+
+	cursor := truncateToDayUTC(startUTC)
+	for cursor.Before(endUTC) {
+		windowEnd := cursor.Add(24 * time.Hour)
+		if windowEnd.After(endUTC) {
+			windowEnd = endUTC
+		}
+		if err := s.aggregateRange(ctx, cursor, windowEnd); err != nil {
+			return err
+		}
+		cursor = windowEnd
+	}
+
+	updateErr := s.repo.UpdateAggregationWatermark(ctx, endUTC)
+	if updateErr != nil {
+		log.Printf("[DashboardAggregation] failed to update watermark: %v", updateErr)
+	}
+	log.Printf("[DashboardAggregation] backfill aggregation finished (start=%s end=%s duration=%s watermark_updated=%t)",
+		startUTC.Format(time.RFC3339),
+		endUTC.Format(time.RFC3339),
+		time.Since(jobStart).String(),
+		updateErr == nil,
+	)
+
+	s.maybeCleanupRetention(ctx, endUTC)
+	return nil
+}
+
+func (s *DashboardAggregationService) aggregateRange(ctx context.Context, start, end time.Time) error {
+	if !end.After(start) {
+		return nil
+	}
+	if err := s.repo.EnsureUsageLogsPartitions(ctx, end); err != nil {
+		log.Printf("[DashboardAggregation] partition check failed: %v", err)
+	}
+	return s.repo.AggregateRange(ctx, start, end)
+}
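+
+// Worked example (illustrative): with a watermark of 10:00:00Z and
+// lookback=120s, the next scheduled run re-aggregates [09:58:00Z, now); on a
+// fresh install (epoch watermark) the window instead starts at UTC midnight
+// UsageLogsDays ago, and backfillRange walks such spans in 24h windows.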
+
+func (s *DashboardAggregationService) maybeCleanupRetention(ctx context.Context, now time.Time) {
+	lastAny := s.lastRetentionCleanup.Load()
+	if lastAny != nil {
+		if last, ok := lastAny.(time.Time); ok && now.Sub(last) < dashboardAggregationRetentionInterval {
+			return
+		}
+	}
+
+	hourlyCutoff := now.AddDate(0, 0, -s.cfg.Retention.HourlyDays)
+	dailyCutoff := now.AddDate(0, 0, -s.cfg.Retention.DailyDays)
+	usageCutoff := now.AddDate(0, 0, -s.cfg.Retention.UsageLogsDays)
+
+	aggErr := s.repo.CleanupAggregates(ctx, hourlyCutoff, dailyCutoff)
+	if aggErr != nil {
+		log.Printf("[DashboardAggregation] aggregate retention cleanup failed: %v", aggErr)
+	}
+	usageErr := s.repo.CleanupUsageLogs(ctx, usageCutoff)
+	if usageErr != nil {
+		log.Printf("[DashboardAggregation] usage_logs retention cleanup failed: %v", usageErr)
+	}
+	if aggErr == nil && usageErr == nil {
+		s.lastRetentionCleanup.Store(now)
+	}
+}
+
+func truncateToDayUTC(t time.Time) time.Time {
+	t = t.UTC()
+	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
+}
diff --git a/backend/internal/service/dashboard_aggregation_service_test.go b/backend/internal/service/dashboard_aggregation_service_test.go
new file mode 100644
index 00000000..2fc22105
--- /dev/null
+++ b/backend/internal/service/dashboard_aggregation_service_test.go
@@ -0,0 +1,106 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+type dashboardAggregationRepoTestStub struct {
+	aggregateCalls       int
+	lastStart            time.Time
+	lastEnd              time.Time
+	watermark            time.Time
+	aggregateErr         error
+	cleanupAggregatesErr error
+	cleanupUsageErr      error
+}
+
+func (s *dashboardAggregationRepoTestStub) AggregateRange(ctx context.Context, start, end time.Time) error {
+	s.aggregateCalls++
+	s.lastStart = start
+	s.lastEnd = end
+	return s.aggregateErr
+}
+
+func (s *dashboardAggregationRepoTestStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) {
+	return s.watermark, nil
+}
+
+func (s *dashboardAggregationRepoTestStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error {
+	return nil
+}
+
+func (s *dashboardAggregationRepoTestStub) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error {
+	return s.cleanupAggregatesErr
+}
+
+func (s *dashboardAggregationRepoTestStub) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error {
+	return s.cleanupUsageErr
+}
+
+func (s *dashboardAggregationRepoTestStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error {
+	return nil
+}
+
+func TestDashboardAggregationService_RunScheduledAggregation_EpochUsesRetentionStart(t *testing.T) {
+	repo := &dashboardAggregationRepoTestStub{watermark: time.Unix(0, 0).UTC()}
+	svc := &DashboardAggregationService{
+		repo: repo,
+		cfg: config.DashboardAggregationConfig{
+			Enabled:         true,
+			IntervalSeconds: 60,
+			LookbackSeconds: 120,
+			Retention: config.DashboardAggregationRetentionConfig{
+				UsageLogsDays: 1,
+				HourlyDays:    1,
+				DailyDays:     1,
+			},
+		},
+	}
+
+	svc.runScheduledAggregation()
+
+	require.Equal(t, 1, repo.aggregateCalls)
+	require.False(t, repo.lastEnd.IsZero())
+	require.Equal(t, truncateToDayUTC(repo.lastEnd.AddDate(0, 0, -1)), repo.lastStart)
+}
+
+func TestDashboardAggregationService_CleanupRetentionFailure_DoesNotRecord(t *testing.T) {
+	repo := &dashboardAggregationRepoTestStub{cleanupAggregatesErr: errors.New("cleanup failed")}
+	svc := &DashboardAggregationService{
+		repo: repo,
+		cfg: config.DashboardAggregationConfig{
+			Retention: config.DashboardAggregationRetentionConfig{
+				UsageLogsDays: 1,
+				HourlyDays:    1,
+				DailyDays:     1,
+			},
+		},
+	}
+
+	svc.maybeCleanupRetention(context.Background(), time.Now().UTC())
+
+	require.Nil(t, svc.lastRetentionCleanup.Load())
+}
+
+func TestDashboardAggregationService_TriggerBackfill_TooLarge(t *testing.T) {
+	repo := &dashboardAggregationRepoTestStub{}
+	svc := &DashboardAggregationService{
+		repo: repo,
+		cfg: config.DashboardAggregationConfig{
+			BackfillEnabled: true,
+			BackfillMaxDays: 1,
+		},
+	}
+
+	start := time.Now().AddDate(0, 0, -3)
+	end := time.Now()
+	err := svc.TriggerBackfill(start, end)
+	require.ErrorIs(t, err, ErrDashboardBackfillTooLarge)
+	require.Equal(t, 0, repo.aggregateCalls)
+}
diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go
new file mode 100644
index 00000000..a9811919
--- /dev/null
+++ b/backend/internal/service/dashboard_service.go
@@ -0,0 +1,336 @@
+package service
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"sync/atomic"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
+)
+
+const (
+	defaultDashboardStatsFreshTTL       = 15 * time.Second
+	defaultDashboardStatsCacheTTL       = 30 * time.Second
+	defaultDashboardStatsRefreshTimeout = 30 * time.Second
+)
+
+// ErrDashboardStatsCacheMiss marks a dashboard stats cache miss.
+var ErrDashboardStatsCacheMiss = errors.New("dashboard stats cache miss")
+
+// DashboardStatsCache defines the dashboard stats cache interface.
+type DashboardStatsCache interface {
+	GetDashboardStats(ctx context.Context) (string, error)
+	SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error
+	DeleteDashboardStats(ctx context.Context) error
+}
+
+type dashboardStatsRangeFetcher interface {
+	GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*usagestats.DashboardStats, error)
+}
+
+type dashboardStatsCacheEntry struct {
+	Stats     *usagestats.DashboardStats `json:"stats"`
+	UpdatedAt int64                      `json:"updated_at"`
+}
+
+// DashboardService provides admin dashboard statistics.
+type DashboardService struct {
+	usageRepo      UsageLogRepository
+	aggRepo        DashboardAggregationRepository
+	cache          DashboardStatsCache
+	cacheFreshTTL  time.Duration
+	cacheTTL       time.Duration
+	refreshTimeout time.Duration
+	refreshing     int32
+	aggEnabled     bool
+	aggInterval    time.Duration
+	aggLookback    time.Duration
+	aggUsageDays   int
+}
+
+func NewDashboardService(usageRepo UsageLogRepository, aggRepo DashboardAggregationRepository, cache DashboardStatsCache, cfg *config.Config) *DashboardService {
+	freshTTL := defaultDashboardStatsFreshTTL
+	cacheTTL := defaultDashboardStatsCacheTTL
+	refreshTimeout := defaultDashboardStatsRefreshTimeout
+	aggEnabled := true
+	aggInterval := time.Minute
+	aggLookback := 2 * time.Minute
+	aggUsageDays := 90
+	if cfg != nil {
+		if !cfg.Dashboard.Enabled {
+			cache = nil
+		}
+		if cfg.Dashboard.StatsFreshTTLSeconds > 0 {
+			freshTTL = time.Duration(cfg.Dashboard.StatsFreshTTLSeconds) * time.Second
+		}
+		if cfg.Dashboard.StatsTTLSeconds > 0 {
+			cacheTTL = time.Duration(cfg.Dashboard.StatsTTLSeconds) * time.Second
+		}
+		if cfg.Dashboard.StatsRefreshTimeoutSeconds > 0 {
+			refreshTimeout = time.Duration(cfg.Dashboard.StatsRefreshTimeoutSeconds) * time.Second
+		}
+		aggEnabled = cfg.DashboardAgg.Enabled
+		if cfg.DashboardAgg.IntervalSeconds > 0 {
+			aggInterval = time.Duration(cfg.DashboardAgg.IntervalSeconds) * time.Second
+		}
+		if cfg.DashboardAgg.LookbackSeconds > 0 {
+			aggLookback = time.Duration(cfg.DashboardAgg.LookbackSeconds) * time.Second
+		}
+		if cfg.DashboardAgg.Retention.UsageLogsDays > 0 {
+			aggUsageDays = cfg.DashboardAgg.Retention.UsageLogsDays
+		}
+	}
+	if aggRepo == nil {
+		aggEnabled = false
+	}
+	return &DashboardService{
+		usageRepo:      usageRepo,
+		aggRepo:        aggRepo,
+		cache:          cache,
+		cacheFreshTTL:  freshTTL,
+		cacheTTL:       cacheTTL,
+		refreshTimeout: refreshTimeout,
+		aggEnabled:     aggEnabled,
+		aggInterval:    aggInterval,
+		aggLookback:    aggLookback,
+		aggUsageDays:   aggUsageDays,
+	}
+}
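+
+// GetDashboardStats below follows a stale-while-revalidate policy
+// (illustrative summary of the control flow):
+//
+//	age <= cacheFreshTTL                      -> serve the cached entry as-is
+//	fresh TTL exceeded but entry still cached -> serve it and refresh asynchronously
+//	cache miss or unreadable entry            -> refresh synchronously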
+ +func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + if s.cache != nil { + cached, fresh, err := s.getCachedDashboardStats(ctx) + if err == nil && cached != nil { + s.refreshAggregationStaleness(cached) + if !fresh { + s.refreshDashboardStatsAsync() + } + return cached, nil + } + if err != nil && !errors.Is(err, ErrDashboardStatsCacheMiss) { + log.Printf("[Dashboard] dashboard stats cache read failed: %v", err) + } + } + + stats, err := s.refreshDashboardStats(ctx) + if err != nil { + return nil, fmt.Errorf("get dashboard stats: %w", err) + } + return stats, nil +} + +func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { + trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) + if err != nil { + return nil, fmt.Errorf("get usage trend with filters: %w", err) + } + return trend, nil +} + +func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream) + if err != nil { + return nil, fmt.Errorf("get model stats with filters: %w", err) + } + return stats, nil +} + +func (s *DashboardService) getCachedDashboardStats(ctx context.Context) (*usagestats.DashboardStats, bool, error) { + data, err := s.cache.GetDashboardStats(ctx) + if err != nil { + return nil, false, err + } + + var entry dashboardStatsCacheEntry + if err := json.Unmarshal([]byte(data), &entry); err != nil { + s.evictDashboardStatsCache(err) + return nil, false, ErrDashboardStatsCacheMiss + } + if entry.Stats == nil { + s.evictDashboardStatsCache(errors.New("dashboard cache entry missing stats")) + return nil, false, ErrDashboardStatsCacheMiss + } + + age := time.Since(time.Unix(entry.UpdatedAt, 0)) + return entry.Stats, age <= s.cacheFreshTTL, nil +} + +func (s *DashboardService) refreshDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + stats, err := s.fetchDashboardStats(ctx) + if err != nil { + return nil, err + } + s.applyAggregationStatus(ctx, stats) + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + s.saveDashboardStatsCache(cacheCtx, stats) + return stats, nil +} + +func (s *DashboardService) refreshDashboardStatsAsync() { + if s.cache == nil { + return + } + if !atomic.CompareAndSwapInt32(&s.refreshing, 0, 1) { + return + } + + go func() { + defer atomic.StoreInt32(&s.refreshing, 0) + + ctx, cancel := context.WithTimeout(context.Background(), s.refreshTimeout) + defer cancel() + + stats, err := s.fetchDashboardStats(ctx) + if err != nil { + log.Printf("[Dashboard] async dashboard cache refresh failed: %v", err) + return + } + s.applyAggregationStatus(ctx, stats) + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + s.saveDashboardStatsCache(cacheCtx, stats) + }() +}
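// Sketch (not part of the patch): the single-flight guard used by
// refreshDashboardStatsAsync above. CompareAndSwapInt32 lets at most one
// background refresh run at a time; concurrent callers bail out immediately
// instead of piling up goroutines.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var refreshing int32
	var concurrent, peak int32
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if !atomic.CompareAndSwapInt32(&refreshing, 0, 1) {
				return // a refresh is already in flight
			}
			defer atomic.StoreInt32(&refreshing, 0)
			n := atomic.AddInt32(&concurrent, 1)
			if n > atomic.LoadInt32(&peak) {
				atomic.StoreInt32(&peak, n)
			}
			atomic.AddInt32(&concurrent, -1)
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt32(&peak)) // 1: never two refreshes at once
}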
+ +func (s *DashboardService) fetchDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + if !s.aggEnabled { + if fetcher, ok := s.usageRepo.(dashboardStatsRangeFetcher); ok { + now := time.Now().UTC() + start := truncateToDayUTC(now.AddDate(0, 0, -s.aggUsageDays)) + return fetcher.GetDashboardStatsWithRange(ctx, start, now) + } + } + return s.usageRepo.GetDashboardStats(ctx) +} + +func (s *DashboardService) saveDashboardStatsCache(ctx context.Context, stats *usagestats.DashboardStats) { + if s.cache == nil || stats == nil { + return + } + + entry := dashboardStatsCacheEntry{ + Stats: stats, + UpdatedAt: time.Now().Unix(), + } + data, err := json.Marshal(entry) + if err != nil { + log.Printf("[Dashboard] dashboard cache marshal failed: %v", err) + return + } + + if err := s.cache.SetDashboardStats(ctx, string(data), s.cacheTTL); err != nil { + log.Printf("[Dashboard] dashboard cache write failed: %v", err) + } +} + +func (s *DashboardService) evictDashboardStatsCache(reason error) { + if s.cache == nil { + return + } + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + + if err := s.cache.DeleteDashboardStats(cacheCtx); err != nil { + log.Printf("[Dashboard] dashboard cache delete failed: %v", err) + } + if reason != nil { + log.Printf("[Dashboard] dashboard cache entry was invalid and has been evicted: %v", reason) + } +} + +func (s *DashboardService) cacheOperationContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), s.refreshTimeout) +} + +func (s *DashboardService) applyAggregationStatus(ctx context.Context, stats *usagestats.DashboardStats) { + if stats == nil { + return + } + updatedAt := s.fetchAggregationUpdatedAt(ctx) + stats.StatsUpdatedAt = updatedAt.UTC().Format(time.RFC3339) + stats.StatsStale = s.isAggregationStale(updatedAt, time.Now().UTC()) +} + +func (s *DashboardService) refreshAggregationStaleness(stats *usagestats.DashboardStats) { + if stats == nil { + return + } + updatedAt := parseStatsUpdatedAt(stats.StatsUpdatedAt) + stats.StatsStale = s.isAggregationStale(updatedAt, time.Now().UTC()) +} + +func (s *DashboardService) fetchAggregationUpdatedAt(ctx context.Context) time.Time { + if s.aggRepo == nil { + return time.Unix(0, 0).UTC() + } + updatedAt, err := s.aggRepo.GetAggregationWatermark(ctx) + if err != nil { + log.Printf("[Dashboard] failed to read aggregation watermark: %v", err) + return time.Unix(0, 0).UTC() + } + if updatedAt.IsZero() { + return time.Unix(0, 0).UTC() + } + return updatedAt.UTC() +} + +func (s *DashboardService) isAggregationStale(updatedAt, now time.Time) bool { + if !s.aggEnabled { + return true + } + epoch := time.Unix(0, 0).UTC() + if !updatedAt.After(epoch) { + return true + } + threshold := s.aggInterval + s.aggLookback + return now.Sub(updatedAt) > threshold +} + +func parseStatsUpdatedAt(raw string) time.Time { + if raw == "" { + return time.Unix(0, 0).UTC() + } + parsed, err := time.Parse(time.RFC3339, raw) + if err != nil { + return time.Unix(0, 0).UTC() + } + return parsed.UTC() +} + +func (s *DashboardService) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) { + trend, err := s.usageRepo.GetAPIKeyUsageTrend(ctx, startTime, endTime, granularity, limit) + if err != nil { + return nil, fmt.Errorf("get api key usage trend: %w", err) + } + return trend, nil +} + +func (s *DashboardService) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) { + trend, err := s.usageRepo.GetUserUsageTrend(ctx, startTime, endTime, granularity, limit) + if err != nil { + return nil, fmt.Errorf("get user usage trend: %w", err) + } + return trend, nil +} + +func (s *DashboardService) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error) { + stats, err := s.usageRepo.GetBatchUserUsageStats(ctx, 
userIDs) + if err != nil { + return nil, fmt.Errorf("get batch user usage stats: %w", err) + } + return stats, nil +} + +func (s *DashboardService) GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*usagestats.BatchAPIKeyUsageStats, error) { + stats, err := s.usageRepo.GetBatchAPIKeyUsageStats(ctx, apiKeyIDs) + if err != nil { + return nil, fmt.Errorf("get batch api key usage stats: %w", err) + } + return stats, nil +} diff --git a/backend/internal/service/dashboard_service_test.go b/backend/internal/service/dashboard_service_test.go new file mode 100644 index 00000000..db3c78c3 --- /dev/null +++ b/backend/internal/service/dashboard_service_test.go @@ -0,0 +1,387 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/stretchr/testify/require" +) + +type usageRepoStub struct { + UsageLogRepository + stats *usagestats.DashboardStats + rangeStats *usagestats.DashboardStats + err error + rangeErr error + calls int32 + rangeCalls int32 + rangeStart time.Time + rangeEnd time.Time + onCall chan struct{} +} + +func (s *usageRepoStub) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + atomic.AddInt32(&s.calls, 1) + if s.onCall != nil { + select { + case s.onCall <- struct{}{}: + default: + } + } + if s.err != nil { + return nil, s.err + } + return s.stats, nil +} + +func (s *usageRepoStub) GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*usagestats.DashboardStats, error) { + atomic.AddInt32(&s.rangeCalls, 1) + s.rangeStart = start + s.rangeEnd = end + if s.rangeErr != nil { + return nil, s.rangeErr + } + if s.rangeStats != nil { + return s.rangeStats, nil + } + return s.stats, nil +} + +type dashboardCacheStub struct { + get func(ctx context.Context) (string, error) + set func(ctx context.Context, data string, ttl time.Duration) error + del func(ctx context.Context) error + getCalls int32 + setCalls int32 + delCalls int32 + lastSetMu sync.Mutex + lastSet string +} + +func (c *dashboardCacheStub) GetDashboardStats(ctx context.Context) (string, error) { + atomic.AddInt32(&c.getCalls, 1) + if c.get != nil { + return c.get(ctx) + } + return "", ErrDashboardStatsCacheMiss +} + +func (c *dashboardCacheStub) SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error { + atomic.AddInt32(&c.setCalls, 1) + c.lastSetMu.Lock() + c.lastSet = data + c.lastSetMu.Unlock() + if c.set != nil { + return c.set(ctx, data, ttl) + } + return nil +} + +func (c *dashboardCacheStub) DeleteDashboardStats(ctx context.Context) error { + atomic.AddInt32(&c.delCalls, 1) + if c.del != nil { + return c.del(ctx) + } + return nil +} + +type dashboardAggregationRepoStub struct { + watermark time.Time + err error +} + +func (s *dashboardAggregationRepoStub) AggregateRange(ctx context.Context, start, end time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + if s.err != nil { + return time.Time{}, s.err + } + return s.watermark, nil +} + +func (s *dashboardAggregationRepoStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) CleanupUsageLogs(ctx 
context.Context, cutoff time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + return nil +} + +func (c *dashboardCacheStub) readLastEntry(t *testing.T) dashboardStatsCacheEntry { + t.Helper() + c.lastSetMu.Lock() + data := c.lastSet + c.lastSetMu.Unlock() + + var entry dashboardStatsCacheEntry + err := json.Unmarshal([]byte(data), &entry) + require.NoError(t, err) + return entry +} + +func TestDashboardService_CacheHitFresh(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 10, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + entry := dashboardStatsCacheEntry{ + Stats: stats, + UpdatedAt: time.Now().Unix(), + } + payload, err := json.Marshal(entry) + require.NoError(t, err) + + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return string(payload), nil + }, + } + repo := &usageRepoStub{ + stats: &usagestats.DashboardStats{TotalUsers: 99}, + } + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(0), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalls)) +} + +func TestDashboardService_CacheMiss_StoresCache(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 7, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "", ErrDashboardStatsCacheMiss + }, + } + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.setCalls)) + entry := cache.readLastEntry(t) + require.Equal(t, stats, entry.Stats) + require.WithinDuration(t, time.Now(), time.Unix(entry.UpdatedAt, 0), time.Second) +} + +func TestDashboardService_CacheDisabled_SkipsCache(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 3, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "", nil + }, + } + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + 
require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalls)) +} + +func TestDashboardService_CacheHitStale_TriggersAsyncRefresh(t *testing.T) { + staleStats := &usagestats.DashboardStats{ + TotalUsers: 11, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + entry := dashboardStatsCacheEntry{ + Stats: staleStats, + UpdatedAt: time.Now().Add(-defaultDashboardStatsFreshTTL * 2).Unix(), + } + payload, err := json.Marshal(entry) + require.NoError(t, err) + + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return string(payload), nil + }, + } + refreshCh := make(chan struct{}, 1) + repo := &usageRepoStub{ + stats: &usagestats.DashboardStats{TotalUsers: 22}, + onCall: refreshCh, + } + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, staleStats, got) + + select { + case <-refreshCh: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting for async refresh") + } + require.Eventually(t, func() bool { + return atomic.LoadInt32(&cache.setCalls) >= 1 + }, 1*time.Second, 10*time.Millisecond) +} + +func TestDashboardService_CacheParseError_EvictsAndRefetches(t *testing.T) { + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "not-json", nil + }, + } + stats := &usagestats.DashboardStats{TotalUsers: 9} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.delCalls)) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) +} + +func TestDashboardService_CacheParseError_RepoFailure(t *testing.T) { + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "not-json", nil + }, + } + repo := &usageRepoStub{err: errors.New("db down")} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + _, err := svc.GetDashboardStats(context.Background()) + require.Error(t, err) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.delCalls)) +} + +func TestDashboardService_StatsUpdatedAtEpochWhenMissing(t *testing.T) { + stats := &usagestats.DashboardStats{} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{Dashboard: config.DashboardCacheConfig{Enabled: false}} + svc := NewDashboardService(repo, aggRepo, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, "1970-01-01T00:00:00Z", got.StatsUpdatedAt) + 
require.True(t, got.StatsStale) +} + +func TestDashboardService_StatsStaleFalseWhenFresh(t *testing.T) { + aggNow := time.Now().UTC().Truncate(time.Second) + stats := &usagestats.DashboardStats{} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: aggNow} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + IntervalSeconds: 60, + LookbackSeconds: 120, + }, + } + svc := NewDashboardService(repo, aggRepo, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, aggNow.Format(time.RFC3339), got.StatsUpdatedAt) + require.False(t, got.StatsStale) +} + +func TestDashboardService_AggDisabled_UsesUsageLogsFallback(t *testing.T) { + expected := &usagestats.DashboardStats{TotalUsers: 42} + repo := &usageRepoStub{ + rangeStats: expected, + err: errors.New("should not call aggregated stats"), + } + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: false, + Retention: config.DashboardAggregationRetentionConfig{ + UsageLogsDays: 7, + }, + }, + } + svc := NewDashboardService(repo, nil, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, int64(42), got.TotalUsers) + require.Equal(t, int32(0), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.rangeCalls)) + require.False(t, repo.rangeEnd.IsZero()) + require.Equal(t, truncateToDayUTC(repo.rangeEnd.AddDate(0, 0, -7)), repo.rangeStart) +} diff --git a/backend/internal/service/deferred_service.go b/backend/internal/service/deferred_service.go new file mode 100644 index 00000000..a3dfe008 --- /dev/null +++ b/backend/internal/service/deferred_service.go @@ -0,0 +1,76 @@ +package service + +import ( + "context" + "log" + "sync" + "time" +) + +// DeferredService provides deferred batch update functionality +type DeferredService struct { + accountRepo AccountRepository + timingWheel *TimingWheelService + interval time.Duration + + lastUsedUpdates sync.Map +} + +// NewDeferredService creates a new DeferredService instance +func NewDeferredService(accountRepo AccountRepository, timingWheel *TimingWheelService, interval time.Duration) *DeferredService { + return &DeferredService{ + accountRepo: accountRepo, + timingWheel: timingWheel, + interval: interval, + } +} + +// Start starts the deferred service +func (s *DeferredService) Start() { + s.timingWheel.ScheduleRecurring("deferred:last_used", s.interval, s.flushLastUsed) + log.Printf("[DeferredService] Started (interval: %v)", s.interval) +} + +// Stop stops the deferred service +func (s *DeferredService) Stop() { + s.timingWheel.Cancel("deferred:last_used") + s.flushLastUsed() + log.Printf("[DeferredService] Service stopped") +} + +func (s *DeferredService) ScheduleLastUsedUpdate(accountID int64) { + s.lastUsedUpdates.Store(accountID, time.Now()) +} + +func (s *DeferredService) flushLastUsed() { + updates := make(map[int64]time.Time) + s.lastUsedUpdates.Range(func(key, value any) bool { + id, ok := key.(int64) + if !ok { + return true + } + ts, ok := value.(time.Time) + if !ok { + return true + } + updates[id] = ts + s.lastUsedUpdates.Delete(key) + return true + }) + + if len(updates) == 0 { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + if err := 
s.accountRepo.BatchUpdateLastUsed(ctx, updates); err != nil { + log.Printf("[DeferredService] BatchUpdateLastUsed failed (%d accounts): %v", len(updates), err) + for id, ts := range updates { + s.lastUsedUpdates.Store(id, ts) + } + } else { + log.Printf("[DeferredService] BatchUpdateLastUsed flushed %d accounts", len(updates)) + } +}
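// Sketch (not part of the patch): the drain pattern flushLastUsed uses above.
// Range over a sync.Map, copy entries into a plain map, and Delete as you go so
// concurrent ScheduleLastUsedUpdate calls can keep storing. One caveat of the
// failure path above: re-storing the failed batch can overwrite a newer timestamp
// written during the flush; a LoadOrStore-based retry would avoid that.
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var pending sync.Map
	pending.Store(int64(1), time.Now())
	pending.Store(int64(2), time.Now())

	updates := make(map[int64]time.Time)
	pending.Range(func(key, value any) bool {
		id, okID := key.(int64)
		ts, okTS := value.(time.Time)
		if okID && okTS {
			updates[id] = ts
			pending.Delete(key)
		}
		return true // keep iterating
	})
	fmt.Println(len(updates)) // 2
}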
= "contact_info" // 客服联系方式 + SettingKeyDocURL = "doc_url" // 文档链接 + SettingKeyHomeContent = "home_content" // 首页内容(支持 Markdown/HTML,或 URL 作为 iframe src) + + // 默认配置 + SettingKeyDefaultConcurrency = "default_concurrency" // 新用户默认并发量 + SettingKeyDefaultBalance = "default_balance" // 新用户默认余额 + + // 管理员 API Key + SettingKeyAdminAPIKey = "admin_api_key" // 全局管理员 API Key(用于外部系统集成) + + // Gemini 配额策略(JSON) + SettingKeyGeminiQuotaPolicy = "gemini_quota_policy" + + // Model fallback settings + SettingKeyEnableModelFallback = "enable_model_fallback" + SettingKeyFallbackModelAnthropic = "fallback_model_anthropic" + SettingKeyFallbackModelOpenAI = "fallback_model_openai" + SettingKeyFallbackModelGemini = "fallback_model_gemini" + SettingKeyFallbackModelAntigravity = "fallback_model_antigravity" + + // Request identity patch (Claude -> Gemini systemInstruction injection) + SettingKeyEnableIdentityPatch = "enable_identity_patch" + SettingKeyIdentityPatchPrompt = "identity_patch_prompt" + + // ========================= + // Ops Monitoring (vNext) + // ========================= + + // SettingKeyOpsMonitoringEnabled is a DB-backed soft switch to enable/disable ops module at runtime. + SettingKeyOpsMonitoringEnabled = "ops_monitoring_enabled" + + // SettingKeyOpsRealtimeMonitoringEnabled controls realtime features (e.g. WS/QPS push). + SettingKeyOpsRealtimeMonitoringEnabled = "ops_realtime_monitoring_enabled" + + // SettingKeyOpsQueryModeDefault controls the default query mode for ops dashboard (auto/raw/preagg). + SettingKeyOpsQueryModeDefault = "ops_query_mode_default" + + // SettingKeyOpsEmailNotificationConfig stores JSON config for ops email notifications. + SettingKeyOpsEmailNotificationConfig = "ops_email_notification_config" + + // SettingKeyOpsAlertRuntimeSettings stores JSON config for ops alert evaluator runtime settings. + SettingKeyOpsAlertRuntimeSettings = "ops_alert_runtime_settings" + + // SettingKeyOpsMetricsIntervalSeconds controls the ops metrics collector interval (>=60). + SettingKeyOpsMetricsIntervalSeconds = "ops_metrics_interval_seconds" + + // SettingKeyOpsAdvancedSettings stores JSON config for ops advanced settings (data retention, aggregation). + SettingKeyOpsAdvancedSettings = "ops_advanced_settings" + + // ========================= + // Stream Timeout Handling + // ========================= + + // SettingKeyStreamTimeoutSettings stores JSON config for stream timeout handling. + SettingKeyStreamTimeoutSettings = "stream_timeout_settings" +) + +// AdminAPIKeyPrefix is the prefix for admin API keys (distinct from user "sk-" keys). 
diff --git a/backend/internal/service/email_queue_service.go b/backend/internal/service/email_queue_service.go new file mode 100644 index 00000000..1c22702c --- /dev/null +++ b/backend/internal/service/email_queue_service.go @@ -0,0 +1,109 @@ +package service + +import ( + "context" + "fmt" + "log" + "sync" + "time" +) + +// EmailTask is a single email delivery task +type EmailTask struct { + Email string + SiteName string + TaskType string // "verify_code" +} + +// EmailQueueService is an asynchronous email queue service +type EmailQueueService struct { + emailService *EmailService + taskChan chan EmailTask + wg sync.WaitGroup + stopChan chan struct{} + workers int +} + +// NewEmailQueueService creates the email queue service +func NewEmailQueueService(emailService *EmailService, workers int) *EmailQueueService { + if workers <= 0 { + workers = 3 // default to 3 worker goroutines + } + + service := &EmailQueueService{ + emailService: emailService, + taskChan: make(chan EmailTask, 100), // buffer up to 100 tasks + stopChan: make(chan struct{}), + workers: workers, + } + + // start the worker goroutines + service.start() + + return service +} + +// start launches the worker goroutines +func (s *EmailQueueService) start() { + for i := 0; i < s.workers; i++ { + s.wg.Add(1) + go s.worker(i) + } + log.Printf("[EmailQueue] Started %d workers", s.workers) +} + +// worker is the worker goroutine loop +func (s *EmailQueueService) worker(id int) { + defer s.wg.Done() + + for { + select { + case task := <-s.taskChan: + s.processTask(id, task) + case <-s.stopChan: + log.Printf("[EmailQueue] Worker %d stopping", id) + return + } + } +} + +// processTask handles a single task +func (s *EmailQueueService) processTask(workerID int, task EmailTask) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + switch task.TaskType { + case "verify_code": + if err := s.emailService.SendVerifyCode(ctx, task.Email, task.SiteName); err != nil { + log.Printf("[EmailQueue] Worker %d failed to send verify code to %s: %v", workerID, task.Email, err) + } else { + log.Printf("[EmailQueue] Worker %d sent verify code to %s", workerID, task.Email) + } + default: + log.Printf("[EmailQueue] Worker %d unknown task type: %s", workerID, task.TaskType) + } +} + +// EnqueueVerifyCode queues a verification-code email task +func (s *EmailQueueService) EnqueueVerifyCode(email, siteName string) error { + task := EmailTask{ + Email: email, + SiteName: siteName, + TaskType: "verify_code", + } + + select { + case s.taskChan <- task: + log.Printf("[EmailQueue] Enqueued verify code task for %s", email) + return nil + default: + return fmt.Errorf("email queue is full") + } +} + +// Stop shuts down the queue service +func (s *EmailQueueService) Stop() { + close(s.stopChan) + s.wg.Wait() + log.Println("[EmailQueue] All workers stopped") +}
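// Sketch (not part of the patch): the non-blocking enqueue EnqueueVerifyCode
// uses above. A buffered channel plus select/default turns "queue full" into an
// immediate error instead of blocking the request handler.
package main

import (
	"errors"
	"fmt"
)

type emailTask struct{ email string }

func enqueue(ch chan emailTask, t emailTask) error {
	select {
	case ch <- t:
		return nil
	default:
		return errors.New("email queue is full")
	}
}

func main() {
	ch := make(chan emailTask, 1) // the real service buffers 100 tasks
	fmt.Println(enqueue(ch, emailTask{"a@example.com"})) // <nil>
	fmt.Println(enqueue(ch, emailTask{"b@example.com"})) // email queue is full
}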
diff --git a/backend/internal/service/email_service.go b/backend/internal/service/email_service.go new file mode 100644 index 00000000..55e137d6 --- /dev/null +++ b/backend/internal/service/email_service.go @@ -0,0 +1,359 @@ +package service + +import ( + "context" + "crypto/rand" + "crypto/tls" + "fmt" + "log" + "math/big" + "net/smtp" + "strconv" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ( + ErrEmailNotConfigured = infraerrors.ServiceUnavailable("EMAIL_NOT_CONFIGURED", "email service not configured") + ErrInvalidVerifyCode = infraerrors.BadRequest("INVALID_VERIFY_CODE", "invalid or expired verification code") + ErrVerifyCodeTooFrequent = infraerrors.TooManyRequests("VERIFY_CODE_TOO_FREQUENT", "please wait before requesting a new code") + ErrVerifyCodeMaxAttempts = infraerrors.TooManyRequests("VERIFY_CODE_MAX_ATTEMPTS", "too many failed attempts, please request a new code") +) + +// EmailCache defines cache operations for the email service +type EmailCache interface { + GetVerificationCode(ctx context.Context, email string) (*VerificationCodeData, error) + SetVerificationCode(ctx context.Context, email string, data *VerificationCodeData, ttl time.Duration) error + DeleteVerificationCode(ctx context.Context, email string) error +} + +// VerificationCodeData represents verification code data +type VerificationCodeData struct { + Code string + Attempts int + CreatedAt time.Time +} + +const ( + verifyCodeTTL = 15 * time.Minute + verifyCodeCooldown = 1 * time.Minute + maxVerifyCodeAttempts = 5 +) + +// SMTPConfig holds the SMTP configuration +type SMTPConfig struct { + Host string + Port int + Username string + Password string + From string + FromName string + UseTLS bool +} + +// EmailService sends transactional email +type EmailService struct { + settingRepo SettingRepository + cache EmailCache +} + +// NewEmailService creates an email service instance +func NewEmailService(settingRepo SettingRepository, cache EmailCache) *EmailService { + return &EmailService{ + settingRepo: settingRepo, + cache: cache, + } +} + +// GetSMTPConfig loads the SMTP configuration from the database +func (s *EmailService) GetSMTPConfig(ctx context.Context) (*SMTPConfig, error) { + keys := []string{ + SettingKeySMTPHost, + SettingKeySMTPPort, + SettingKeySMTPUsername, + SettingKeySMTPPassword, + SettingKeySMTPFrom, + SettingKeySMTPFromName, + SettingKeySMTPUseTLS, + } + + settings, err := s.settingRepo.GetMultiple(ctx, keys) + if err != nil { + return nil, fmt.Errorf("get smtp settings: %w", err) + } + + host := settings[SettingKeySMTPHost] + if host == "" { + return nil, ErrEmailNotConfigured + } + + port := 587 // default port + if portStr := settings[SettingKeySMTPPort]; portStr != "" { + if p, err := strconv.Atoi(portStr); err == nil { + port = p + } + } + + useTLS := settings[SettingKeySMTPUseTLS] == "true" + + return &SMTPConfig{ + Host: host, + Port: port, + Username: settings[SettingKeySMTPUsername], + Password: settings[SettingKeySMTPPassword], + From: settings[SettingKeySMTPFrom], + FromName: settings[SettingKeySMTPFromName], + UseTLS: useTLS, + }, nil +} + +// SendEmail sends an email using the configuration stored in the database +func (s *EmailService) SendEmail(ctx context.Context, to, subject, body string) error { + config, err := s.GetSMTPConfig(ctx) + if err != nil { + return err + } + return s.SendEmailWithConfig(config, to, subject, body) +} + +// SendEmailWithConfig sends an email using the given configuration +func (s *EmailService) SendEmailWithConfig(config *SMTPConfig, to, subject, body string) error { + from := config.From + if config.FromName != "" { + from = fmt.Sprintf("%s <%s>", config.FromName, config.From) + } + + msg := fmt.Sprintf("From: %s\r\nTo: %s\r\nSubject: %s\r\nMIME-Version: 1.0\r\nContent-Type: text/html; charset=UTF-8\r\n\r\n%s", + from, to, subject, body) + + addr := fmt.Sprintf("%s:%d", config.Host, config.Port) + auth := smtp.PlainAuth("", config.Username, config.Password, config.Host) + + if config.UseTLS { + return s.sendMailTLS(addr, auth, config.From, to, []byte(msg), config.Host) + } + + return smtp.SendMail(addr, auth, config.From, []string{to}, []byte(msg)) +}
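// Sketch (not part of the patch): the verification-code policy encoded by the
// constants above: at most one code per minute per address, each code valid
// for 15 minutes, and up to 5 wrong guesses before a new code is required.
package main

import (
	"fmt"
	"time"
)

const (
	verifyCodeTTL         = 15 * time.Minute
	verifyCodeCooldown    = 1 * time.Minute
	maxVerifyCodeAttempts = 5
)

func main() {
	createdAt := time.Now().Add(-30 * time.Second)
	canResend := time.Since(createdAt) >= verifyCodeCooldown
	expired := time.Since(createdAt) > verifyCodeTTL
	fmt.Println(canResend, expired, maxVerifyCodeAttempts) // false false 5
}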
+ +// sendMailTLS sends mail over an implicit-TLS connection +func (s *EmailService) sendMailTLS(addr string, auth smtp.Auth, from, to string, msg []byte, host string) error { + tlsConfig := &tls.Config{ + ServerName: host, + // Enforce TLS 1.2+ to avoid weak ciphers from protocol downgrade. + MinVersion: tls.VersionTLS12, + } + + conn, err := tls.Dial("tcp", addr, tlsConfig) + if err != nil { + return fmt.Errorf("tls dial: %w", err) + } + defer func() { _ = conn.Close() }() + + client, err := smtp.NewClient(conn, host) + if err != nil { + return fmt.Errorf("new smtp client: %w", err) + } + defer func() { _ = client.Close() }() + + if err = client.Auth(auth); err != nil { + return fmt.Errorf("smtp auth: %w", err) + } + + if err = client.Mail(from); err != nil { + return fmt.Errorf("smtp mail: %w", err) + } + + if err = client.Rcpt(to); err != nil { + return fmt.Errorf("smtp rcpt: %w", err) + } + + w, err := client.Data() + if err != nil { + return fmt.Errorf("smtp data: %w", err) + } + + _, err = w.Write(msg) + if err != nil { + return fmt.Errorf("write msg: %w", err) + } + + err = w.Close() + if err != nil { + return fmt.Errorf("close writer: %w", err) + } + + // Email is sent successfully after w.Close(); ignore Quit errors. + // Some SMTP servers return non-standard responses on QUIT. + _ = client.Quit() + return nil +} + +// GenerateVerifyCode generates a 6-digit numeric verification code +func (s *EmailService) GenerateVerifyCode() (string, error) { + const digits = "0123456789" + code := make([]byte, 6) + for i := range code { + num, err := rand.Int(rand.Reader, big.NewInt(int64(len(digits)))) + if err != nil { + return "", err + } + code[i] = digits[num.Int64()] + } + return string(code), nil +} + +// SendVerifyCode sends a verification-code email +func (s *EmailService) SendVerifyCode(ctx context.Context, email, siteName string) error { + // reject if still within the resend cooldown + existing, err := s.cache.GetVerificationCode(ctx, email) + if err == nil && existing != nil { + if time.Since(existing.CreatedAt) < verifyCodeCooldown { + return ErrVerifyCodeTooFrequent + } + } + + // generate the code + code, err := s.GenerateVerifyCode() + if err != nil { + return fmt.Errorf("generate code: %w", err) + } + + // store the code in Redis + data := &VerificationCodeData{ + Code: code, + Attempts: 0, + CreatedAt: time.Now(), + } + if err := s.cache.SetVerificationCode(ctx, email, data, verifyCodeTTL); err != nil { + return fmt.Errorf("save verify code: %w", err) + } + + // build the email content + subject := fmt.Sprintf("[%s] Email Verification Code", siteName) + body := s.buildVerifyCodeEmailBody(code, siteName) + + // send the email + if err := s.SendEmail(ctx, email, subject, body); err != nil { + return fmt.Errorf("send email: %w", err) + } + + return nil +} + +// VerifyCode validates a verification code +func (s *EmailService) VerifyCode(ctx context.Context, email, code string) error { + data, err := s.cache.GetVerificationCode(ctx, email) + if err != nil || data == nil { + return ErrInvalidVerifyCode + } + + // check whether the maximum number of attempts has been reached + if data.Attempts >= maxVerifyCodeAttempts { + return ErrVerifyCodeMaxAttempts + } + + // code mismatch + if data.Code != code { + data.Attempts++ + if err := s.cache.SetVerificationCode(ctx, email, data, verifyCodeTTL); err != nil { + log.Printf("[Email] Failed to update verification attempt count: %v", err) + } + if data.Attempts >= maxVerifyCodeAttempts { + return ErrVerifyCodeMaxAttempts + } + return ErrInvalidVerifyCode + } + + // success: delete the code + if err := s.cache.DeleteVerificationCode(ctx, email); err != nil { + log.Printf("[Email] Failed to delete verification code after success: %v", err) + } + return nil +} + +// buildVerifyCodeEmailBody builds the HTML body for the verification-code email +func (s *EmailService) buildVerifyCodeEmailBody(code, siteName string) string { + return fmt.Sprintf(`
+<!DOCTYPE html>
+<html>
+<body style="margin:0;padding:0;background:#f5f5f5;">
+  <div style="max-width:600px;margin:0 auto;padding:24px;font-family:Arial,sans-serif;">
+    <div style="background:#ffffff;border-radius:8px;padding:32px;">
+      <h2 style="margin-top:0;">%s</h2>
+      <p>Your verification code is:</p>
+      <p style="font-size:28px;font-weight:bold;letter-spacing:4px;">%s</p>
+      <p>This code will expire in 15 minutes.</p>
+      <p style="color:#888;">If you did not request this code, please ignore this email.</p>
+    </div>
+  </div>
+</body>
+</html>
+`, siteName, code) +} + +// TestSMTPConnectionWithConfig tests the SMTP connection with the given configuration +func (s *EmailService) TestSMTPConnectionWithConfig(config *SMTPConfig) error { + addr := fmt.Sprintf("%s:%d", config.Host, config.Port) + + if config.UseTLS { + tlsConfig := &tls.Config{ + ServerName: config.Host, + // Match the send path: explicitly require TLS 1.2+. + MinVersion: tls.VersionTLS12, + } + conn, err := tls.Dial("tcp", addr, tlsConfig) + if err != nil { + return fmt.Errorf("tls connection failed: %w", err) + } + defer func() { _ = conn.Close() }() + + client, err := smtp.NewClient(conn, config.Host) + if err != nil { + return fmt.Errorf("smtp client creation failed: %w", err) + } + defer func() { _ = client.Close() }() + + auth := smtp.PlainAuth("", config.Username, config.Password, config.Host) + if err = client.Auth(auth); err != nil { + return fmt.Errorf("smtp authentication failed: %w", err) + } + + return client.Quit() + } + + // test a plain (non-TLS) connection + client, err := smtp.Dial(addr) + if err != nil { + return fmt.Errorf("smtp connection failed: %w", err) + } + defer func() { _ = client.Close() }() + + auth := smtp.PlainAuth("", config.Username, config.Password, config.Host) + if err = client.Auth(auth); err != nil { + return fmt.Errorf("smtp authentication failed: %w", err) + } + + return client.Quit() +} diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go new file mode 100644 index 00000000..c2dbf7c9 --- /dev/null +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -0,0 +1,1467 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +// testConfig returns a default config for tests +func testConfig() *config.Config { + return &config.Config{RunMode: config.RunModeStandard} +} + +// mockAccountRepoForPlatform is an account repo mock for single-platform tests +type mockAccountRepoForPlatform struct { + accounts []Account + accountsByID map[int64]*Account + listPlatformFunc func(ctx context.Context, platform string) ([]Account, error) + getByIDCalls int +} + +func (m *mockAccountRepoForPlatform) GetByID(ctx context.Context, id int64) (*Account, error) { + m.getByIDCalls++ + if acc, ok := m.accountsByID[id]; ok { + return acc, nil + } + return nil, errors.New("account not found") +} + +func (m *mockAccountRepoForPlatform) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) { + var result []*Account + for _, id := range ids { + if acc, ok := m.accountsByID[id]; ok { + result = append(result, acc) + } + } + return result, nil +} + +func (m *mockAccountRepoForPlatform) ExistsByID(ctx context.Context, id int64) (bool, error) { + if m.accountsByID == nil { + return false, nil + } + _, ok := m.accountsByID[id] + return ok, nil +} + +func (m *mockAccountRepoForPlatform) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) { + if m.listPlatformFunc != nil { + return m.listPlatformFunc(ctx, platform) + } + var result []Account + for _, acc := range m.accounts { + if acc.Platform == platform && acc.IsSchedulable() { + result = append(result, acc) + } + } + return result, nil +} + +func (m *mockAccountRepoForPlatform) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) { + return m.ListSchedulableByPlatform(ctx, platform) +} + +// Stub methods to implement AccountRepository interface 
+func (m *mockAccountRepoForPlatform) Create(ctx context.Context, account *Account) error { + return nil +} +func (m *mockAccountRepoForPlatform) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) Update(ctx context.Context, account *Account) error { + return nil +} +func (m *mockAccountRepoForPlatform) Delete(ctx context.Context, id int64) error { return nil } +func (m *mockAccountRepoForPlatform) List(ctx context.Context, params pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockAccountRepoForPlatform) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]Account, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockAccountRepoForPlatform) ListByGroup(ctx context.Context, groupID int64) ([]Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) ListActive(ctx context.Context) ([]Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) ListByPlatform(ctx context.Context, platform string) ([]Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) UpdateLastUsed(ctx context.Context, id int64) error { + return nil +} +func (m *mockAccountRepoForPlatform) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { + return nil +} +func (m *mockAccountRepoForPlatform) SetError(ctx context.Context, id int64, errorMsg string) error { + return nil +} +func (m *mockAccountRepoForPlatform) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { + return nil +} +func (m *mockAccountRepoForPlatform) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) { + return 0, nil +} +func (m *mockAccountRepoForPlatform) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { + return nil +} +func (m *mockAccountRepoForPlatform) ListSchedulable(ctx context.Context) ([]Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]Account, error) { + return nil, nil +} +func (m *mockAccountRepoForPlatform) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]Account, error) { + var result []Account + platformSet := make(map[string]bool) + for _, p := range platforms { + platformSet[p] = true + } + for _, acc := range m.accounts { + if platformSet[acc.Platform] && acc.IsSchedulable() { + result = append(result, acc) + } + } + return result, nil +} +func (m *mockAccountRepoForPlatform) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) { + return m.ListSchedulableByPlatforms(ctx, platforms) +} +func (m *mockAccountRepoForPlatform) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { + return nil +} +func (m *mockAccountRepoForPlatform) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { + return nil +} +func (m *mockAccountRepoForPlatform) SetOverloaded(ctx context.Context, id int64, until time.Time) error { + return nil +} +func (m *mockAccountRepoForPlatform) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + return nil +} +func (m *mockAccountRepoForPlatform) ClearTempUnschedulable(ctx context.Context, id int64) error { + return nil +} +func (m 
*mockAccountRepoForPlatform) ClearRateLimit(ctx context.Context, id int64) error { + return nil +} +func (m *mockAccountRepoForPlatform) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + return nil +} +func (m *mockAccountRepoForPlatform) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { + return nil +} +func (m *mockAccountRepoForPlatform) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error { + return nil +} +func (m *mockAccountRepoForPlatform) BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) { + return 0, nil +} + +// Verify interface implementation +var _ AccountRepository = (*mockAccountRepoForPlatform)(nil) + +// mockGatewayCacheForPlatform is a gateway cache mock for single-platform tests +type mockGatewayCacheForPlatform struct { + sessionBindings map[string]int64 +} + +func (m *mockGatewayCacheForPlatform) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) { + if id, ok := m.sessionBindings[sessionHash]; ok { + return id, nil + } + return 0, errors.New("not found") +} + +func (m *mockGatewayCacheForPlatform) SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error { + if m.sessionBindings == nil { + m.sessionBindings = make(map[string]int64) + } + m.sessionBindings[sessionHash] = accountID + return nil +} + +func (m *mockGatewayCacheForPlatform) RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error { + return nil +} + +type mockGroupRepoForGateway struct { + groups map[int64]*Group + getByIDCalls int + getByIDLiteCalls int +} + +func (m *mockGroupRepoForGateway) GetByID(ctx context.Context, id int64) (*Group, error) { + m.getByIDCalls++ + if g, ok := m.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (m *mockGroupRepoForGateway) GetByIDLite(ctx context.Context, id int64) (*Group, error) { + m.getByIDLiteCalls++ + if g, ok := m.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (m *mockGroupRepoForGateway) Create(ctx context.Context, group *Group) error { return nil } +func (m *mockGroupRepoForGateway) Update(ctx context.Context, group *Group) error { return nil } +func (m *mockGroupRepoForGateway) Delete(ctx context.Context, id int64) error { return nil } +func (m *mockGroupRepoForGateway) DeleteCascade(ctx context.Context, id int64) ([]int64, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockGroupRepoForGateway) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockGroupRepoForGateway) ListActive(ctx context.Context) ([]Group, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) ExistsByName(ctx context.Context, name string) (bool, error) { + return false, nil +} +func (m *mockGroupRepoForGateway) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { + return 0, nil +} +func (m *mockGroupRepoForGateway) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, nil +}
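// Sketch (not part of the patch): the selection order the tests below assert,
// under the assumption that it behaves roughly like a sort: a lower Priority
// value wins, and within equal priority the least recently used account wins,
// with never-used accounts first. (The tests also show OAuth accounts preferred
// over API-key accounts at equal priority, which this sketch does not encode.)
// The real logic lives in GatewayService.selectAccountForModelWithPlatform.
package main

import (
	"fmt"
	"sort"
	"time"
)

type acct struct {
	ID         int64
	Priority   int
	LastUsedAt *time.Time
}

func main() {
	now := time.Now()
	older := now.Add(-2 * time.Hour)
	newer := now.Add(-1 * time.Hour)
	accounts := []acct{{ID: 1, Priority: 1, LastUsedAt: &newer}, {ID: 2, Priority: 1, LastUsedAt: &older}}

	sort.Slice(accounts, func(i, j int) bool {
		a, b := accounts[i], accounts[j]
		if a.Priority != b.Priority {
			return a.Priority < b.Priority // lower value = higher priority
		}
		if a.LastUsedAt == nil || b.LastUsedAt == nil {
			return a.LastUsedAt == nil && b.LastUsedAt != nil // never-used sorts first
		}
		return a.LastUsedAt.Before(*b.LastUsedAt) // least recently used first
	})
	fmt.Println(accounts[0].ID) // 2
}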
+func ptr[T any](v T) *T { + return &v +} + +// TestGatewayService_SelectAccountForModelWithPlatform_Anthropic tests single-platform selection for anthropic +func TestGatewayService_SelectAccountForModelWithPlatform_Anthropic(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 3, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, // should be isolated + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID, "should pick the highest-priority anthropic account") + require.Equal(t, PlatformAnthropic, acc.Platform, "should only return anthropic-platform accounts") +} + +// TestGatewayService_SelectAccountForModelWithPlatform_Antigravity tests single-platform selection for antigravity +func TestGatewayService_SelectAccountForModelWithPlatform_Antigravity(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, // should be isolated + {ID: 2, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAntigravity) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID) + require.Equal(t, PlatformAntigravity, acc.Platform, "should only return antigravity-platform accounts") +} + +// TestGatewayService_SelectAccountForModelWithPlatform_PriorityAndLastUsed tests priority and last-used ordering +func TestGatewayService_SelectAccountForModelWithPlatform_PriorityAndLastUsed(t *testing.T) { + ctx := context.Background() + now := time.Now() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: ptr(now.Add(-1 * time.Hour))}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: ptr(now.Add(-2 * time.Hour))}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "at equal priority the least recently used account should be picked") +} + +func TestGatewayService_SelectAccountForModelWithPlatform_GeminiOAuthPreference(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 
1, Status: StatusActive, Schedulable: true, Type: AccountTypeAPIKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "at equal priority with no prior use, OAuth accounts should be preferred") +} + +// TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts tests the no-available-accounts case +func TestGatewayService_SelectAccountForModelWithPlatform_NoAvailableAccounts(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{}, + accountsByID: map[int64]*Account{}, + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "no available accounts") +} + +// TestGatewayService_SelectAccountForModelWithPlatform_AllExcluded tests the case where every account is excluded +func TestGatewayService_SelectAccountForModelWithPlatform_AllExcluded(t *testing.T) { + ctx := context.Background() + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + excludedIDs := map[int64]struct{}{1: {}, 2: {}} + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs, PlatformAnthropic) + require.Error(t, err) + require.Nil(t, acc) +} + +// TestGatewayService_SelectAccountForModelWithPlatform_Schedulability tests account schedulability checks +func TestGatewayService_SelectAccountForModelWithPlatform_Schedulability(t *testing.T) { + ctx := context.Background() + now := time.Now() + + tests := []struct { + name string + accounts []Account + expectedID int64 + }{ + { + name: "overloaded accounts are skipped", + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, OverloadUntil: ptr(now.Add(1 * time.Hour))}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + expectedID: 2, + }, + { + name: "rate-limited accounts are skipped", + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, RateLimitResetAt: ptr(now.Add(1 * time.Hour))}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + expectedID: 2, + }, + { + name: "non-active accounts are skipped", + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: "error", Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + expectedID: 2, + }, + { + name: "schedulable=false is skipped", + 
accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: false}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + expectedID: 2, + }, + { + name: "an expired overload is schedulable again", + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, OverloadUntil: ptr(now.Add(-1 * time.Hour))}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + }, + expectedID: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: tt.accounts, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, tt.expectedID, acc.ID) + }) + } +} + +// TestGatewayService_SelectAccountForModelWithPlatform_StickySession tests sticky sessions +func TestGatewayService_SelectAccountForModelWithPlatform_StickySession(t *testing.T) { + ctx := context.Background() + + t.Run("sticky session hit on the same platform", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID, "should return the sticky-session-bound account") + }) + + t.Run("sticky session bound to another platform falls back", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true}, // sticky-bound, but platform mismatch + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, // bound to an antigravity account + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + // Request the anthropic platform while the sticky session is bound to an antigravity account. + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "sticky account platform mismatch; should fall back to a same-platform account") + require.Equal(t, PlatformAnthropic, acc.Platform) + }) + + t.Run("excluded sticky account falls back", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: 
StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + excludedIDs := map[int64]struct{}{1: {}} + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", excludedIDs, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "粘性会话账户被排除,应选择其他账户") + }) + + t.Run("粘性会话账户不可调度-降级选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: "error", Schedulable: true}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 1}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountForModelWithPlatform(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "粘性会话账户不可调度,应选择其他账户") + }) +} + +func TestGatewayService_isModelSupportedByAccount(t *testing.T) { + svc := &GatewayService{} + + tests := []struct { + name string + account *Account + model string + expected bool + }{ + { + name: "Antigravity平台-支持claude模型", + account: &Account{Platform: PlatformAntigravity}, + model: "claude-3-5-sonnet-20241022", + expected: true, + }, + { + name: "Antigravity平台-支持gemini模型", + account: &Account{Platform: PlatformAntigravity}, + model: "gemini-2.5-flash", + expected: true, + }, + { + name: "Antigravity平台-不支持gpt模型", + account: &Account{Platform: PlatformAntigravity}, + model: "gpt-4", + expected: false, + }, + { + name: "Anthropic平台-无映射配置-支持所有模型", + account: &Account{Platform: PlatformAnthropic}, + model: "claude-3-5-sonnet-20241022", + expected: true, + }, + { + name: "Anthropic平台-有映射配置-只支持配置的模型", + account: &Account{ + Platform: PlatformAnthropic, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-opus-4": "x"}}, + }, + model: "claude-3-5-sonnet-20241022", + expected: false, + }, + { + name: "Anthropic平台-有映射配置-支持配置的模型", + account: &Account{ + Platform: PlatformAnthropic, + Credentials: map[string]any{"model_mapping": map[string]any{"claude-3-5-sonnet-20241022": "x"}}, + }, + model: "claude-3-5-sonnet-20241022", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := svc.isModelSupportedByAccount(tt.account, tt.model) + require.Equal(t, tt.expected, got) + }) + } +} + +// TestGatewayService_selectAccountWithMixedScheduling 测试混合调度 +func TestGatewayService_selectAccountWithMixedScheduling(t *testing.T) { + ctx := context.Background() + + t.Run("混合调度-Gemini优先选择OAuth账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeAPIKey}, + {ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true, Type: AccountTypeOAuth}, + }, + accountsByID: 
map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "gemini-2.5-pro", nil, PlatformGemini) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "同优先级且未使用时应优先选择OAuth账户") + }) + + t.Run("混合调度-包含启用mixed_scheduling的antigravity账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "应选择优先级最高的账户(包含启用混合调度的antigravity)") + }) + + t.Run("混合调度-过滤未启用mixed_scheduling的antigravity账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, // 未启用 mixed_scheduling + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID, "未启用mixed_scheduling的antigravity账户应被过滤") + require.Equal(t, PlatformAnthropic, acc.Platform) + }) + + t.Run("混合调度-粘性会话命中启用mixed_scheduling的antigravity账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + {ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 2}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(2), acc.ID, "应返回粘性会话绑定的启用mixed_scheduling的antigravity账户") + }) + + t.Run("混合调度-粘性会话命中未启用mixed_scheduling的antigravity账户-降级选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + 
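+				// The antigravity account below deliberately omits
+				// Extra["mixed_scheduling"]: the sticky binding pointing at it
+				// must be ignored and selection must fall back to the
+				// anthropic account above.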
{ID: 2, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true}, // 未启用 mixed_scheduling + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"session-123": 2}, + } + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "session-123", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID, "粘性会话绑定的账户未启用mixed_scheduling,应降级选择anthropic账户") + }) + + t.Run("混合调度-仅有启用mixed_scheduling的antigravity账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true, Extra: map[string]any{"mixed_scheduling": true}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, int64(1), acc.ID) + require.Equal(t, PlatformAntigravity, acc.Platform) + }) + + t.Run("混合调度-无可用账户", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, // 未启用 mixed_scheduling + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: testConfig(), + } + + acc, err := svc.selectAccountWithMixedScheduling(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, PlatformAnthropic) + require.Error(t, err) + require.Nil(t, acc) + require.Contains(t, err.Error(), "no available accounts") + }) +} + +// TestAccount_IsMixedSchedulingEnabled 测试混合调度开关检查 +func TestAccount_IsMixedSchedulingEnabled(t *testing.T) { + tests := []struct { + name string + account Account + expected bool + }{ + { + name: "非antigravity平台-返回false", + account: Account{Platform: PlatformAnthropic}, + expected: false, + }, + { + name: "antigravity平台-无extra-返回false", + account: Account{Platform: PlatformAntigravity}, + expected: false, + }, + { + name: "antigravity平台-extra无mixed_scheduling-返回false", + account: Account{Platform: PlatformAntigravity, Extra: map[string]any{}}, + expected: false, + }, + { + name: "antigravity平台-mixed_scheduling=false-返回false", + account: Account{Platform: PlatformAntigravity, Extra: map[string]any{"mixed_scheduling": false}}, + expected: false, + }, + { + name: "antigravity平台-mixed_scheduling=true-返回true", + account: Account{Platform: PlatformAntigravity, Extra: map[string]any{"mixed_scheduling": true}}, + expected: true, + }, + { + name: "antigravity平台-mixed_scheduling非bool类型-返回false", + account: Account{Platform: PlatformAntigravity, Extra: map[string]any{"mixed_scheduling": "true"}}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.account.IsMixedSchedulingEnabled() + 
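+			// A body consistent with every case in this table (assumption: the
+			// real method is defined on Account elsewhere in this patch):
+			//
+			//	func (a Account) IsMixedSchedulingEnabled() bool {
+			//		if a.Platform != PlatformAntigravity || a.Extra == nil {
+			//			return false
+			//		}
+			//		enabled, ok := a.Extra["mixed_scheduling"].(bool)
+			//		return ok && enabled
+			//	}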
require.Equal(t, tt.expected, got) + }) + } +} + +// mockConcurrencyService for testing +type mockConcurrencyService struct { + accountLoads map[int64]*AccountLoadInfo + accountWaitCounts map[int64]int + acquireResults map[int64]bool +} + +func (m *mockConcurrencyService) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + if m.accountLoads == nil { + return map[int64]*AccountLoadInfo{}, nil + } + result := make(map[int64]*AccountLoadInfo) + for _, acc := range accounts { + if load, ok := m.accountLoads[acc.ID]; ok { + result[acc.ID] = load + } else { + result[acc.ID] = &AccountLoadInfo{ + AccountID: acc.ID, + CurrentConcurrency: 0, + WaitingCount: 0, + LoadRate: 0, + } + } + } + return result, nil +} + +func (m *mockConcurrencyService) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + if m.accountWaitCounts == nil { + return 0, nil + } + return m.accountWaitCounts[accountID], nil +} + +type mockConcurrencyCache struct { + acquireAccountCalls int + loadBatchCalls int +} + +func (m *mockConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { + m.acquireAccountCalls++ + return true, nil +} + +func (m *mockConcurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) DecrementAccountWaitCount(ctx context.Context, accountID int64) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) ReleaseUserSlot(ctx context.Context, userID int64, requestID string) error { + return nil +} + +func (m *mockConcurrencyCache) GetUserConcurrency(ctx context.Context, userID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) DecrementWaitCount(ctx context.Context, userID int64) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + m.loadBatchCalls++ + result := make(map[int64]*AccountLoadInfo, len(accounts)) + for _, acc := range accounts { + result[acc.ID] = &AccountLoadInfo{ + AccountID: acc.ID, + CurrentConcurrency: 0, + WaitingCount: 0, + LoadRate: 0, + } + } + return result, nil +} + +func (m *mockConcurrencyCache) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { + return nil +} + +// TestGatewayService_SelectAccountWithLoadAwareness tests load-aware account selection +func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { + ctx := context.Background() + + t.Run("禁用负载批量查询-降级到传统选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, 
Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, // No concurrency service + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID, "应选择优先级最高的账号") + }) + + t.Run("无ConcurrencyService-降级到传统选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应选择优先级最高的账号") + }) + + t.Run("排除账号-不选择被排除的账号", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + excludedIDs := map[int64]struct{}{1: {}} + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "不应选择被排除的账号") + }) + + t.Run("粘性命中-不调用GetByID", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "sticky", 
"claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID) + require.Equal(t, 0, repo.getByIDCalls, "粘性命中不应调用GetByID") + require.Equal(t, 0, concurrencyCache.loadBatchCalls, "粘性命中应在负载批量查询前返回") + }) + + t.Run("粘性账号不在候选集-回退负载感知选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "sticky", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "粘性账号不在候选集时应回退到可用账号") + require.Equal(t, 0, repo.getByIDCalls, "粘性账号缺失不应回退到GetByID") + require.Equal(t, 1, concurrencyCache.loadBatchCalls, "应继续进行负载批量查询") + }) + + t.Run("无可用账号-返回错误", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{}, + accountsByID: map[int64]*Account{}, + } + + cache := &mockGatewayCacheForPlatform{} + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "no available accounts") + }) + + t.Run("过滤不可调度账号-限流账号被跳过", func(t *testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, RateLimitResetAt: &resetAt}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应跳过限流账号,选择可用账号") + }) + + t.Run("过滤不可调度账号-过载账号被跳过", func(t *testing.T) { + now := time.Now() + overloadUntil := now.Add(10 * time.Minute) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, OverloadUntil: &overloadUntil}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: 
map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应跳过过载账号,选择可用账号") + }) +} + +func TestGatewayService_GroupResolution_ReusesContextGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(42) + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + ctx = context.WithValue(ctx, ctxkey.Group, group) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{groupID: group}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 0, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_GroupResolution_IgnoresInvalidContextGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(42) + ctxGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + ctx = context.WithValue(ctx, ctxkey.Group, ctxGroup) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{groupID: group}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 1, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_GroupContext_OverwritesInvalidContextGroup(t *testing.T) { + groupID := int64(42) + invalidGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + hydratedGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + + ctx := context.WithValue(context.Background(), ctxkey.Group, invalidGroup) + svc := &GatewayService{} + ctx = svc.withGroupContext(ctx, hydratedGroup) + + got, ok := ctx.Value(ctxkey.Group).(*Group) + require.True(t, ok) + require.Same(t, hydratedGroup, got) +} + +func TestGatewayService_GroupResolution_FallbackUsesLiteOnce(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + fallbackID := 
int64(11) + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &fallbackID, + Hydrated: true, + } + fallbackGroup := &Group{ + ID: fallbackID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + ctx = context.WithValue(ctx, ctxkey.Group, group) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{fallbackID: fallbackGroup}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 1, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_ResolveGatewayGroup_DetectsFallbackCycle(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + fallbackID := int64(11) + + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &fallbackID, + } + fallbackGroup := &Group{ + ID: fallbackID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &groupID, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: group, + fallbackID: fallbackGroup, + }, + } + + svc := &GatewayService{ + groupRepo: groupRepo, + } + + gotGroup, gotID, err := svc.resolveGatewayGroup(ctx, &groupID) + require.Error(t, err) + require.Nil(t, gotGroup) + require.Nil(t, gotID) + require.Contains(t, err.Error(), "fallback group cycle") +} diff --git a/backend/internal/service/gateway_prompt_test.go b/backend/internal/service/gateway_prompt_test.go new file mode 100644 index 00000000..b056f8fa --- /dev/null +++ b/backend/internal/service/gateway_prompt_test.go @@ -0,0 +1,233 @@ +package service + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsClaudeCodeClient(t *testing.T) { + tests := []struct { + name string + userAgent string + metadataUserID string + want bool + }{ + { + name: "Claude Code client", + userAgent: "claude-cli/1.0.62 (darwin; arm64)", + metadataUserID: "session_123e4567-e89b-12d3-a456-426614174000", + want: true, + }, + { + name: "Claude Code without version suffix", + userAgent: "claude-cli/2.0.0", + metadataUserID: "session_abc", + want: true, + }, + { + name: "Missing metadata user_id", + userAgent: "claude-cli/1.0.0", + metadataUserID: "", + want: false, + }, + { + name: "Different user agent", + userAgent: "curl/7.68.0", + metadataUserID: "user123", + want: false, + }, + { + name: "Empty user agent", + userAgent: "", + metadataUserID: "user123", + want: false, + }, + { + name: "Similar but not Claude CLI", + userAgent: "claude-api/1.0.0", + metadataUserID: "user123", + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isClaudeCodeClient(tt.userAgent, tt.metadataUserID) + require.Equal(t, tt.want, got) + }) + } +} + +func TestSystemIncludesClaudeCodePrompt(t *testing.T) { + tests := []struct { + name string + system any + want bool + }{ + { + name: "nil system", + system: nil, + 
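+			// Absent/nil system must not count as "includes the prompt", so
+			// injection (tested below) still runs for such requests.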
want: false, + }, + { + name: "empty string", + system: "", + want: false, + }, + { + name: "string with Claude Code prompt", + system: claudeCodeSystemPrompt, + want: true, + }, + { + name: "string with different content", + system: "You are a helpful assistant.", + want: false, + }, + { + name: "empty array", + system: []any{}, + want: false, + }, + { + name: "array with Claude Code prompt", + system: []any{ + map[string]any{ + "type": "text", + "text": claudeCodeSystemPrompt, + }, + }, + want: true, + }, + { + name: "array with Claude Code prompt in second position", + system: []any{ + map[string]any{"type": "text", "text": "First prompt"}, + map[string]any{"type": "text", "text": claudeCodeSystemPrompt}, + }, + want: true, + }, + { + name: "array without Claude Code prompt", + system: []any{ + map[string]any{"type": "text", "text": "Custom prompt"}, + }, + want: false, + }, + { + name: "array with partial match (should not match)", + system: []any{ + map[string]any{"type": "text", "text": "You are Claude"}, + }, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := systemIncludesClaudeCodePrompt(tt.system) + require.Equal(t, tt.want, got) + }) + } +} + +func TestInjectClaudeCodePrompt(t *testing.T) { + tests := []struct { + name string + body string + system any + wantSystemLen int + wantFirstText string + wantSecondText string + }{ + { + name: "nil system", + body: `{"model":"claude-3"}`, + system: nil, + wantSystemLen: 1, + wantFirstText: claudeCodeSystemPrompt, + }, + { + name: "empty string system", + body: `{"model":"claude-3"}`, + system: "", + wantSystemLen: 1, + wantFirstText: claudeCodeSystemPrompt, + }, + { + name: "string system", + body: `{"model":"claude-3"}`, + system: "Custom prompt", + wantSystemLen: 2, + wantFirstText: claudeCodeSystemPrompt, + wantSecondText: "Custom prompt", + }, + { + name: "string system equals Claude Code prompt", + body: `{"model":"claude-3"}`, + system: claudeCodeSystemPrompt, + wantSystemLen: 1, + wantFirstText: claudeCodeSystemPrompt, + }, + { + name: "array system", + body: `{"model":"claude-3"}`, + system: []any{map[string]any{"type": "text", "text": "Custom"}}, + // Claude Code + Custom = 2 + wantSystemLen: 2, + wantFirstText: claudeCodeSystemPrompt, + wantSecondText: "Custom", + }, + { + name: "array system with existing Claude Code prompt (should dedupe)", + body: `{"model":"claude-3"}`, + system: []any{ + map[string]any{"type": "text", "text": claudeCodeSystemPrompt}, + map[string]any{"type": "text", "text": "Other"}, + }, + // Claude Code at start + Other = 2 (deduped) + wantSystemLen: 2, + wantFirstText: claudeCodeSystemPrompt, + wantSecondText: "Other", + }, + { + name: "empty array", + body: `{"model":"claude-3"}`, + system: []any{}, + wantSystemLen: 1, + wantFirstText: claudeCodeSystemPrompt, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := injectClaudeCodePrompt([]byte(tt.body), tt.system) + + var parsed map[string]any + err := json.Unmarshal(result, &parsed) + require.NoError(t, err) + + system, ok := parsed["system"].([]any) + require.True(t, ok, "system should be an array") + require.Len(t, system, tt.wantSystemLen) + + first, ok := system[0].(map[string]any) + require.True(t, ok) + require.Equal(t, tt.wantFirstText, first["text"]) + require.Equal(t, "text", first["type"]) + + // Check cache_control + cc, ok := first["cache_control"].(map[string]any) + require.True(t, ok) + require.Equal(t, "ephemeral", cc["type"]) + + if tt.wantSecondText 
!= "" && len(system) > 1 { + second, ok := system[1].(map[string]any) + require.True(t, ok) + require.Equal(t, tt.wantSecondText, second["text"]) + } + }) + } +} diff --git a/backend/internal/service/gateway_request.go b/backend/internal/service/gateway_request.go new file mode 100644 index 00000000..aa48d880 --- /dev/null +++ b/backend/internal/service/gateway_request.go @@ -0,0 +1,505 @@ +package service + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// ParsedRequest 保存网关请求的预解析结果 +// +// 性能优化说明: +// 原实现在多个位置重复解析请求体(Handler、Service 各解析一次): +// 1. gateway_handler.go 解析获取 model 和 stream +// 2. gateway_service.go 再次解析获取 system、messages、metadata +// 3. GenerateSessionHash 又一次解析获取会话哈希所需字段 +// +// 新实现一次解析,多处复用: +// 1. 在 Handler 层统一调用 ParseGatewayRequest 一次性解析 +// 2. 将解析结果 ParsedRequest 传递给 Service 层 +// 3. 避免重复 json.Unmarshal,减少 CPU 和内存开销 +type ParsedRequest struct { + Body []byte // 原始请求体(保留用于转发) + Model string // 请求的模型名称 + Stream bool // 是否为流式请求 + MetadataUserID string // metadata.user_id(用于会话亲和) + System any // system 字段内容 + Messages []any // messages 数组 + HasSystem bool // 是否包含 system 字段(包含 null 也视为显式传入) +} + +// ParseGatewayRequest 解析网关请求体并返回结构化结果 +// 性能优化:一次解析提取所有需要的字段,避免重复 Unmarshal +func ParseGatewayRequest(body []byte) (*ParsedRequest, error) { + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return nil, err + } + + parsed := &ParsedRequest{ + Body: body, + } + + if rawModel, exists := req["model"]; exists { + model, ok := rawModel.(string) + if !ok { + return nil, fmt.Errorf("invalid model field type") + } + parsed.Model = model + } + if rawStream, exists := req["stream"]; exists { + stream, ok := rawStream.(bool) + if !ok { + return nil, fmt.Errorf("invalid stream field type") + } + parsed.Stream = stream + } + if metadata, ok := req["metadata"].(map[string]any); ok { + if userID, ok := metadata["user_id"].(string); ok { + parsed.MetadataUserID = userID + } + } + // system 字段只要存在就视为显式提供(即使为 null), + // 以避免客户端传 null 时被默认 system 误注入。 + if system, ok := req["system"]; ok { + parsed.HasSystem = true + parsed.System = system + } + if messages, ok := req["messages"].([]any); ok { + parsed.Messages = messages + } + + return parsed, nil +} + +// FilterThinkingBlocks removes thinking blocks from request body +// Returns filtered body or original body if filtering fails (fail-safe) +// This prevents 400 errors from invalid thinking block signatures +// +// Strategy: +// - When thinking.type != "enabled": Remove all thinking blocks +// - When thinking.type == "enabled": Only remove thinking blocks without valid signatures +// (blocks with missing/empty/dummy signatures that would cause 400 errors) +func FilterThinkingBlocks(body []byte) []byte { + return filterThinkingBlocksInternal(body, false) +} + +// FilterThinkingBlocksForRetry strips thinking-related constructs for retry scenarios. +// +// Why: +// - Upstreams may reject historical `thinking`/`redacted_thinking` blocks due to invalid/missing signatures. +// - Anthropic extended thinking has a structural constraint: when top-level `thinking` is enabled and the +// final message is an assistant prefill, the assistant content must start with a thinking block. +// - If we remove thinking blocks but keep top-level `thinking` enabled, we can trigger: +// "Expected `thinking` or `redacted_thinking`, but found `text`" +// +// Strategy (B: preserve content as text): +// - Disable top-level `thinking` (remove `thinking` field). +// - Convert `thinking` blocks to `text` blocks (preserve the thinking content). 
+// - Remove `redacted_thinking` blocks (cannot be converted to text). +// - Ensure no message ends up with empty content. +func FilterThinkingBlocksForRetry(body []byte) []byte { + hasThinkingContent := bytes.Contains(body, []byte(`"type":"thinking"`)) || + bytes.Contains(body, []byte(`"type": "thinking"`)) || + bytes.Contains(body, []byte(`"type":"redacted_thinking"`)) || + bytes.Contains(body, []byte(`"type": "redacted_thinking"`)) || + bytes.Contains(body, []byte(`"thinking":`)) || + bytes.Contains(body, []byte(`"thinking" :`)) + + // Also check for empty content arrays that need fixing. + // Note: This is a heuristic check; the actual empty content handling is done below. + hasEmptyContent := bytes.Contains(body, []byte(`"content":[]`)) || + bytes.Contains(body, []byte(`"content": []`)) || + bytes.Contains(body, []byte(`"content" : []`)) || + bytes.Contains(body, []byte(`"content" :[]`)) + + // Fast path: nothing to process + if !hasThinkingContent && !hasEmptyContent { + return body + } + + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return body + } + + modified := false + + messages, ok := req["messages"].([]any) + if !ok { + return body + } + + // Disable top-level thinking mode for retry to avoid structural/signature constraints upstream. + if _, exists := req["thinking"]; exists { + delete(req, "thinking") + modified = true + } + + newMessages := make([]any, 0, len(messages)) + + for _, msg := range messages { + msgMap, ok := msg.(map[string]any) + if !ok { + newMessages = append(newMessages, msg) + continue + } + + role, _ := msgMap["role"].(string) + content, ok := msgMap["content"].([]any) + if !ok { + // String content or other format - keep as is + newMessages = append(newMessages, msg) + continue + } + + newContent := make([]any, 0, len(content)) + modifiedThisMsg := false + + for _, block := range content { + blockMap, ok := block.(map[string]any) + if !ok { + newContent = append(newContent, block) + continue + } + + blockType, _ := blockMap["type"].(string) + + // Convert thinking blocks to text (preserve content) and drop redacted_thinking. + switch blockType { + case "thinking": + modifiedThisMsg = true + thinkingText, _ := blockMap["thinking"].(string) + if thinkingText == "" { + continue + } + newContent = append(newContent, map[string]any{ + "type": "text", + "text": thinkingText, + }) + continue + case "redacted_thinking": + modifiedThisMsg = true + continue + } + + // Handle blocks without type discriminator but with a "thinking" field. 
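+			// String payloads are preserved as text; any other value is
+			// JSON-encoded first so no thinking content is silently lost.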
+ if blockType == "" { + if rawThinking, hasThinking := blockMap["thinking"]; hasThinking { + modifiedThisMsg = true + switch v := rawThinking.(type) { + case string: + if v != "" { + newContent = append(newContent, map[string]any{"type": "text", "text": v}) + } + default: + if b, err := json.Marshal(v); err == nil && len(b) > 0 { + newContent = append(newContent, map[string]any{"type": "text", "text": string(b)}) + } + } + continue + } + } + + newContent = append(newContent, block) + } + + // Handle empty content: either from filtering or originally empty + if len(newContent) == 0 { + modified = true + placeholder := "(content removed)" + if role == "assistant" { + placeholder = "(assistant content removed)" + } + newContent = append(newContent, map[string]any{ + "type": "text", + "text": placeholder, + }) + msgMap["content"] = newContent + } else if modifiedThisMsg { + modified = true + msgMap["content"] = newContent + } + newMessages = append(newMessages, msgMap) + } + + if modified { + req["messages"] = newMessages + } else { + // Avoid rewriting JSON when no changes are needed. + return body + } + + newBody, err := json.Marshal(req) + if err != nil { + return body + } + return newBody +} + +// FilterSignatureSensitiveBlocksForRetry is a stronger retry filter for cases where upstream errors indicate +// signature/thought_signature validation issues involving tool blocks. +// +// This performs everything in FilterThinkingBlocksForRetry, plus: +// - Convert `tool_use` blocks to text (name/id/input) so we stop sending structured tool calls. +// - Convert `tool_result` blocks to text so we keep tool results visible without tool semantics. +// +// Use this only when needed: converting tool blocks to text changes model behaviour and can increase the +// risk of prompt injection (tool output becomes plain conversation text). +func FilterSignatureSensitiveBlocksForRetry(body []byte) []byte { + // Fast path: only run when we see likely relevant constructs. + if !bytes.Contains(body, []byte(`"type":"thinking"`)) && + !bytes.Contains(body, []byte(`"type": "thinking"`)) && + !bytes.Contains(body, []byte(`"type":"redacted_thinking"`)) && + !bytes.Contains(body, []byte(`"type": "redacted_thinking"`)) && + !bytes.Contains(body, []byte(`"type":"tool_use"`)) && + !bytes.Contains(body, []byte(`"type": "tool_use"`)) && + !bytes.Contains(body, []byte(`"type":"tool_result"`)) && + !bytes.Contains(body, []byte(`"type": "tool_result"`)) && + !bytes.Contains(body, []byte(`"thinking":`)) && + !bytes.Contains(body, []byte(`"thinking" :`)) { + return body + } + + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return body + } + + modified := false + + // Disable top-level thinking for retry to avoid structural/signature constraints upstream. 
+ if _, exists := req["thinking"]; exists { + delete(req, "thinking") + modified = true + } + + messages, ok := req["messages"].([]any) + if !ok { + return body + } + + newMessages := make([]any, 0, len(messages)) + + for _, msg := range messages { + msgMap, ok := msg.(map[string]any) + if !ok { + newMessages = append(newMessages, msg) + continue + } + + role, _ := msgMap["role"].(string) + content, ok := msgMap["content"].([]any) + if !ok { + newMessages = append(newMessages, msg) + continue + } + + newContent := make([]any, 0, len(content)) + modifiedThisMsg := false + + for _, block := range content { + blockMap, ok := block.(map[string]any) + if !ok { + newContent = append(newContent, block) + continue + } + + blockType, _ := blockMap["type"].(string) + switch blockType { + case "thinking": + modifiedThisMsg = true + thinkingText, _ := blockMap["thinking"].(string) + if thinkingText == "" { + continue + } + newContent = append(newContent, map[string]any{"type": "text", "text": thinkingText}) + continue + case "redacted_thinking": + modifiedThisMsg = true + continue + case "tool_use": + modifiedThisMsg = true + name, _ := blockMap["name"].(string) + id, _ := blockMap["id"].(string) + input := blockMap["input"] + inputJSON, _ := json.Marshal(input) + text := "(tool_use)" + if name != "" { + text += " name=" + name + } + if id != "" { + text += " id=" + id + } + if len(inputJSON) > 0 && string(inputJSON) != "null" { + text += " input=" + string(inputJSON) + } + newContent = append(newContent, map[string]any{"type": "text", "text": text}) + continue + case "tool_result": + modifiedThisMsg = true + toolUseID, _ := blockMap["tool_use_id"].(string) + isError, _ := blockMap["is_error"].(bool) + content := blockMap["content"] + contentJSON, _ := json.Marshal(content) + text := "(tool_result)" + if toolUseID != "" { + text += " tool_use_id=" + toolUseID + } + if isError { + text += " is_error=true" + } + if len(contentJSON) > 0 && string(contentJSON) != "null" { + text += "\n" + string(contentJSON) + } + newContent = append(newContent, map[string]any{"type": "text", "text": text}) + continue + } + + if blockType == "" { + if rawThinking, hasThinking := blockMap["thinking"]; hasThinking { + modifiedThisMsg = true + switch v := rawThinking.(type) { + case string: + if v != "" { + newContent = append(newContent, map[string]any{"type": "text", "text": v}) + } + default: + if b, err := json.Marshal(v); err == nil && len(b) > 0 { + newContent = append(newContent, map[string]any{"type": "text", "text": string(b)}) + } + } + continue + } + } + + newContent = append(newContent, block) + } + + if modifiedThisMsg { + modified = true + if len(newContent) == 0 { + placeholder := "(content removed)" + if role == "assistant" { + placeholder = "(assistant content removed)" + } + newContent = append(newContent, map[string]any{"type": "text", "text": placeholder}) + } + msgMap["content"] = newContent + } + + newMessages = append(newMessages, msgMap) + } + + if !modified { + return body + } + + req["messages"] = newMessages + newBody, err := json.Marshal(req) + if err != nil { + return body + } + return newBody +} + +// filterThinkingBlocksInternal removes invalid thinking blocks from request +// Strategy: +// - When thinking.type != "enabled": Remove all thinking blocks +// - When thinking.type == "enabled": Only remove thinking blocks without valid signatures +func filterThinkingBlocksInternal(body []byte, _ bool) []byte { + // Fast path: if body doesn't contain "thinking", skip parsing + if 
!bytes.Contains(body, []byte(`"type":"thinking"`)) && + !bytes.Contains(body, []byte(`"type": "thinking"`)) && + !bytes.Contains(body, []byte(`"type":"redacted_thinking"`)) && + !bytes.Contains(body, []byte(`"type": "redacted_thinking"`)) && + !bytes.Contains(body, []byte(`"thinking":`)) && + !bytes.Contains(body, []byte(`"thinking" :`)) { + return body + } + + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return body + } + + // Check if thinking is enabled + thinkingEnabled := false + if thinking, ok := req["thinking"].(map[string]any); ok { + if thinkType, ok := thinking["type"].(string); ok && thinkType == "enabled" { + thinkingEnabled = true + } + } + + messages, ok := req["messages"].([]any) + if !ok { + return body + } + + filtered := false + for _, msg := range messages { + msgMap, ok := msg.(map[string]any) + if !ok { + continue + } + + role, _ := msgMap["role"].(string) + content, ok := msgMap["content"].([]any) + if !ok { + continue + } + + newContent := make([]any, 0, len(content)) + filteredThisMessage := false + + for _, block := range content { + blockMap, ok := block.(map[string]any) + if !ok { + newContent = append(newContent, block) + continue + } + + blockType, _ := blockMap["type"].(string) + + if blockType == "thinking" || blockType == "redacted_thinking" { + // When thinking is enabled and this is an assistant message, + // only keep thinking blocks with valid signatures + if thinkingEnabled && role == "assistant" { + signature, _ := blockMap["signature"].(string) + if signature != "" && signature != "skip_thought_signature_validator" { + newContent = append(newContent, block) + continue + } + } + filtered = true + filteredThisMessage = true + continue + } + + // Handle blocks without type discriminator but with "thinking" key + if blockType == "" { + if _, hasThinking := blockMap["thinking"]; hasThinking { + filtered = true + filteredThisMessage = true + continue + } + } + + newContent = append(newContent, block) + } + + if filteredThisMessage { + msgMap["content"] = newContent + } + } + + if !filtered { + return body + } + + newBody, err := json.Marshal(req) + if err != nil { + return body + } + return newBody +} diff --git a/backend/internal/service/gateway_request_test.go b/backend/internal/service/gateway_request_test.go new file mode 100644 index 00000000..f92496fb --- /dev/null +++ b/backend/internal/service/gateway_request_test.go @@ -0,0 +1,298 @@ +package service + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseGatewayRequest(t *testing.T) { + body := []byte(`{"model":"claude-3-7-sonnet","stream":true,"metadata":{"user_id":"session_123e4567-e89b-12d3-a456-426614174000"},"system":[{"type":"text","text":"hello","cache_control":{"type":"ephemeral"}}],"messages":[{"content":"hi"}]}`) + parsed, err := ParseGatewayRequest(body) + require.NoError(t, err) + require.Equal(t, "claude-3-7-sonnet", parsed.Model) + require.True(t, parsed.Stream) + require.Equal(t, "session_123e4567-e89b-12d3-a456-426614174000", parsed.MetadataUserID) + require.True(t, parsed.HasSystem) + require.NotNil(t, parsed.System) + require.Len(t, parsed.Messages, 1) +} + +func TestParseGatewayRequest_SystemNull(t *testing.T) { + body := []byte(`{"model":"claude-3","system":null}`) + parsed, err := ParseGatewayRequest(body) + require.NoError(t, err) + // 显式传入 system:null 也应视为“字段已存在”,避免默认 system 被注入。 + require.True(t, parsed.HasSystem) + require.Nil(t, parsed.System) +} + +func 
TestParseGatewayRequest_InvalidModelType(t *testing.T) { + body := []byte(`{"model":123}`) + _, err := ParseGatewayRequest(body) + require.Error(t, err) +} + +func TestParseGatewayRequest_InvalidStreamType(t *testing.T) { + body := []byte(`{"stream":"true"}`) + _, err := ParseGatewayRequest(body) + require.Error(t, err) +} + +func TestFilterThinkingBlocks(t *testing.T) { + containsThinkingBlock := func(body []byte) bool { + var req map[string]any + if err := json.Unmarshal(body, &req); err != nil { + return false + } + messages, ok := req["messages"].([]any) + if !ok { + return false + } + for _, msg := range messages { + msgMap, ok := msg.(map[string]any) + if !ok { + continue + } + content, ok := msgMap["content"].([]any) + if !ok { + continue + } + for _, block := range content { + blockMap, ok := block.(map[string]any) + if !ok { + continue + } + blockType, _ := blockMap["type"].(string) + if blockType == "thinking" { + return true + } + if blockType == "" { + if _, hasThinking := blockMap["thinking"]; hasThinking { + return true + } + } + } + } + return false + } + + tests := []struct { + name string + input string + shouldFilter bool + expectError bool + }{ + { + name: "filters thinking blocks", + input: `{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"user","content":[{"type":"text","text":"Hello"},{"type":"thinking","thinking":"internal","signature":"invalid"},{"type":"text","text":"World"}]}]}`, + shouldFilter: true, + }, + { + name: "handles no thinking blocks", + input: `{"model":"claude-3-5-sonnet-20241022","messages":[{"role":"user","content":[{"type":"text","text":"Hello"}]}]}`, + shouldFilter: false, + }, + { + name: "handles invalid JSON gracefully", + input: `{invalid json`, + shouldFilter: false, + expectError: true, + }, + { + name: "handles multiple messages with thinking blocks", + input: `{"messages":[{"role":"user","content":[{"type":"text","text":"A"}]},{"role":"assistant","content":[{"type":"thinking","thinking":"think"},{"type":"text","text":"B"}]}]}`, + shouldFilter: true, + }, + { + name: "filters thinking blocks without type discriminator", + input: `{"messages":[{"role":"assistant","content":[{"thinking":{"text":"internal"}},{"type":"text","text":"B"}]}]}`, + shouldFilter: true, + }, + { + name: "does not filter tool_use input fields named thinking", + input: `{"messages":[{"role":"user","content":[{"type":"tool_use","id":"t1","name":"foo","input":{"thinking":"keepme","x":1}},{"type":"text","text":"Hello"}]}]}`, + shouldFilter: false, + }, + { + name: "handles empty messages array", + input: `{"messages":[]}`, + shouldFilter: false, + }, + { + name: "handles missing messages field", + input: `{"model":"claude-3"}`, + shouldFilter: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := FilterThinkingBlocks([]byte(tt.input)) + + if tt.expectError { + // For invalid JSON, should return original + require.Equal(t, tt.input, string(result)) + return + } + + if tt.shouldFilter { + require.False(t, containsThinkingBlock(result)) + } else { + // Ensure we don't rewrite JSON when no filtering is needed. 
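+				// Byte-for-byte equality holds because the fast path returns the
+				// caller's slice untouched; re-marshaling would sort map keys and
+				// change whitespace even without a semantic diff.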
+ require.Equal(t, tt.input, string(result)) + } + + // Verify valid JSON returned (unless input was invalid) + var parsed map[string]any + err := json.Unmarshal(result, &parsed) + require.NoError(t, err) + }) + } +} + +func TestFilterThinkingBlocksForRetry_DisablesThinkingAndPreservesAsText(t *testing.T) { + input := []byte(`{ + "model":"claude-3-5-sonnet-20241022", + "thinking":{"type":"enabled","budget_tokens":1024}, + "messages":[ + {"role":"user","content":[{"type":"text","text":"Hi"}]}, + {"role":"assistant","content":[ + {"type":"thinking","thinking":"Let me think...","signature":"bad_sig"}, + {"type":"text","text":"Answer"} + ]} + ] + }`) + + out := FilterThinkingBlocksForRetry(input) + + var req map[string]any + require.NoError(t, json.Unmarshal(out, &req)) + _, hasThinking := req["thinking"] + require.False(t, hasThinking) + + msgs, ok := req["messages"].([]any) + require.True(t, ok) + require.Len(t, msgs, 2) + + assistant, ok := msgs[1].(map[string]any) + require.True(t, ok) + content, ok := assistant["content"].([]any) + require.True(t, ok) + require.Len(t, content, 2) + + first, ok := content[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "text", first["type"]) + require.Equal(t, "Let me think...", first["text"]) +} + +func TestFilterThinkingBlocksForRetry_DisablesThinkingEvenWithoutThinkingBlocks(t *testing.T) { + input := []byte(`{ + "model":"claude-3-5-sonnet-20241022", + "thinking":{"type":"enabled","budget_tokens":1024}, + "messages":[ + {"role":"user","content":[{"type":"text","text":"Hi"}]}, + {"role":"assistant","content":[{"type":"text","text":"Prefill"}]} + ] + }`) + + out := FilterThinkingBlocksForRetry(input) + + var req map[string]any + require.NoError(t, json.Unmarshal(out, &req)) + _, hasThinking := req["thinking"] + require.False(t, hasThinking) +} + +func TestFilterThinkingBlocksForRetry_RemovesRedactedThinkingAndKeepsValidContent(t *testing.T) { + input := []byte(`{ + "thinking":{"type":"enabled","budget_tokens":1024}, + "messages":[ + {"role":"assistant","content":[ + {"type":"redacted_thinking","data":"..."}, + {"type":"text","text":"Visible"} + ]} + ] + }`) + + out := FilterThinkingBlocksForRetry(input) + + var req map[string]any + require.NoError(t, json.Unmarshal(out, &req)) + _, hasThinking := req["thinking"] + require.False(t, hasThinking) + + msgs, ok := req["messages"].([]any) + require.True(t, ok) + msg0, ok := msgs[0].(map[string]any) + require.True(t, ok) + content, ok := msg0["content"].([]any) + require.True(t, ok) + require.Len(t, content, 1) + content0, ok := content[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "text", content0["type"]) + require.Equal(t, "Visible", content0["text"]) +} + +func TestFilterThinkingBlocksForRetry_EmptyContentGetsPlaceholder(t *testing.T) { + input := []byte(`{ + "thinking":{"type":"enabled"}, + "messages":[ + {"role":"assistant","content":[{"type":"redacted_thinking","data":"..."}]} + ] + }`) + + out := FilterThinkingBlocksForRetry(input) + + var req map[string]any + require.NoError(t, json.Unmarshal(out, &req)) + msgs, ok := req["messages"].([]any) + require.True(t, ok) + msg0, ok := msgs[0].(map[string]any) + require.True(t, ok) + content, ok := msg0["content"].([]any) + require.True(t, ok) + require.Len(t, content, 1) + content0, ok := content[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "text", content0["type"]) + require.NotEmpty(t, content0["text"]) +} + +func TestFilterSignatureSensitiveBlocksForRetry_DowngradesTools(t *testing.T) { + input := []byte(`{ + 
"thinking":{"type":"enabled","budget_tokens":1024}, + "messages":[ + {"role":"assistant","content":[ + {"type":"tool_use","id":"t1","name":"Bash","input":{"command":"ls"}}, + {"type":"tool_result","tool_use_id":"t1","content":"ok","is_error":false} + ]} + ] + }`) + + out := FilterSignatureSensitiveBlocksForRetry(input) + + var req map[string]any + require.NoError(t, json.Unmarshal(out, &req)) + _, hasThinking := req["thinking"] + require.False(t, hasThinking) + + msgs, ok := req["messages"].([]any) + require.True(t, ok) + msg0, ok := msgs[0].(map[string]any) + require.True(t, ok) + content, ok := msg0["content"].([]any) + require.True(t, ok) + require.Len(t, content, 2) + content0, ok := content[0].(map[string]any) + require.True(t, ok) + content1, ok := content[1].(map[string]any) + require.True(t, ok) + require.Equal(t, "text", content0["type"]) + require.Equal(t, "text", content1["type"]) + require.Contains(t, content0["text"], "tool_use") + require.Contains(t, content1["text"], "tool_result") +} diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go new file mode 100644 index 00000000..b552f030 --- /dev/null +++ b/backend/internal/service/gateway_service.go @@ -0,0 +1,3031 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "regexp" + "sort" + "strings" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/claude" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" + "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + + "github.com/gin-gonic/gin" +) + +const ( + claudeAPIURL = "https://api.anthropic.com/v1/messages?beta=true" + claudeAPICountTokensURL = "https://api.anthropic.com/v1/messages/count_tokens?beta=true" + stickySessionTTL = time.Hour // 粘性会话TTL + defaultMaxLineSize = 40 * 1024 * 1024 + claudeCodeSystemPrompt = "You are Claude Code, Anthropic's official CLI for Claude." + maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 +) + +// sseDataRe matches SSE data lines with optional whitespace after colon. +// Some upstream APIs return non-standard "data:" without space (should be "data: "). +var ( + sseDataRe = regexp.MustCompile(`^data:\s*`) + sessionIDRegex = regexp.MustCompile(`session_([a-f0-9-]{36})`) + claudeCliUserAgentRe = regexp.MustCompile(`^claude-cli/\d+\.\d+\.\d+`) + + // claudeCodePromptPrefixes 用于检测 Claude Code 系统提示词的前缀列表 + // 支持多种变体:标准版、Agent SDK 版、Explore Agent 版、Compact 版等 + // 注意:前缀之间不应存在包含关系,否则会导致冗余匹配 + claudeCodePromptPrefixes = []string{ + "You are Claude Code, Anthropic's official CLI for Claude", // 标准版 & Agent SDK 版(含 running within...) 
+ "You are a Claude agent, built on Anthropic's Claude Agent SDK", // Agent SDK 变体 + "You are a file search specialist for Claude Code", // Explore Agent 版 + "You are a helpful AI assistant tasked with summarizing conversations", // Compact 版 + } +) + +// ErrClaudeCodeOnly 表示分组仅允许 Claude Code 客户端访问 +var ErrClaudeCodeOnly = errors.New("this group only allows Claude Code clients") + +// allowedHeaders 白名单headers(参考CRS项目) +var allowedHeaders = map[string]bool{ + "accept": true, + "x-stainless-retry-count": true, + "x-stainless-timeout": true, + "x-stainless-lang": true, + "x-stainless-package-version": true, + "x-stainless-os": true, + "x-stainless-arch": true, + "x-stainless-runtime": true, + "x-stainless-runtime-version": true, + "x-stainless-helper-method": true, + "anthropic-dangerous-direct-browser-access": true, + "anthropic-version": true, + "x-app": true, + "anthropic-beta": true, + "accept-language": true, + "sec-fetch-mode": true, + "user-agent": true, + "content-type": true, +} + +// GatewayCache defines cache operations for gateway service +type GatewayCache interface { + GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) + SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error + RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error +} + +// derefGroupID safely dereferences *int64 to int64, returning 0 if nil +func derefGroupID(groupID *int64) int64 { + if groupID == nil { + return 0 + } + return *groupID +} + +type AccountWaitPlan struct { + AccountID int64 + MaxConcurrency int + Timeout time.Duration + MaxWaiting int +} + +type AccountSelectionResult struct { + Account *Account + Acquired bool + ReleaseFunc func() + WaitPlan *AccountWaitPlan // nil means no wait allowed +} + +// ClaudeUsage 表示Claude API返回的usage信息 +type ClaudeUsage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + CacheCreationInputTokens int `json:"cache_creation_input_tokens"` + CacheReadInputTokens int `json:"cache_read_input_tokens"` +} + +// ForwardResult 转发结果 +type ForwardResult struct { + RequestID string + Usage ClaudeUsage + Model string + Stream bool + Duration time.Duration + FirstTokenMs *int // 首字时间(流式请求) + ClientDisconnect bool // 客户端是否在流式传输过程中断开 + + // 图片生成计费字段(仅 gemini-3-pro-image 使用) + ImageCount int // 生成的图片数量 + ImageSize string // 图片尺寸 "1K", "2K", "4K" +} + +// UpstreamFailoverError indicates an upstream error that should trigger account failover. 
+type UpstreamFailoverError struct {
+	StatusCode int
+}
+
+func (e *UpstreamFailoverError) Error() string {
+	return fmt.Sprintf("upstream error: %d (failover)", e.StatusCode)
+}
+
+// GatewayService handles API gateway operations
+type GatewayService struct {
+	accountRepo         AccountRepository
+	groupRepo           GroupRepository
+	usageLogRepo        UsageLogRepository
+	userRepo            UserRepository
+	userSubRepo         UserSubscriptionRepository
+	cache               GatewayCache
+	cfg                 *config.Config
+	schedulerSnapshot   *SchedulerSnapshotService
+	billingService      *BillingService
+	rateLimitService    *RateLimitService
+	billingCacheService *BillingCacheService
+	identityService     *IdentityService
+	httpUpstream        HTTPUpstream
+	deferredService     *DeferredService
+	concurrencyService  *ConcurrencyService
+}
+
+// NewGatewayService creates a new GatewayService
+func NewGatewayService(
+	accountRepo AccountRepository,
+	groupRepo GroupRepository,
+	usageLogRepo UsageLogRepository,
+	userRepo UserRepository,
+	userSubRepo UserSubscriptionRepository,
+	cache GatewayCache,
+	cfg *config.Config,
+	schedulerSnapshot *SchedulerSnapshotService,
+	concurrencyService *ConcurrencyService,
+	billingService *BillingService,
+	rateLimitService *RateLimitService,
+	billingCacheService *BillingCacheService,
+	identityService *IdentityService,
+	httpUpstream HTTPUpstream,
+	deferredService *DeferredService,
+) *GatewayService {
+	return &GatewayService{
+		accountRepo:         accountRepo,
+		groupRepo:           groupRepo,
+		usageLogRepo:        usageLogRepo,
+		userRepo:            userRepo,
+		userSubRepo:         userSubRepo,
+		cache:               cache,
+		cfg:                 cfg,
+		schedulerSnapshot:   schedulerSnapshot,
+		concurrencyService:  concurrencyService,
+		billingService:      billingService,
+		rateLimitService:    rateLimitService,
+		billingCacheService: billingCacheService,
+		identityService:     identityService,
+		httpUpstream:        httpUpstream,
+		deferredService:     deferredService,
+	}
+}
+
+// GenerateSessionHash computes the sticky-session hash from a pre-parsed request.
+func (s *GatewayService) GenerateSessionHash(parsed *ParsedRequest) string {
+	if parsed == nil {
+		return ""
+	}
+
+	// 1. Highest priority: extract session_xxx from metadata.user_id
+	if parsed.MetadataUserID != "" {
+		if match := sessionIDRegex.FindStringSubmatch(parsed.MetadataUserID); len(match) > 1 {
+			return match[1]
+		}
+	}
+
+	// 2. Extract content tagged cache_control: {type: "ephemeral"}
+	cacheableContent := s.extractCacheableContent(parsed)
+	if cacheableContent != "" {
+		return s.hashContent(cacheableContent)
+	}
+
+	// 3. Fallback: use the system content
+	if parsed.System != nil {
+		systemText := s.extractTextFromSystem(parsed.System)
+		if systemText != "" {
+			return s.hashContent(systemText)
+		}
+	}
+
+	// 4. Last fallback: use the first message
+	if len(parsed.Messages) > 0 {
+		if firstMsg, ok := parsed.Messages[0].(map[string]any); ok {
+			msgText := s.extractTextFromContent(firstMsg["content"])
+			if msgText != "" {
+				return s.hashContent(msgText)
+			}
+		}
+	}
+
+	return ""
+}
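+
+// Illustration only (hypothetical user_id): for metadata.user_id of the form
+// "user_abc123_account__session_0f8d3b2a-1c4e-4d5f-9a6b-7c8d9e0f1a2b",
+// sessionIDRegex captures the 36-character UUID, so every turn of that
+// conversation maps to the same sticky key; otherwise the hash falls back to
+// cacheable content, then system text, then the first message, as above.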
+
+// BindStickySession sets session -> account binding with standard TTL.
+func (s *GatewayService) BindStickySession(ctx context.Context, groupID *int64, sessionHash string, accountID int64) error {
+	if sessionHash == "" || accountID <= 0 || s.cache == nil {
+		return nil
+	}
+	return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, accountID, stickySessionTTL)
+}
+
+func (s *GatewayService) extractCacheableContent(parsed *ParsedRequest) string {
+	if parsed == nil {
+		return ""
+	}
+
+	var builder strings.Builder
+
+	// Check system for cacheable content
+	if system, ok := parsed.System.([]any); ok {
+		for _, part := range system {
+			if partMap, ok := part.(map[string]any); ok {
+				if cc, ok := partMap["cache_control"].(map[string]any); ok {
+					if cc["type"] == "ephemeral" {
+						if text, ok := partMap["text"].(string); ok {
+							_, _ = builder.WriteString(text)
+						}
+					}
+				}
+			}
+		}
+	}
+	systemText := builder.String()
+
+	// Check messages for cacheable content
+	for _, msg := range parsed.Messages {
+		if msgMap, ok := msg.(map[string]any); ok {
+			if msgContent, ok := msgMap["content"].([]any); ok {
+				for _, part := range msgContent {
+					if partMap, ok := part.(map[string]any); ok {
+						if cc, ok := partMap["cache_control"].(map[string]any); ok {
+							if cc["type"] == "ephemeral" {
+								return s.extractTextFromContent(msgMap["content"])
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return systemText
+}
+
+func (s *GatewayService) extractTextFromSystem(system any) string {
+	switch v := system.(type) {
+	case string:
+		return v
+	case []any:
+		var texts []string
+		for _, part := range v {
+			if partMap, ok := part.(map[string]any); ok {
+				if text, ok := partMap["text"].(string); ok {
+					texts = append(texts, text)
+				}
+			}
+		}
+		return strings.Join(texts, "")
+	}
+	return ""
+}
+
+func (s *GatewayService) extractTextFromContent(content any) string {
+	switch v := content.(type) {
+	case string:
+		return v
+	case []any:
+		var texts []string
+		for _, part := range v {
+			if partMap, ok := part.(map[string]any); ok {
+				if partMap["type"] == "text" {
+					if text, ok := partMap["text"].(string); ok {
+						texts = append(texts, text)
+					}
+				}
+			}
+		}
+		return strings.Join(texts, "")
+	}
+	return ""
+}
+
+func (s *GatewayService) hashContent(content string) string {
+	hash := sha256.Sum256([]byte(content))
+	return hex.EncodeToString(hash[:16]) // 32 hex characters
+}
+
+// replaceModelInBody replaces the model field in the request body.
+func (s *GatewayService) replaceModelInBody(body []byte, newModel string) []byte {
+	var req map[string]any
+	if err := json.Unmarshal(body, &req); err != nil {
+		return body
+	}
+	req["model"] = newModel
+	newBody, err := json.Marshal(req)
+	if err != nil {
+		return body
+	}
+	return newBody
+}
+
+// SelectAccount picks an account (sticky session + priority).
+func (s *GatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) {
+	return s.SelectAccountForModel(ctx, groupID, sessionHash, "")
+}
+
+// SelectAccountForModel picks an account supporting the given model (sticky session + priority + model mapping).
+func (s *GatewayService) SelectAccountForModel(ctx context.Context, groupID *int64, sessionHash string, requestedModel string) (*Account, error) {
+	return s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, nil)
+}
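+
+// A minimal sketch of the model rewrite above (hypothetical values):
+//
+//	body := []byte(`{"model":"claude-3-5-sonnet-20241022","stream":true}`)
+//	body = s.replaceModelInBody(body, "claude-sonnet-4-5")
+//	// body now carries {"model":"claude-sonnet-4-5","stream":true};
+//	// a body that fails to unmarshal is returned unchanged.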
+
+// SelectAccountForModelWithExclusions selects an account supporting the requested model while excluding specified accounts.
+func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) {
+	// Check the forced platform from the context first (/antigravity route)
+	var platform string
+	forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string)
+	if hasForcePlatform && forcePlatform != "" {
+		platform = forcePlatform
+	} else if groupID != nil {
+		group, resolvedGroupID, err := s.resolveGatewayGroup(ctx, groupID)
+		if err != nil {
+			return nil, err
+		}
+		groupID = resolvedGroupID
+		ctx = s.withGroupContext(ctx, group)
+		platform = group.Platform
+	} else {
+		// Without a group, only the native anthropic platform is used
+		platform = PlatformAnthropic
+	}
+
+	// anthropic/gemini groups support mixed scheduling (including antigravity accounts with mixed_scheduling enabled).
+	// Note: forced-platform mode does not use mixed scheduling.
+	if (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform {
+		return s.selectAccountWithMixedScheduling(ctx, groupID, sessionHash, requestedModel, excludedIDs, platform)
+	}
+
+	// antigravity groups, forced-platform mode, and group-less requests use single-platform selection.
+	// Note: forced-platform mode must still honor group restrictions; there is no fallback to an all-platform query.
+	return s.selectAccountForModelWithPlatform(ctx, groupID, sessionHash, requestedModel, excludedIDs, platform)
+}
+
+// SelectAccountWithLoadAwareness selects account with load-awareness and wait plan.
+func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) {
+	cfg := s.schedulingConfig()
+	var stickyAccountID int64
+	if sessionHash != "" && s.cache != nil {
+		if accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash); err == nil {
+			stickyAccountID = accountID
+		}
+	}
+
+	// Check the Claude Code client restriction (may replace groupID with the fallback group)
+	group, groupID, err := s.checkClaudeCodeRestriction(ctx, groupID)
+	if err != nil {
+		return nil, err
+	}
+	ctx = s.withGroupContext(ctx, group)
+
+	if s.concurrencyService == nil || !cfg.LoadBatchEnabled {
+		account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs)
+		if err != nil {
+			return nil, err
+		}
+		result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency)
+		if err == nil && result.Acquired {
+			return &AccountSelectionResult{
+				Account:     account,
+				Acquired:    true,
+				ReleaseFunc: result.ReleaseFunc,
+			}, nil
+		}
+		if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil {
+			waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID)
+			if waitingCount < cfg.StickySessionMaxWaiting {
+				return &AccountSelectionResult{
+					Account: account,
+					WaitPlan: &AccountWaitPlan{
+						AccountID:      account.ID,
+						MaxConcurrency: account.Concurrency,
+						Timeout:        cfg.StickySessionWaitTimeout,
+						MaxWaiting:     cfg.StickySessionMaxWaiting,
+					},
+				}, nil
+			}
+		}
+		return &AccountSelectionResult{
+			Account: account,
+			WaitPlan: &AccountWaitPlan{
+				AccountID:      account.ID,
+				MaxConcurrency: account.Concurrency,
+				Timeout:        cfg.FallbackWaitTimeout,
+				MaxWaiting:     cfg.FallbackMaxWaiting,
+			},
+		}, nil
+	}
+
+	platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID, group)
+	if err != nil {
+		return nil, err
+	}
+	preferOAuth := platform == PlatformGemini
+
+	accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform)
+	if err != nil {
+		return nil, err
+	}
+	if len(accounts) == 0 {
+		return nil, errors.New("no available accounts")
+	}
+
+	isExcluded := func(accountID int64) bool {
+		if excludedIDs == nil {
+			return false
+		}
+		_, excluded := excludedIDs[accountID]
+		return excluded
+	}
+
+	// ============ Layer 1: sticky session first ============
+	if sessionHash != "" && s.cache != nil {
+		accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash)
+		if err == nil && accountID > 0 && !isExcluded(accountID) {
+			// A sticky hit only applies within the currently schedulable candidate set.
+			accountByID := make(map[int64]*Account, len(accounts))
+			for i := range accounts {
+				accountByID[accounts[i].ID] = &accounts[i]
+			}
+			account, ok := accountByID[accountID]
+			if ok && s.isAccountInGroup(account, groupID) &&
+				s.isAccountAllowedForPlatform(account, platform, useMixed) &&
+				account.IsSchedulableForModel(requestedModel) &&
+				(requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) {
+				result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency)
+				if err == nil && result.Acquired {
+					_ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL)
+					return &AccountSelectionResult{
+						Account:     account,
+						Acquired:    true,
+						ReleaseFunc: result.ReleaseFunc,
+					}, nil
+				}
+
+				waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID)
+				if waitingCount < cfg.StickySessionMaxWaiting {
+					return &AccountSelectionResult{
+						Account: account,
+						WaitPlan: &AccountWaitPlan{
+							AccountID:      accountID,
+							MaxConcurrency: account.Concurrency,
+							Timeout:        cfg.StickySessionWaitTimeout,
+							MaxWaiting:     cfg.StickySessionMaxWaiting,
+						},
+					}, nil
+				}
+			}
+		}
+	}
+
+	// ============ Layer 2: load-aware selection ============
+	candidates := make([]*Account, 0, len(accounts))
+	for i := range accounts {
+		acc := &accounts[i]
+		if isExcluded(acc.ID) {
+			continue
+		}
+		// Scheduler snapshots can be temporarily stale (bucket rebuild is throttled);
+		// re-check schedulability here so recently rate-limited/overloaded accounts
+		// are not selected again before the bucket is rebuilt.
+		if !acc.IsSchedulable() {
+			continue
+		}
+		if !s.isAccountAllowedForPlatform(acc, platform, useMixed) {
+			continue
+		}
+		if !acc.IsSchedulableForModel(requestedModel) {
+			continue
+		}
+		if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) {
+			continue
+		}
+		candidates = append(candidates, acc)
+	}
+
+	if len(candidates) == 0 {
+		return nil, errors.New("no available accounts")
+	}
+
+	accountLoads := make([]AccountWithConcurrency, 0, len(candidates))
+	for _, acc := range candidates {
+		accountLoads = append(accountLoads, AccountWithConcurrency{
+			ID:             acc.ID,
+			MaxConcurrency: acc.Concurrency,
+		})
+	}
+
+	loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads)
+	if err != nil {
+		if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, groupID, sessionHash, preferOAuth); ok {
+			return result, nil
+		}
+	} else {
+		type accountWithLoad struct {
+			account  *Account
+			loadInfo *AccountLoadInfo
+		}
+		var available []accountWithLoad
+		for _, acc := range candidates {
+			loadInfo := loadMap[acc.ID]
+			if loadInfo == nil {
+				loadInfo = &AccountLoadInfo{AccountID: acc.ID}
+			}
+			if loadInfo.LoadRate < 100 {
+				available = append(available, accountWithLoad{
+					account:  acc,
+					loadInfo: loadInfo,
+				})
+			}
+		}
+
+		if len(available) > 0 {
+			sort.SliceStable(available, func(i, j int) bool {
+				a, b := available[i], available[j]
+				if a.account.Priority != b.account.Priority {
+					return a.account.Priority < b.account.Priority
+				}
+				if a.loadInfo.LoadRate != b.loadInfo.LoadRate {
+					return a.loadInfo.LoadRate < b.loadInfo.LoadRate
+				}
+				switch {
+				case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil:
+					return true
+				case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil:
+					return false
+				case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil:
+					if preferOAuth && a.account.Type != b.account.Type {
+						return a.account.Type == AccountTypeOAuth
+					}
+					return false
+				default:
+					return a.account.LastUsedAt.Before(*b.account.LastUsedAt)
+				}
+			})
+
+			for _, item := range available {
+				result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency)
+				if err == nil && result.Acquired {
+					if sessionHash != "" && s.cache != nil {
+						_ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, item.account.ID, stickySessionTTL)
+					}
+					return &AccountSelectionResult{
+						Account:     item.account,
+						Acquired:    true,
+						ReleaseFunc: result.ReleaseFunc,
+					}, nil
+				}
+			}
+		}
+	}
+
+	// ============ Layer 3: fallback queuing ============
+	sortAccountsByPriorityAndLastUsed(candidates, preferOAuth)
+	// candidates is non-empty here; queue on the best-ranked account.
+	for _, acc := range candidates {
+		return &AccountSelectionResult{
+			Account: acc,
+			WaitPlan: &AccountWaitPlan{
+				AccountID:      acc.ID,
+				MaxConcurrency: acc.Concurrency,
+				Timeout:        cfg.FallbackWaitTimeout,
+				MaxWaiting:     cfg.FallbackMaxWaiting,
+			},
+		}, nil
+	}
+	return nil, errors.New("no available accounts")
+}
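+
+// Worked example of the three layers above (hypothetical numbers): with two
+// candidates of equal priority, A at 80% load and B at 20% load, layer 2
+// orders B first and tries to acquire its slot; if every candidate reports a
+// load rate >= 100, layer 3 queues on the best-ranked account using the
+// FallbackWaitTimeout/FallbackMaxWaiting values from the scheduling config.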
+
+func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, groupID *int64, sessionHash string, preferOAuth bool) (*AccountSelectionResult, bool) {
+	ordered := append([]*Account(nil), candidates...)
+	sortAccountsByPriorityAndLastUsed(ordered, preferOAuth)
+
+	for _, acc := range ordered {
+		result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency)
+		if err == nil && result.Acquired {
+			if sessionHash != "" && s.cache != nil {
+				_ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, acc.ID, stickySessionTTL)
+			}
+			return &AccountSelectionResult{
+				Account:     acc,
+				Acquired:    true,
+				ReleaseFunc: result.ReleaseFunc,
+			}, true
+		}
+	}
+
+	return nil, false
+}
+
+func (s *GatewayService) schedulingConfig() config.GatewaySchedulingConfig {
+	if s.cfg != nil {
+		return s.cfg.Gateway.Scheduling
+	}
+	return config.GatewaySchedulingConfig{
+		StickySessionMaxWaiting:  3,
+		StickySessionWaitTimeout: 45 * time.Second,
+		FallbackWaitTimeout:      30 * time.Second,
+		FallbackMaxWaiting:       100,
+		LoadBatchEnabled:         true,
+		SlotCleanupInterval:      30 * time.Second,
+	}
+}
+
+func (s *GatewayService) withGroupContext(ctx context.Context, group *Group) context.Context {
+	if !IsGroupContextValid(group) {
+		return ctx
+	}
+	if existing, ok := ctx.Value(ctxkey.Group).(*Group); ok && existing != nil && existing.ID == group.ID && IsGroupContextValid(existing) {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxkey.Group, group)
+}
+
+func (s *GatewayService) groupFromContext(ctx context.Context, groupID int64) *Group {
+	if group, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(group) && group.ID == groupID {
+		return group
+	}
+	return nil
+}
+
+func (s *GatewayService) resolveGroupByID(ctx context.Context, groupID int64) (*Group, error) {
+	if group := s.groupFromContext(ctx, groupID); group != nil {
+		return group, nil
+	}
+	group, err := s.groupRepo.GetByIDLite(ctx, groupID)
+	if err != nil {
+		return nil, fmt.Errorf("get group failed: %w", err)
+	}
+	return group, nil
+}
+
+func (s *GatewayService) resolveGatewayGroup(ctx context.Context, groupID *int64) (*Group, *int64, error) {
+	if groupID == nil {
+		return nil, nil, nil
+	}
+
+	currentID := *groupID
+	visited := map[int64]struct{}{}
+	for {
+		if _, seen := visited[currentID]; seen {
+			return nil, nil, fmt.Errorf("fallback group cycle detected")
+		}
+		visited[currentID] = struct{}{}
+
+		group, err := s.resolveGroupByID(ctx, currentID)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if !group.ClaudeCodeOnly || IsClaudeCodeClient(ctx) {
+			return group, &currentID, nil
+		}
+
+		if group.FallbackGroupID == nil {
+			return nil, nil, ErrClaudeCodeOnly
+		}
+		currentID = *group.FallbackGroupID
+	}
+}
+
+// checkClaudeCodeRestriction enforces a group's Claude Code client restriction.
+// If the group has claude_code_only enabled and the request is not from a Claude Code client:
+//   - with a fallback group: returns the fallback group's ID
+//   - without one: returns ErrClaudeCodeOnly
+func (s *GatewayService) checkClaudeCodeRestriction(ctx context.Context, groupID *int64) (*Group, *int64, error) {
+	if groupID == nil {
+		return nil, groupID, nil
+	}
+
+	// Forced-platform mode skips the Claude Code restriction
+	if _, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string); hasForcePlatform {
+		return nil, groupID, nil
+	}
+
+	group, resolvedID, err := s.resolveGatewayGroup(ctx, groupID)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return group, resolvedID, nil
+}
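+
+// Illustrative walk of resolveGatewayGroup (hypothetical IDs): group 5 has
+// claude_code_only=true and fallback_group_id=7, so a non-Claude-Code request
+// for group 5 resolves to group 7. A chain such as 5 -> 7 -> 5 trips the
+// visited set and fails with "fallback group cycle detected" instead of looping.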
+
+func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64, group *Group) (string, bool, error) {
+	forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string)
+	if hasForcePlatform && forcePlatform != "" {
+		return forcePlatform, true, nil
+	}
+	if group != nil {
+		return group.Platform, false, nil
+	}
+	if groupID != nil {
+		group, err := s.resolveGroupByID(ctx, *groupID)
+		if err != nil {
+			return "", false, err
+		}
+		return group.Platform, false, nil
+	}
+	return PlatformAnthropic, false, nil
+}
+
+func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) {
+	if s.schedulerSnapshot != nil {
+		return s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform)
+	}
+	useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform
+	if useMixed {
+		platforms := []string{platform, PlatformAntigravity}
+		var accounts []Account
+		var err error
+		if groupID != nil {
+			accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, platforms)
+		} else {
+			accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms)
+		}
+		if err != nil {
+			return nil, useMixed, err
+		}
+		filtered := make([]Account, 0, len(accounts))
+		for _, acc := range accounts {
+			if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() {
+				continue
+			}
+			filtered = append(filtered, acc)
+		}
+		return filtered, useMixed, nil
+	}
+
+	var accounts []Account
+	var err error
+	if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple {
+		accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform)
+	} else if groupID != nil {
+		accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, platform)
+		// An empty group yields an empty list; the caller handles the error. No fallback to an all-platform query.
+	} else {
+		accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform)
+	}
+	if err != nil {
+		return nil, useMixed, err
+	}
+	return accounts, useMixed, nil
+}
+
+func (s *GatewayService) isAccountAllowedForPlatform(account *Account, platform string, useMixed bool) bool {
+	if account == nil {
+		return false
+	}
+	if useMixed {
+		if account.Platform == platform {
+			return true
+		}
+		return account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()
+	}
+	return account.Platform == platform
+}
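+
+// Mixed-scheduling example (hypothetical accounts): an anthropic group holding
+// {anthropic A, antigravity B with mixed_scheduling on, antigravity C with it
+// off} schedules A and B while C is filtered out; under a forced platform
+// (/antigravity route) mixing is disabled and only platform-exact accounts pass.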
+
+// isAccountInGroup checks if the account belongs to the specified group.
+// Returns true if groupID is nil (no group restriction) or account belongs to the group.
+func (s *GatewayService) isAccountInGroup(account *Account, groupID *int64) bool {
+	if groupID == nil {
+		return true // no group restriction
+	}
+	if account == nil {
+		return false
+	}
+	for _, ag := range account.AccountGroups {
+		if ag.GroupID == *groupID {
+			return true
+		}
+	}
+	return false
+}
+
+func (s *GatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) {
+	if s.concurrencyService == nil {
+		return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil
+	}
+	return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency)
+}
+
+func (s *GatewayService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) {
+	if s.schedulerSnapshot != nil {
+		return s.schedulerSnapshot.GetAccount(ctx, accountID)
+	}
+	return s.accountRepo.GetByID(ctx, accountID)
+}
+
+func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) {
+	sort.SliceStable(accounts, func(i, j int) bool {
+		a, b := accounts[i], accounts[j]
+		if a.Priority != b.Priority {
+			return a.Priority < b.Priority
+		}
+		switch {
+		case a.LastUsedAt == nil && b.LastUsedAt != nil:
+			return true
+		case a.LastUsedAt != nil && b.LastUsedAt == nil:
+			return false
+		case a.LastUsedAt == nil && b.LastUsedAt == nil:
+			if preferOAuth && a.Type != b.Type {
+				return a.Type == AccountTypeOAuth
+			}
+			return false
+		default:
+			return a.LastUsedAt.Before(*b.LastUsedAt)
+		}
+	})
+}
+
+// selectAccountForModelWithPlatform picks a single-platform account (fully isolated).
+func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) {
+	preferOAuth := platform == PlatformGemini
+	// 1. Look up the sticky session
+	if sessionHash != "" && s.cache != nil {
+		accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash)
+		if err == nil && accountID > 0 {
+			if _, excluded := excludedIDs[accountID]; !excluded {
+				account, err := s.getSchedulableAccount(ctx, accountID)
+				// Verify group membership and platform match (a sticky session must not cross groups or platforms)
+				if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) {
+					if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil {
+						log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err)
+					}
+					return account, nil
+				}
+			}
+		}
+	}
+
+	// 2. Fetch the schedulable account list (single platform)
+	forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string)
+	if hasForcePlatform && forcePlatform == "" {
+		hasForcePlatform = false
+	}
+	accounts, _, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform)
+	if err != nil {
+		return nil, fmt.Errorf("query accounts failed: %w", err)
+	}
+
+	// 3. Pick by priority + least recently used (honoring model support)
+	var selected *Account
+	for i := range accounts {
+		acc := &accounts[i]
+		if _, excluded := excludedIDs[acc.ID]; excluded {
+			continue
+		}
+		// Scheduler snapshots can be temporarily stale; re-check schedulability here to
+		// avoid selecting accounts that were recently rate-limited/overloaded.
+		if !acc.IsSchedulable() {
+			continue
+		}
+		if !acc.IsSchedulableForModel(requestedModel) {
+			continue
+		}
+		if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) {
+			continue
+		}
+		if selected == nil {
+			selected = acc
+			continue
+		}
+		if acc.Priority < selected.Priority {
+			selected = acc
+		} else if acc.Priority == selected.Priority {
+			switch {
+			case acc.LastUsedAt == nil && selected.LastUsedAt != nil:
+				selected = acc
+			case acc.LastUsedAt != nil && selected.LastUsedAt == nil:
+				// keep selected (never used is preferred)
+			case acc.LastUsedAt == nil && selected.LastUsedAt == nil:
+				if preferOAuth && acc.Type != selected.Type && acc.Type == AccountTypeOAuth {
+					selected = acc
+				}
+			default:
+				if acc.LastUsedAt.Before(*selected.LastUsedAt) {
+					selected = acc
+				}
+			}
+		}
+	}
+
+	if selected == nil {
+		if requestedModel != "" {
+			return nil, fmt.Errorf("no available accounts supporting model: %s", requestedModel)
+		}
+		return nil, errors.New("no available accounts")
+	}
+
+	// 4. Establish the sticky binding
+	if sessionHash != "" && s.cache != nil {
+		if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.ID, stickySessionTTL); err != nil {
+			log.Printf("set session account failed: session=%s account_id=%d err=%v", sessionHash, selected.ID, err)
+		}
+	}
+
+	return selected, nil
+}
+
+// selectAccountWithMixedScheduling picks an account with mixed-scheduling support:
+// it queries native-platform accounts plus antigravity accounts that have mixed_scheduling enabled.
+func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, nativePlatform string) (*Account, error) {
+	preferOAuth := nativePlatform == PlatformGemini
+
+	// 1. Look up the sticky session
+	if sessionHash != "" && s.cache != nil {
+		accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash)
+		if err == nil && accountID > 0 {
+			if _, excluded := excludedIDs[accountID]; !excluded {
+				account, err := s.getSchedulableAccount(ctx, accountID)
+				// Verify group membership and validity: native platform matches directly; antigravity requires mixed scheduling
+				if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) {
+					if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) {
+						if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil {
+							log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err)
+						}
+						return account, nil
+					}
+				}
+			}
+		}
+	}
+
+	// 2. Fetch the schedulable account list
+	accounts, _, err := s.listSchedulableAccounts(ctx, groupID, nativePlatform, false)
+	if err != nil {
+		return nil, fmt.Errorf("query accounts failed: %w", err)
+	}
+
+	// 3. Pick by priority + least recently used (honoring model support and mixed scheduling)
+	var selected *Account
+	for i := range accounts {
+		acc := &accounts[i]
+		if _, excluded := excludedIDs[acc.ID]; excluded {
+			continue
+		}
+		// Scheduler snapshots can be temporarily stale; re-check schedulability here to
+		// avoid selecting accounts that were recently rate-limited/overloaded.
+		if !acc.IsSchedulable() {
+			continue
+		}
+		// Filter: native platform passes directly; antigravity must have mixed scheduling enabled
+		if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() {
+			continue
+		}
+		if !acc.IsSchedulableForModel(requestedModel) {
+			continue
+		}
+		if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) {
+			continue
+		}
+		if selected == nil {
+			selected = acc
+			continue
+		}
+		if acc.Priority < selected.Priority {
+			selected = acc
+		} else if acc.Priority == selected.Priority {
+			switch {
+			case acc.LastUsedAt == nil && selected.LastUsedAt != nil:
+				selected = acc
+			case acc.LastUsedAt != nil && selected.LastUsedAt == nil:
+				// keep selected (never used is preferred)
+			case acc.LastUsedAt == nil && selected.LastUsedAt == nil:
+				if preferOAuth && acc.Platform == PlatformGemini && selected.Platform == PlatformGemini && acc.Type != selected.Type && acc.Type == AccountTypeOAuth {
+					selected = acc
+				}
+			default:
+				if acc.LastUsedAt.Before(*selected.LastUsedAt) {
+					selected = acc
+				}
+			}
+		}
+	}
+
+	if selected == nil {
+		if requestedModel != "" {
+			return nil, fmt.Errorf("no available accounts supporting model: %s", requestedModel)
+		}
+		return nil, errors.New("no available accounts")
+	}
+
+	// 4. Establish the sticky binding
+	if sessionHash != "" && s.cache != nil {
+		if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.ID, stickySessionTTL); err != nil {
+			log.Printf("set session account failed: session=%s account_id=%d err=%v", sessionHash, selected.ID, err)
+		}
+	}
+
+	return selected, nil
+}
+
+// isModelSupportedByAccount checks model support based on the account's platform.
+func (s *GatewayService) isModelSupportedByAccount(account *Account, requestedModel string) bool {
+	if account.Platform == PlatformAntigravity {
+		// The Antigravity platform has a dedicated model-support check
+		return IsAntigravityModelSupported(requestedModel)
+	}
+	// Other platforms use the account's own model-support check
+	return account.IsModelSupported(requestedModel)
+}
+
+// IsAntigravityModelSupported reports whether the Antigravity platform supports the given model.
+// All claude- and gemini- prefixed models are supported via mapping or pass-through.
+func IsAntigravityModelSupported(requestedModel string) bool {
+	return strings.HasPrefix(requestedModel, "claude-") ||
+		strings.HasPrefix(requestedModel, "gemini-")
+}
+
+// GetAccessToken returns the account's credential.
+func (s *GatewayService) GetAccessToken(ctx context.Context, account *Account) (string, string, error) {
+	switch account.Type {
+	case AccountTypeOAuth, AccountTypeSetupToken:
+		// Both oauth and setup-token use OAuth token flow
+		return s.getOAuthToken(ctx, account)
+	case AccountTypeAPIKey:
+		apiKey := account.GetCredential("api_key")
+		if apiKey == "" {
+			return "", "", errors.New("api_key not found in credentials")
+		}
+		return apiKey, "apikey", nil
+	default:
+		return "", "", fmt.Errorf("unsupported account type: %s", account.Type)
+	}
+}
+
+func (s *GatewayService) getOAuthToken(ctx context.Context, account *Account) (string, string, error) {
+	accessToken := account.GetCredential("access_token")
+	if accessToken == "" {
+		return "", "", errors.New("access_token not found in credentials")
+	}
+	// Token refresh is handled by the background TokenRefreshService; only the current token is returned here
+	return accessToken, "oauth", nil
+}
+
+// Retry-related constants
+const (
+	// Maximum number of attempts (including the first request). Too many retries pile up requests and exhaust resources.
+	maxRetryAttempts = 5
+
+	// Exponential backoff: the wait after the Nth failure = retryBaseDelay * 2^(N-1), capped at retryMaxDelay.
+	retryBaseDelay = 300 * time.Millisecond
+	retryMaxDelay  = 3 * time.Second
+
+	// Maximum total retry time (request time plus backoff waits).
+	// Guards against goroutines piling up for long periods in extreme cases.
+	maxRetryElapsed = 10 * time.Second
+)
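+
+// Worked example of the constants above, as computed by retryBackoffDelay
+// below: attempt 1 fails -> wait 300ms, attempt 2 -> 600ms, attempt 3 -> 1.2s,
+// attempt 4 -> 2.4s; attempt 5 would be 4.8s but is capped at retryMaxDelay
+// (3s), and the loop stops altogether once maxRetryElapsed (10s) is spent.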
+
+func (s *GatewayService) shouldRetryUpstreamError(account *Account, statusCode int) bool {
+	// OAuth/Setup Token accounts: retry only on 403
+	if account.IsOAuth() {
+		return statusCode == 403
+	}
+
+	// API-key accounts: retry error codes the account is not configured to handle
+	return !account.ShouldHandleErrorCode(statusCode)
+}
+
+// shouldFailoverUpstreamError determines whether an upstream error should trigger account failover.
+func (s *GatewayService) shouldFailoverUpstreamError(statusCode int) bool {
+	switch statusCode {
+	case 401, 403, 429, 529:
+		return true
+	default:
+		return statusCode >= 500
+	}
+}
+
+func retryBackoffDelay(attempt int) time.Duration {
+	// attempt starts at 1: the attempt-th request has just failed, so wait before issuing attempt+1.
+	if attempt <= 0 {
+		return retryBaseDelay
+	}
+	delay := retryBaseDelay * time.Duration(1<<(attempt-1))
+	if delay > retryMaxDelay {
+		return retryMaxDelay
+	}
+	return delay
+}
+
+func sleepWithContext(ctx context.Context, d time.Duration) error {
+	if d <= 0 {
+		return nil
+	}
+	timer := time.NewTimer(d)
+	defer func() {
+		if !timer.Stop() {
+			select {
+			case <-timer.C:
+			default:
+			}
+		}
+	}()
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-timer.C:
+		return nil
+	}
+}
+
+// isClaudeCodeClient reports whether the request comes from a Claude Code client.
+// Simplified check: the User-Agent matches and metadata.user_id is present.
+func isClaudeCodeClient(userAgent string, metadataUserID string) bool {
+	if metadataUserID == "" {
+		return false
+	}
+	return claudeCliUserAgentRe.MatchString(userAgent)
+}
+
+// systemIncludesClaudeCodePrompt checks whether system already contains the Claude Code prompt.
+// Prefix matching covers the supported variants (standard, Agent SDK, etc.).
+func systemIncludesClaudeCodePrompt(system any) bool {
+	switch v := system.(type) {
+	case string:
+		return hasClaudeCodePrefix(v)
+	case []any:
+		for _, item := range v {
+			if m, ok := item.(map[string]any); ok {
+				if text, ok := m["text"].(string); ok && hasClaudeCodePrefix(text) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// hasClaudeCodePrefix checks whether the text starts with one of the Claude Code prompt prefixes.
+func hasClaudeCodePrefix(text string) bool {
+	for _, prefix := range claudeCodePromptPrefixes {
+		if strings.HasPrefix(text, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// injectClaudeCodePrompt injects the Claude Code prompt at the head of system.
+// Handles the null, string, and array forms.
+func injectClaudeCodePrompt(body []byte, system any) []byte {
+	claudeCodeBlock := map[string]any{
+		"type":          "text",
+		"text":          claudeCodeSystemPrompt,
+		"cache_control": map[string]string{"type": "ephemeral"},
+	}
+
+	var newSystem []any
+
+	switch v := system.(type) {
+	case nil:
+		newSystem = []any{claudeCodeBlock}
+	case string:
+		if v == "" || v == claudeCodeSystemPrompt {
+			newSystem = []any{claudeCodeBlock}
+		} else {
+			newSystem = []any{claudeCodeBlock, map[string]any{"type": "text", "text": v}}
+		}
+	case []any:
+		newSystem = make([]any, 0, len(v)+1)
+		newSystem = append(newSystem, claudeCodeBlock)
+		for _, item := range v {
+			if m, ok := item.(map[string]any); ok {
+				if text, ok := m["text"].(string); ok && text == claudeCodeSystemPrompt {
+					continue
+				}
+			}
+			newSystem = append(newSystem, item)
+		}
+	default:
+		newSystem = []any{claudeCodeBlock}
+	}
+
+	result, err := sjson.SetBytes(body, "system", newSystem)
+	if err != nil {
+		log.Printf("Warning: failed to inject Claude Code prompt: %v", err)
+		return body
+	}
+	return result
+}
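+
+// Shape sketch for the injection above: a plain-string system such as
+//
+//	"system": "Answer briefly."
+//
+// becomes
+//
+//	"system": [
+//	  {"type":"text","text":"You are Claude Code, ...","cache_control":{"type":"ephemeral"}},
+//	  {"type":"text","text":"Answer briefly."}
+//	]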
+
+// enforceCacheControlLimit enforces the cache_control block limit (at most 4).
+// When over the limit, cache_control is removed from messages first, protecting the cache controls in system.
+func enforceCacheControlLimit(body []byte) []byte {
+	var data map[string]any
+	if err := json.Unmarshal(body, &data); err != nil {
+		return body
+	}
+
+	// Strip illegal cache_control from thinking blocks (the field is unsupported there)
+	removeCacheControlFromThinkingBlocks(data)
+
+	// Count the current cache_control blocks
+	count := countCacheControlBlocks(data)
+	if count <= maxCacheControlBlocks {
+		return body
+	}
+
+	// Over the limit: remove from messages first, then from system
+	for count > maxCacheControlBlocks {
+		if removeCacheControlFromMessages(data) {
+			count--
+			continue
+		}
+		if removeCacheControlFromSystem(data) {
+			count--
+			continue
+		}
+		break
+	}
+
+	result, err := json.Marshal(data)
+	if err != nil {
+		return body
+	}
+	return result
+}
+
+// countCacheControlBlocks counts the cache_control blocks in system and messages.
+// Note: thinking blocks do not support cache_control and are skipped.
+func countCacheControlBlocks(data map[string]any) int {
+	count := 0
+
+	// Count blocks in system
+	if system, ok := data["system"].([]any); ok {
+		for _, item := range system {
+			if m, ok := item.(map[string]any); ok {
+				// thinking blocks do not support cache_control; skip
+				if blockType, _ := m["type"].(string); blockType == "thinking" {
+					continue
+				}
+				if _, has := m["cache_control"]; has {
+					count++
+				}
+			}
+		}
+	}
+
+	// Count blocks in messages
+	if messages, ok := data["messages"].([]any); ok {
+		for _, msg := range messages {
+			if msgMap, ok := msg.(map[string]any); ok {
+				if content, ok := msgMap["content"].([]any); ok {
+					for _, item := range content {
+						if m, ok := item.(map[string]any); ok {
+							// thinking blocks do not support cache_control; skip
+							if blockType, _ := m["type"].(string); blockType == "thinking" {
+								continue
+							}
+							if _, has := m["cache_control"]; has {
+								count++
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return count
+}
+
+// removeCacheControlFromMessages removes one cache_control from messages (scanning from the front).
+// Returns true if one was removed, false if none remained.
+// Note: thinking blocks are skipped (they do not support cache_control).
+func removeCacheControlFromMessages(data map[string]any) bool {
+	messages, ok := data["messages"].([]any)
+	if !ok {
+		return false
+	}
+
+	for _, msg := range messages {
+		msgMap, ok := msg.(map[string]any)
+		if !ok {
+			continue
+		}
+		content, ok := msgMap["content"].([]any)
+		if !ok {
+			continue
+		}
+		for _, item := range content {
+			if m, ok := item.(map[string]any); ok {
+				// thinking blocks do not support cache_control; skip
+				if blockType, _ := m["type"].(string); blockType == "thinking" {
+					continue
+				}
+				if _, has := m["cache_control"]; has {
+					delete(m, "cache_control")
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// removeCacheControlFromSystem removes one cache_control from system (scanning from the tail to protect the injected prompt).
+// Returns true if one was removed, false if none remained.
+// Note: thinking blocks are skipped (they do not support cache_control).
+func removeCacheControlFromSystem(data map[string]any) bool {
+	system, ok := data["system"].([]any)
+	if !ok {
+		return false
+	}
+
+	// Remove from the tail first, protecting the Claude Code prompt injected at the head
+	for i := len(system) - 1; i >= 0; i-- {
+		if m, ok := system[i].(map[string]any); ok {
+			// thinking blocks do not support cache_control; skip
+			if blockType, _ := m["type"].(string); blockType == "thinking" {
+				continue
+			}
+			if _, has := m["cache_control"]; has {
+				delete(m, "cache_control")
+				return true
+			}
+		}
+	}
+	return false
+}
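+
+// Worked example (hypothetical request): with 2 cache_control blocks in system
+// and 4 in messages (6 total), the loop above deletes 2 from the front of
+// messages and stops at the limit of 4; system blocks are only given up once
+// messages has none left, and the tail-first order spares the injected prompt.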
+
+// removeCacheControlFromThinkingBlocks strips illegal cache_control fields from every thinking block.
+// thinking blocks do not support cache_control; this function guarantees none carries the field.
+func removeCacheControlFromThinkingBlocks(data map[string]any) {
+	// Clean thinking blocks in system
+	if system, ok := data["system"].([]any); ok {
+		for _, item := range system {
+			if m, ok := item.(map[string]any); ok {
+				if blockType, _ := m["type"].(string); blockType == "thinking" {
+					if _, has := m["cache_control"]; has {
+						delete(m, "cache_control")
+						log.Printf("[Warning] Removed illegal cache_control from thinking block in system")
+					}
+				}
+			}
+		}
+	}
+
+	// Clean thinking blocks in messages
+	if messages, ok := data["messages"].([]any); ok {
+		for msgIdx, msg := range messages {
+			if msgMap, ok := msg.(map[string]any); ok {
+				if content, ok := msgMap["content"].([]any); ok {
+					for contentIdx, item := range content {
+						if m, ok := item.(map[string]any); ok {
+							if blockType, _ := m["type"].(string); blockType == "thinking" {
+								if _, has := m["cache_control"]; has {
+									delete(m, "cache_control")
+									log.Printf("[Warning] Removed illegal cache_control from thinking block in messages[%d].content[%d]", msgIdx, contentIdx)
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// Forward proxies the request to the Claude API.
+func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, parsed *ParsedRequest) (*ForwardResult, error) {
+	startTime := time.Now()
+	if parsed == nil {
+		return nil, fmt.Errorf("parse request: empty request")
+	}
+
+	body := parsed.Body
+	reqModel := parsed.Model
+	reqStream := parsed.Stream
+
+	// Smart-inject the Claude Code system prompt (only OAuth/SetupToken accounts need it).
+	// Conditions: 1) OAuth/SetupToken account 2) not a Claude Code client 3) not a Haiku model 4) system lacks the prompt.
+	if account.IsOAuth() &&
+		!isClaudeCodeClient(c.GetHeader("User-Agent"), parsed.MetadataUserID) &&
+		!strings.Contains(strings.ToLower(reqModel), "haiku") &&
+		!systemIncludesClaudeCodePrompt(parsed.System) {
+		body = injectClaudeCodePrompt(body, parsed.System)
+	}
+
+	// Enforce the cache_control block limit (at most 4)
+	body = enforceCacheControlLimit(body)
+
+	// Apply model mapping (apikey accounts only)
+	originalModel := reqModel
+	if account.Type == AccountTypeAPIKey {
+		mappedModel := account.GetMappedModel(reqModel)
+		if mappedModel != reqModel {
+			// Rewrite the model name in the request body
+			body = s.replaceModelInBody(body, mappedModel)
+			reqModel = mappedModel
+			log.Printf("Model mapping applied: %s -> %s (account: %s)", originalModel, mappedModel, account.Name)
+		}
+	}
+
+	// Fetch the credential
+	token, tokenType, err := s.GetAccessToken(ctx, account)
+	if err != nil {
+		return nil, err
+	}
+
+	// Resolve the proxy URL
+	proxyURL := ""
+	if account.ProxyID != nil && account.Proxy != nil {
+		proxyURL = account.Proxy.URL()
+	}
+
+	// Retry loop
+	var resp *http.Response
+	retryStart := time.Now()
+	for attempt := 1; attempt <= maxRetryAttempts; attempt++ {
+		// Build the upstream request (rebuilt on every retry because the body must be re-read)
+		upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel)
+		// Capture upstream request body for ops retry of this attempt.
+		c.Set(OpsUpstreamRequestBodyKey, string(body))
+
+		if err != nil {
+			return nil, err
+		}
+
+		// Send the request
+		resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency)
+		if err != nil {
+			if resp != nil && resp.Body != nil {
+				_ = resp.Body.Close()
+			}
+			// Ensure the client receives an error response (handlers assume Forward writes on non-failover errors).
+			safeErr := sanitizeUpstreamErrorMessage(err.Error())
+			setOpsUpstreamError(c, 0, safeErr, "")
+			appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+				Platform:           account.Platform,
+				AccountID:          account.ID,
+				AccountName:        account.Name,
+				UpstreamStatusCode: 0,
+				Kind:               "request_error",
+				Message:            safeErr,
+			})
+			c.JSON(http.StatusBadGateway, gin.H{
+				"type": "error",
+				"error": gin.H{
+					"type":    "upstream_error",
+					"message": "Upstream request failed",
+				},
+			})
+			return nil, fmt.Errorf("upstream request failed: %s", safeErr)
+		}
+
+		// First detect thinking-block signature errors (400) and retry once
+		if resp.StatusCode == 400 {
+			respBody, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+			if readErr == nil {
+				_ = resp.Body.Close()
+
+				if s.isThinkingBlockSignatureError(respBody) {
+					appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+						Platform:           account.Platform,
+						AccountID:          account.ID,
+						AccountName:        account.Name,
+						UpstreamStatusCode: resp.StatusCode,
+						UpstreamRequestID:  resp.Header.Get("x-request-id"),
+						Kind:               "signature_error",
+						Message:            extractUpstreamErrorMessage(respBody),
+						Detail: func() string {
+							if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+								return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)
+							}
+							return ""
+						}(),
+					})
+
+					looksLikeToolSignatureError := func(msg string) bool {
+						m := strings.ToLower(msg)
+						return strings.Contains(m, "tool_use") ||
+							strings.Contains(m, "tool_result") ||
+							strings.Contains(m, "functioncall") ||
+							strings.Contains(m, "function_call") ||
+							strings.Contains(m, "functionresponse") ||
+							strings.Contains(m, "function_response")
+					}
+
+					// Avoid issuing extra requests once the retry budget is spent
+					if time.Since(retryStart) >= maxRetryElapsed {
+						resp.Body = io.NopCloser(bytes.NewReader(respBody))
+						break
+					}
+					log.Printf("Account %d: detected thinking block signature error, retrying with filtered thinking blocks", account.ID)
+
+					// Conservative two-stage fallback:
+					// 1) Disable thinking + thinking->text (preserve content)
+					// 2) Only if upstream still errors AND error message points to tool/function signature issues:
+					//    also downgrade tool_use/tool_result blocks to text.
+
+					filteredBody := FilterThinkingBlocksForRetry(body)
+					retryReq, buildErr := s.buildUpstreamRequest(ctx, c, account, filteredBody, token, tokenType, reqModel)
+					if buildErr == nil {
+						retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency)
+						if retryErr == nil {
+							if retryResp.StatusCode < 400 {
+								log.Printf("Account %d: signature error retry succeeded (thinking downgraded)", account.ID)
+								resp = retryResp
+								break
+							}
+
+							retryRespBody, retryReadErr := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20))
+							_ = retryResp.Body.Close()
+							if retryReadErr == nil && retryResp.StatusCode == 400 && s.isThinkingBlockSignatureError(retryRespBody) {
+								appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+									Platform:           account.Platform,
+									AccountID:          account.ID,
+									AccountName:        account.Name,
+									UpstreamStatusCode: retryResp.StatusCode,
+									UpstreamRequestID:  retryResp.Header.Get("x-request-id"),
+									Kind:               "signature_retry_thinking",
+									Message:            extractUpstreamErrorMessage(retryRespBody),
+									Detail: func() string {
+										if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+											return truncateString(string(retryRespBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)
+										}
+										return ""
+									}(),
+								})
+								msg2 := extractUpstreamErrorMessage(retryRespBody)
+								if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed {
+									log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID)
+									filteredBody2 := FilterSignatureSensitiveBlocksForRetry(body)
+									retryReq2, buildErr2 := s.buildUpstreamRequest(ctx, c, account, filteredBody2, token, tokenType, reqModel)
+									if buildErr2 == nil {
+										retryResp2, retryErr2 := s.httpUpstream.Do(retryReq2, proxyURL, account.ID, account.Concurrency)
+										if retryErr2 == nil {
+											resp = retryResp2
+											break
+										}
+										if retryResp2 != nil && retryResp2.Body != nil {
+											_ = retryResp2.Body.Close()
+										}
+										appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+											Platform:           account.Platform,
+											AccountID:          account.ID,
+											AccountName:        account.Name,
+											UpstreamStatusCode: 0,
+											Kind:               "signature_retry_tools_request_error",
+											Message:            sanitizeUpstreamErrorMessage(retryErr2.Error()),
+										})
+										log.Printf("Account %d: tool-downgrade signature retry failed: %v", account.ID, retryErr2)
+									} else {
+										log.Printf("Account %d: tool-downgrade signature retry build failed: %v", account.ID, buildErr2)
+									}
+								}
+							}
+
+							// Fall back to the original retry response context.
+							resp = &http.Response{
+								StatusCode: retryResp.StatusCode,
+								Header:     retryResp.Header.Clone(),
+								Body:       io.NopCloser(bytes.NewReader(retryRespBody)),
+							}
+							break
+						}
+						if retryResp != nil && retryResp.Body != nil {
+							_ = retryResp.Body.Close()
+						}
+						log.Printf("Account %d: signature error retry failed: %v", account.ID, retryErr)
+					} else {
+						log.Printf("Account %d: signature error retry build request failed: %v", account.ID, buildErr)
+					}
+
+					// Retry failed: restore original response body and continue handling.
+					resp.Body = io.NopCloser(bytes.NewReader(respBody))
+					break
+				}
+				// Not a thinking-signature error; restore the response body
+				resp.Body = io.NopCloser(bytes.NewReader(respBody))
+			}
+		}
+
+		// Check whether a generic retry is needed (400 excluded; it was special-cased above)
+		if resp.StatusCode >= 400 && resp.StatusCode != 400 && s.shouldRetryUpstreamError(account, resp.StatusCode) {
+			if attempt < maxRetryAttempts {
+				elapsed := time.Since(retryStart)
+				if elapsed >= maxRetryElapsed {
+					break
+				}
+
+				delay := retryBackoffDelay(attempt)
+				remaining := maxRetryElapsed - elapsed
+				if delay > remaining {
+					delay = remaining
+				}
+				if delay <= 0 {
+					break
+				}
+
+				respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+				_ = resp.Body.Close()
+				appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+					Platform:           account.Platform,
+					AccountID:          account.ID,
+					AccountName:        account.Name,
+					UpstreamStatusCode: resp.StatusCode,
+					UpstreamRequestID:  resp.Header.Get("x-request-id"),
+					Kind:               "retry",
+					Message:            extractUpstreamErrorMessage(respBody),
+					Detail: func() string {
+						if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+							return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)
+						}
+						return ""
+					}(),
+				})
+				log.Printf("Account %d: upstream error %d, retry %d/%d after %v (elapsed=%v/%v)",
+					account.ID, resp.StatusCode, attempt, maxRetryAttempts, delay, elapsed, maxRetryElapsed)
+				if err := sleepWithContext(ctx, delay); err != nil {
+					return nil, err
+				}
+				continue
+			}
+			// The final attempt failed too; fall through to retry-exhausted handling
+			break
+		}
+
+		// No retry needed (success or a non-retryable error); leave the loop.
+		// DEBUG: dump response headers (to inspect rate-limit information)
+		if account.Platform == PlatformGemini && resp.StatusCode < 400 {
+			log.Printf("[DEBUG] Gemini API Response Headers for account %d:", account.ID)
+			for k, v := range resp.Header {
+				log.Printf("[DEBUG] %s: %v", k, v)
+			}
+		}
+		break
+	}
+	if resp == nil || resp.Body == nil {
+		return nil, errors.New("upstream request failed: empty response")
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	// Handle retry exhaustion
+	if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(account, resp.StatusCode) {
+		if s.shouldFailoverUpstreamError(resp.StatusCode) {
+			respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+			_ = resp.Body.Close()
+			resp.Body = io.NopCloser(bytes.NewReader(respBody))
+
+			s.handleRetryExhaustedSideEffects(ctx, resp, account)
+			appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+				Platform:           account.Platform,
+				AccountID:          account.ID,
+				AccountName:        account.Name,
+				UpstreamStatusCode: resp.StatusCode,
+				UpstreamRequestID:  resp.Header.Get("x-request-id"),
+				Kind:               "retry_exhausted_failover",
+				Message:            extractUpstreamErrorMessage(respBody),
+				Detail: func() string {
+					if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+						return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)
+					}
+					return ""
+				}(),
+			})
+			return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode}
+		}
+		return s.handleRetryExhaustedError(ctx, resp, c, account)
+	}
+
+	// Handle errors eligible for account failover
+	if resp.StatusCode >= 400 && s.shouldFailoverUpstreamError(resp.StatusCode) {
+		respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+		_ = resp.Body.Close()
+		resp.Body = io.NopCloser(bytes.NewReader(respBody))
+
+		s.handleFailoverSideEffects(ctx, resp, account)
+		appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+			Platform:           account.Platform,
+			AccountID:          account.ID,
+			UpstreamStatusCode: resp.StatusCode,
+			UpstreamRequestID:  resp.Header.Get("x-request-id"),
+			Kind:               "failover",
+			Message:            extractUpstreamErrorMessage(respBody),
+			Detail: func() string {
+				if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+					return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)
+				}
+				return ""
+			}(),
+		})
+		return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode}
+	}
+
+	// Handle error responses (non-retryable errors)
+	if resp.StatusCode >= 400 {
+		// Optional: trigger failover for certain 400s (off by default to preserve semantics)
+		if resp.StatusCode == 400 && s.cfg != nil && s.cfg.Gateway.FailoverOn400 {
+			respBody, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+			if readErr != nil {
+				// ReadAll failed, fall back to normal error handling without consuming the stream
+				return s.handleErrorResponse(ctx, resp, c, account)
+			}
+			_ = resp.Body.Close()
+			resp.Body = io.NopCloser(bytes.NewReader(respBody))
+
+			if s.shouldFailoverOn400(respBody) {
+				upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody))
+				upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
+				upstreamDetail := ""
+				if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+					maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes
+					if maxBytes <= 0 {
+						maxBytes = 2048
+					}
+					upstreamDetail = truncateString(string(respBody), maxBytes)
+				}
+				appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+					Platform:           account.Platform,
+					AccountID:          account.ID,
+					AccountName:        account.Name,
+					UpstreamStatusCode: resp.StatusCode,
+					UpstreamRequestID:  resp.Header.Get("x-request-id"),
+					Kind:               "failover_on_400",
+					Message:            upstreamMsg,
+					Detail:             upstreamDetail,
+				})
+
+				if s.cfg.Gateway.LogUpstreamErrorBody {
+					log.Printf(
+						"Account %d: 400 error, attempting failover: %s",
+						account.ID,
+						truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes),
+					)
+				} else {
+					log.Printf("Account %d: 400 error, attempting failover", account.ID)
+				}
+				s.handleFailoverSideEffects(ctx, resp, account)
+				return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode}
+			}
+		}
+		return s.handleErrorResponse(ctx, resp, c, account)
+	}
+
+	// Handle the successful response
+	var usage *ClaudeUsage
+	var firstTokenMs *int
+	var clientDisconnect bool
+	if reqStream {
+		streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, reqModel)
+		if err != nil {
+			if err.Error() == "have error in stream" {
+				return nil, &UpstreamFailoverError{
+					StatusCode: 403,
+				}
+			}
+			return nil, err
+		}
+		usage = streamResult.usage
+		firstTokenMs = streamResult.firstTokenMs
+		clientDisconnect = streamResult.clientDisconnect
+	} else {
+		usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, reqModel)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &ForwardResult{
+		RequestID:        resp.Header.Get("x-request-id"),
+		Usage:            *usage,
+		Model:            originalModel, // use the original model for billing and logging
+		Stream:           reqStream,
+		Duration:         time.Since(startTime),
+		FirstTokenMs:     firstTokenMs,
+		ClientDisconnect: clientDisconnect,
+	}, nil
+}
+
+func (s *GatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) {
+	// Determine the target URL
+	targetURL := claudeAPIURL
+	if account.Type == AccountTypeAPIKey {
+		baseURL := account.GetBaseURL()
+		if baseURL != "" {
+			validatedURL, err := s.validateUpstreamBaseURL(baseURL)
+			if err != nil {
+				return nil, err
+			}
+			targetURL = validatedURL + "/v1/messages"
+		}
+	}
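+
+	// Example of the URL resolution above (hypothetical account config): an
+	// apikey account whose base_url validates as "https://relay.example.com"
+	// posts to "https://relay.example.com/v1/messages"; OAuth accounts always
+	// use claudeAPIURL, including its "?beta=true" query string.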
+
+	// OAuth accounts: apply the unified fingerprint
+	var fingerprint *Fingerprint
+	if account.IsOAuth() && s.identityService != nil {
+		// 1. Get or create the fingerprint (includes a randomly generated ClientID)
+		fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header)
+		if err != nil {
+			log.Printf("Warning: failed to get fingerprint for account %d: %v", account.ID, err)
+			// On failure, degrade to passing the original headers through
+		} else {
+			fingerprint = fp
+
+			// 2. Rewrite metadata.user_id (requires the fingerprint's ClientID and the account's account_uuid)
+			accountUUID := account.GetExtraString("account_uuid")
+			if accountUUID != "" && fp.ClientID != "" {
+				if newBody, err := s.identityService.RewriteUserID(body, account.ID, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 {
+					body = newBody
+				}
+			}
+		}
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+
+	// Set the auth header
+	if tokenType == "oauth" {
+		req.Header.Set("authorization", "Bearer "+token)
+	} else {
+		req.Header.Set("x-api-key", token)
+	}
+
+	// Pass through whitelisted headers
+	for key, values := range c.Request.Header {
+		lowerKey := strings.ToLower(key)
+		if allowedHeaders[lowerKey] {
+			for _, v := range values {
+				req.Header.Add(key, v)
+			}
+		}
+	}
+
+	// OAuth accounts: apply the cached fingerprint to the request headers (overriding pass-through values)
+	if fingerprint != nil {
+		s.identityService.ApplyFingerprint(req, fingerprint)
+	}
+
+	// Ensure required headers exist
+	if req.Header.Get("content-type") == "" {
+		req.Header.Set("content-type", "application/json")
+	}
+	if req.Header.Get("anthropic-version") == "" {
+		req.Header.Set("anthropic-version", "2023-06-01")
+	}
+
+	// Handle the anthropic-beta header (OAuth accounts need special handling)
+	if tokenType == "oauth" {
+		req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta")))
+	} else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" {
+		// API key: only top up on demand when the request explicitly uses beta features and the client sent none (off by default)
+		if requestNeedsBetaFeatures(body) {
+			if beta := defaultAPIKeyBetaHeader(body); beta != "" {
+				req.Header.Set("anthropic-beta", beta)
+			}
+		}
+	}
+
+	return req, nil
+}
+
+// getBetaHeader builds the anthropic-beta header.
+// OAuth accounts must end up including oauth-2025-04-20.
+func (s *GatewayService) getBetaHeader(modelID string, clientBetaHeader string) string {
+	// The client supplied anthropic-beta
+	if clientBetaHeader != "" {
+		// Already contains the oauth beta: return as-is
+		if strings.Contains(clientBetaHeader, claude.BetaOAuth) {
+			return clientBetaHeader
+		}
+
+		// The oauth beta must be added
+		parts := strings.Split(clientBetaHeader, ",")
+		for i, p := range parts {
+			parts[i] = strings.TrimSpace(p)
+		}
+
+		// Insert the oauth beta right after claude-code-20250219
+		claudeCodeIdx := -1
+		for i, p := range parts {
+			if p == claude.BetaClaudeCode {
+				claudeCodeIdx = i
+				break
+			}
+		}
+
+		if claudeCodeIdx >= 0 {
+			// Insert after claude-code
+			newParts := make([]string, 0, len(parts)+1)
+			newParts = append(newParts, parts[:claudeCodeIdx+1]...)
+			newParts = append(newParts, claude.BetaOAuth)
+			newParts = append(newParts, parts[claudeCodeIdx+1:]...)
+			return strings.Join(newParts, ",")
+		}
+
+		// No claude-code entry: put the oauth beta first
+		return claude.BetaOAuth + "," + clientBetaHeader
+	}
+
+	// The client sent nothing: derive from the model.
+	// Haiku models do not need the claude-code beta.
+	if strings.Contains(strings.ToLower(modelID), "haiku") {
+		return claude.HaikuBetaHeader
+	}
+
+	return claude.DefaultBetaHeader
+}
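+
+// Merge example for getBetaHeader above (illustrative header values): a client
+// header "claude-code-20250219,interleaved-thinking-2025-05-14" has the oauth
+// beta spliced in right after the claude-code entry, yielding
+// "claude-code-20250219,<oauth beta>,interleaved-thinking-2025-05-14", so the
+// claude-code token stays first while the oauth beta is guaranteed present.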
+ return strings.Join(newParts, ",") + } + + // 没有claude-code,放在第一位 + return claude.BetaOAuth + "," + clientBetaHeader + } + + // 客户端没传,根据模型生成 + // haiku 模型不需要 claude-code beta + if strings.Contains(strings.ToLower(modelID), "haiku") { + return claude.HaikuBetaHeader + } + + return claude.DefaultBetaHeader +} + +func requestNeedsBetaFeatures(body []byte) bool { + tools := gjson.GetBytes(body, "tools") + if tools.Exists() && tools.IsArray() && len(tools.Array()) > 0 { + return true + } + if strings.EqualFold(gjson.GetBytes(body, "thinking.type").String(), "enabled") { + return true + } + return false +} + +func defaultAPIKeyBetaHeader(body []byte) string { + modelID := gjson.GetBytes(body, "model").String() + if strings.Contains(strings.ToLower(modelID), "haiku") { + return claude.APIKeyHaikuBetaHeader + } + return claude.APIKeyBetaHeader +} + +func truncateForLog(b []byte, maxBytes int) string { + if maxBytes <= 0 { + maxBytes = 2048 + } + if len(b) > maxBytes { + b = b[:maxBytes] + } + s := string(b) + // 保持一行,避免污染日志格式 + s = strings.ReplaceAll(s, "\n", "\\n") + s = strings.ReplaceAll(s, "\r", "\\r") + return s +} + +// isThinkingBlockSignatureError 检测是否是thinking block相关错误 +// 这类错误可以通过过滤thinking blocks并重试来解决 +func (s *GatewayService) isThinkingBlockSignatureError(respBody []byte) bool { + msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if msg == "" { + return false + } + + // Log for debugging + log.Printf("[SignatureCheck] Checking error message: %s", msg) + + // 检测signature相关的错误(更宽松的匹配) + // 例如: "Invalid `signature` in `thinking` block", "***.signature" 等 + if strings.Contains(msg, "signature") { + log.Printf("[SignatureCheck] Detected signature error") + return true + } + + // 检测 thinking block 顺序/类型错误 + // 例如: "Expected `thinking` or `redacted_thinking`, but found `text`" + if strings.Contains(msg, "expected") && (strings.Contains(msg, "thinking") || strings.Contains(msg, "redacted_thinking")) { + log.Printf("[SignatureCheck] Detected thinking block type error") + return true + } + + // 检测空消息内容错误(可能是过滤 thinking blocks 后导致的) + // 例如: "all messages must have non-empty content" + if strings.Contains(msg, "non-empty content") || strings.Contains(msg, "empty content") { + log.Printf("[SignatureCheck] Detected empty content error") + return true + } + + return false +} + +func (s *GatewayService) shouldFailoverOn400(respBody []byte) bool { + // 只对“可能是兼容性差异导致”的 400 允许切换,避免无意义重试。 + // 默认保守:无法识别则不切换。 + msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(respBody))) + if msg == "" { + return false + } + + // 缺少/错误的 beta header:换账号/链路可能成功(尤其是混合调度时)。 + // 更精确匹配 beta 相关的兼容性问题,避免误触发切换。 + if strings.Contains(msg, "anthropic-beta") || + strings.Contains(msg, "beta feature") || + strings.Contains(msg, "requires beta") { + return true + } + + // thinking/tool streaming 等兼容性约束(常见于中间转换链路) + if strings.Contains(msg, "thinking") || strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") { + return true + } + if strings.Contains(msg, "tool_use") || strings.Contains(msg, "tool_result") || strings.Contains(msg, "tools") { + return true + } + + return false +} + +func extractUpstreamErrorMessage(body []byte) string { + // Claude 风格:{"type":"error","error":{"type":"...","message":"..."}} + if m := gjson.GetBytes(body, "error.message").String(); strings.TrimSpace(m) != "" { + inner := strings.TrimSpace(m) + // 有些上游会把完整 JSON 作为字符串塞进 message + if strings.HasPrefix(inner, "{") { + if innerMsg := gjson.Get(inner, "error.message").String(); 
strings.TrimSpace(innerMsg) != "" { + return innerMsg + } + } + return m + } + + // 兜底:尝试顶层 message + return gjson.GetBytes(body, "message").String() +} + +func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + // Enrich Ops error logs with upstream status + message, and optionally a truncated body snippet. + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + // 处理上游错误,标记账号状态 + shouldDisable := false + if s.rateLimitService != nil { + shouldDisable = s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body) + } + if shouldDisable { + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + + // 记录上游错误响应体摘要便于排障(可选:由配置控制;不回显到客户端) + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + + // 根据状态码返回适当的自定义错误响应(不透传上游详细信息) + var errType, errMsg string + var statusCode int + + switch resp.StatusCode { + case 400: + c.Data(http.StatusBadRequest, "application/json", body) + summary := upstreamMsg + if summary == "" { + summary = truncateForLog(body, 512) + } + if summary == "" { + return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, summary) + case 401: + statusCode = http.StatusBadGateway + errType = "upstream_error" + errMsg = "Upstream authentication failed, please contact administrator" + case 403: + statusCode = http.StatusBadGateway + errType = "upstream_error" + errMsg = "Upstream access forbidden, please contact administrator" + case 429: + statusCode = http.StatusTooManyRequests + errType = "rate_limit_error" + errMsg = "Upstream rate limit exceeded, please retry later" + case 529: + statusCode = http.StatusServiceUnavailable + errType = "overloaded_error" + errMsg = "Upstream service overloaded, please retry later" + case 500, 502, 503, 504: + statusCode = http.StatusBadGateway + errType = "upstream_error" + errMsg = "Upstream service temporarily unavailable" + default: + statusCode = http.StatusBadGateway + errType = "upstream_error" + errMsg = "Upstream request failed" + } + + // 返回自定义错误响应 + c.JSON(statusCode, gin.H{ + "type": "error", + "error": gin.H{ + "type": errType, + "message": errMsg, + }, + }) + + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) +} + +func (s *GatewayService) handleRetryExhaustedSideEffects(ctx context.Context, resp *http.Response, account *Account) { + body, _ := 
io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + statusCode := resp.StatusCode + + // OAuth/Setup Token 账号的 403:标记账号异常 + if account.IsOAuth() && statusCode == 403 { + s.rateLimitService.HandleUpstreamError(ctx, account, statusCode, resp.Header, body) + log.Printf("Account %d: marked as error after %d retries for status %d", account.ID, maxRetryAttempts, statusCode) + } else { + // API Key 未配置错误码:不标记账号状态 + log.Printf("Account %d: upstream error %d after %d retries (not marking account)", account.ID, statusCode, maxRetryAttempts) + } +} + +func (s *GatewayService) handleFailoverSideEffects(ctx context.Context, resp *http.Response, account *Account) { + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body) +} + +// handleRetryExhaustedError 处理重试耗尽后的错误 +// OAuth 403:标记账号异常 +// API Key 未配置错误码:仅返回错误,不标记账号 +func (s *GatewayService) handleRetryExhaustedError(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { + // Capture upstream error body before side-effects consume the stream. + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + + s.handleRetryExhaustedSideEffects(ctx, resp, account) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry_exhausted", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream error %d retries_exhausted (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + + // 返回统一的重试耗尽错误响应 + c.JSON(http.StatusBadGateway, gin.H{ + "type": "error", + "error": gin.H{ + "type": "upstream_error", + "message": "Upstream request failed after retries", + }, + }) + + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d (retries exhausted)", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d (retries exhausted) message=%s", resp.StatusCode, upstreamMsg) +} + +// streamingResult 流式响应结果 +type streamingResult struct { + usage *ClaudeUsage + firstTokenMs *int + clientDisconnect bool // 客户端是否在流式传输过程中断开 +} + +func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string) (*streamingResult, error) { + // 更新5h窗口状态 + s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) + + if s.cfg != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + } + + // 设置SSE响应头 + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + + // 透传其他响应头 + if v := 
resp.Header.Get("x-request-id"); v != "" {
+		c.Header("x-request-id", v)
+	}
+
+	w := c.Writer
+	flusher, ok := w.(http.Flusher)
+	if !ok {
+		return nil, errors.New("streaming not supported")
+	}
+
+	usage := &ClaudeUsage{}
+	var firstTokenMs *int
+	scanner := bufio.NewScanner(resp.Body)
+	// Use a larger buffer so long SSE lines can be scanned
+	maxLineSize := defaultMaxLineSize
+	if s.cfg != nil && s.cfg.Gateway.MaxLineSize > 0 {
+		maxLineSize = s.cfg.Gateway.MaxLineSize
+	}
+	scanner.Buffer(make([]byte, 64*1024), maxLineSize)
+
+	type scanEvent struct {
+		line string
+		err  error
+	}
+	// Read upstream in a dedicated goroutine so a blocking read cannot stall timeout/keepalive handling
+	events := make(chan scanEvent, 16)
+	done := make(chan struct{})
+	sendEvent := func(ev scanEvent) bool {
+		select {
+		case events <- ev:
+			return true
+		case <-done:
+			return false
+		}
+	}
+	var lastReadAt int64
+	atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
+	go func() {
+		defer close(events)
+		for scanner.Scan() {
+			atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
+			if !sendEvent(scanEvent{line: scanner.Text()}) {
+				return
+			}
+		}
+		if err := scanner.Err(); err != nil {
+			_ = sendEvent(scanEvent{err: err})
+		}
+	}()
+	defer close(done)
+
+	streamInterval := time.Duration(0)
+	if s.cfg != nil && s.cfg.Gateway.StreamDataIntervalTimeout > 0 {
+		streamInterval = time.Duration(s.cfg.Gateway.StreamDataIntervalTimeout) * time.Second
+	}
+	// Only monitor the upstream data-interval timeout; a blocked downstream write must not cause a false positive
+	var intervalTicker *time.Ticker
+	if streamInterval > 0 {
+		intervalTicker = time.NewTicker(streamInterval)
+		defer intervalTicker.Stop()
+	}
+	var intervalCh <-chan time.Time
+	if intervalTicker != nil {
+		intervalCh = intervalTicker.C
+	}
+
+	// Send the error event at most once so repeated writes cannot corrupt the protocol (best-effort notice to the client on write failure)
+	errorEventSent := false
+	sendErrorEvent := func(reason string) {
+		if errorEventSent {
+			return
+		}
+		errorEventSent = true
+		_, _ = fmt.Fprintf(w, "event: error\ndata: {\"error\":\"%s\"}\n\n", reason)
+		flusher.Flush()
+	}
+
+	needModelReplace := originalModel != mappedModel
+	clientDisconnected := false // set on client disconnect; upstream is still drained to collect complete usage
+
+	for {
+		select {
+		case ev, ok := <-events:
+			if !ok {
+				// Upstream finished; return the result
+				return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: clientDisconnected}, nil
+			}
+			if ev.err != nil {
+				// Detect context cancellation (a client disconnect cancels the context, which in turn aborts the upstream read)
+				if errors.Is(ev.err, context.Canceled) || errors.Is(ev.err, context.DeadlineExceeded) {
+					log.Printf("Context canceled during streaming, returning collected usage")
+					return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil
+				}
+				// Disconnect was already detected via a failed write and upstream errored too; return the usage collected so far
+				if clientDisconnected {
+					log.Printf("Upstream read error after client disconnect: %v, returning collected usage", ev.err)
+					return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil
+				}
+				// Client is still connected; handle the error normally
+				if errors.Is(ev.err, bufio.ErrTooLong) {
+					log.Printf("SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, ev.err)
+					sendErrorEvent("response_too_large")
+					return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, ev.err
+				}
+				sendErrorEvent("stream_read_error")
+				return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", ev.err)
+			}
+			line := ev.line
+			if line == "event: error" {
+				// Upstream sent an error event; if the client already disconnected, still return the collected usage
+				if clientDisconnected {
+					return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil
+				}
+				return nil, errors.New("have error in stream")
+			}
+
+			// Extract data from SSE line (supports both "data: " and "data:" formats)
+			var data string
+			if sseDataRe.MatchString(line) {
+				data = sseDataRe.ReplaceAllString(line, "")
+				// If a model mapping is in effect, rewrite the model field in the response
+				if needModelReplace {
+					line = s.replaceModelInSSELine(line, mappedModel, originalModel)
+				}
+			}
+
+			// Write to the client (data and non-data lines are handled uniformly)
+			if !clientDisconnected {
+				if _, err := fmt.Fprintf(w, "%s\n", line); err != nil {
+					clientDisconnected = true
+					log.Printf("Client disconnected during streaming, continuing to drain upstream for billing")
+				} else {
+					flusher.Flush()
+				}
+			}
+
+			// Parse usage whether or not the client disconnected (data lines only)
+			if data != "" {
+				if firstTokenMs == nil && data != "[DONE]" {
+					ms := int(time.Since(startTime).Milliseconds())
+					firstTokenMs = &ms
+				}
+				s.parseSSEUsage(data, usage)
+			}
+
+		case <-intervalCh:
+			lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt))
+			if time.Since(lastRead) < streamInterval {
+				continue
+			}
+			if clientDisconnected {
+				// Client disconnected and upstream timed out as well; return the collected usage
+				log.Printf("Upstream timeout after client disconnect, returning collected usage")
+				return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil
+			}
+			log.Printf("Stream data interval timeout: account=%d model=%s interval=%s", account.ID, originalModel, streamInterval)
+			// Handle the stream timeout; this may mark the account temporarily unschedulable or errored
+			if s.rateLimitService != nil {
+				s.rateLimitService.HandleStreamTimeout(ctx, account, originalModel)
+			}
+			sendErrorEvent("stream_timeout")
+			return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout")
+		}
+	}
+}
+
+// replaceModelInSSELine rewrites the model field in an SSE data line
+func (s *GatewayService) replaceModelInSSELine(line, fromModel, toModel string) string {
+	if !sseDataRe.MatchString(line) {
+		return line
+	}
+	data := sseDataRe.ReplaceAllString(line, "")
+	if data == "" || data == "[DONE]" {
+		return line
+	}
+
+	var event map[string]any
+	if err := json.Unmarshal([]byte(data), &event); err != nil {
+		return line
+	}
+
+	// Only rewrite message.model in message_start events
+	if event["type"] != "message_start" {
+		return line
+	}
+
+	msg, ok := event["message"].(map[string]any)
+	if !ok {
+		return line
+	}
+
+	model, ok := msg["model"].(string)
+	if !ok || model != fromModel {
+		return line
+	}
+
+	msg["model"] = toModel
+	newData, err := json.Marshal(event)
+	if err != nil {
+		return line
+	}
+
+	return "data: " + string(newData)
+}
+
+func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) {
+	// Parse message_start for input tokens (standard Claude API format)
+	var msgStart struct {
+		Type    string `json:"type"`
+		Message struct {
+			Usage ClaudeUsage `json:"usage"`
+		} `json:"message"`
+	}
+	if json.Unmarshal([]byte(data), &msgStart) == nil && msgStart.Type == "message_start" {
+		usage.InputTokens = msgStart.Message.Usage.InputTokens
+		usage.CacheCreationInputTokens = msgStart.Message.Usage.CacheCreationInputTokens
+		usage.CacheReadInputTokens = msgStart.Message.Usage.CacheReadInputTokens
+	}
+
+	// Parse message_delta for tokens (compatible with APIs such as GLM that put all usage in the delta)
+	var msgDelta struct {
+		Type  string `json:"type"`
+		Usage struct {
+			InputTokens              int `json:"input_tokens"`
+			OutputTokens             int `json:"output_tokens"`
+			CacheCreationInputTokens int `json:"cache_creation_input_tokens"`
+			CacheReadInputTokens     int `json:"cache_read_input_tokens"`
+		} `json:"usage"`
+	}
+	if json.Unmarshal([]byte(data), &msgDelta) == nil && msgDelta.Type == "message_delta" {
+		// output_tokens always comes from message_delta
+		usage.OutputTokens = msgDelta.Usage.OutputTokens
+
+		// Fall back to message_delta when message_start carried no value (GLM-style APIs)
+		if
usage.InputTokens == 0 { + usage.InputTokens = msgDelta.Usage.InputTokens + } + if usage.CacheCreationInputTokens == 0 { + usage.CacheCreationInputTokens = msgDelta.Usage.CacheCreationInputTokens + } + if usage.CacheReadInputTokens == 0 { + usage.CacheReadInputTokens = msgDelta.Usage.CacheReadInputTokens + } + } +} + +func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*ClaudeUsage, error) { + // 更新5h窗口状态 + s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + // 解析usage + var response struct { + Usage ClaudeUsage `json:"usage"` + } + if err := json.Unmarshal(body, &response); err != nil { + return nil, fmt.Errorf("parse response: %w", err) + } + + // 如果有模型映射,替换响应中的model字段 + if originalModel != mappedModel { + body = s.replaceModelInResponseBody(body, mappedModel, originalModel) + } + + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + + contentType := "application/json" + if s.cfg != nil && !s.cfg.Security.ResponseHeaders.Enabled { + if upstreamType := resp.Header.Get("Content-Type"); upstreamType != "" { + contentType = upstreamType + } + } + + // 写入响应 + c.Data(resp.StatusCode, contentType, body) + + return &response.Usage, nil +} + +// replaceModelInResponseBody 替换响应体中的model字段 +func (s *GatewayService) replaceModelInResponseBody(body []byte, fromModel, toModel string) []byte { + var resp map[string]any + if err := json.Unmarshal(body, &resp); err != nil { + return body + } + + model, ok := resp["model"].(string) + if !ok || model != fromModel { + return body + } + + resp["model"] = toModel + newBody, err := json.Marshal(resp) + if err != nil { + return body + } + + return newBody +} + +// RecordUsageInput 记录使用量的输入参数 +type RecordUsageInput struct { + Result *ForwardResult + APIKey *APIKey + User *User + Account *Account + Subscription *UserSubscription // 可选:订阅信息 + UserAgent string // 请求的 User-Agent + IPAddress string // 请求的客户端 IP 地址 +} + +// RecordUsage 记录使用量并扣费(或更新订阅用量) +func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInput) error { + result := input.Result + apiKey := input.APIKey + user := input.User + account := input.Account + subscription := input.Subscription + + // 获取费率倍数 + multiplier := s.cfg.Default.RateMultiplier + if apiKey.GroupID != nil && apiKey.Group != nil { + multiplier = apiKey.Group.RateMultiplier + } + + var cost *CostBreakdown + + // 根据请求类型选择计费方式 + if result.ImageCount > 0 { + // 图片生成计费 + var groupConfig *ImagePriceConfig + if apiKey.Group != nil { + groupConfig = &ImagePriceConfig{ + Price1K: apiKey.Group.ImagePrice1K, + Price2K: apiKey.Group.ImagePrice2K, + Price4K: apiKey.Group.ImagePrice4K, + } + } + cost = s.billingService.CalculateImageCost(result.Model, result.ImageSize, result.ImageCount, groupConfig, multiplier) + } else { + // Token 计费 + tokens := UsageTokens{ + InputTokens: result.Usage.InputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + } + var err error + cost, err = s.billingService.CalculateCost(result.Model, tokens, multiplier) + if err != nil { + log.Printf("Calculate cost failed: %v", err) + cost = &CostBreakdown{ActualCost: 0} + } + } + + // 判断计费方式:订阅模式 vs 余额模式 + isSubscriptionBilling := subscription != nil && apiKey.Group != nil && 
apiKey.Group.IsSubscriptionType() + billingType := BillingTypeBalance + if isSubscriptionBilling { + billingType = BillingTypeSubscription + } + + // 创建使用日志 + durationMs := int(result.Duration.Milliseconds()) + var imageSize *string + if result.ImageSize != "" { + imageSize = &result.ImageSize + } + accountRateMultiplier := account.BillingRateMultiplier() + usageLog := &UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: result.RequestID, + Model: result.Model, + InputTokens: result.Usage.InputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + InputCost: cost.InputCost, + OutputCost: cost.OutputCost, + CacheCreationCost: cost.CacheCreationCost, + CacheReadCost: cost.CacheReadCost, + TotalCost: cost.TotalCost, + ActualCost: cost.ActualCost, + RateMultiplier: multiplier, + AccountRateMultiplier: &accountRateMultiplier, + BillingType: billingType, + Stream: result.Stream, + DurationMs: &durationMs, + FirstTokenMs: result.FirstTokenMs, + ImageCount: result.ImageCount, + ImageSize: imageSize, + CreatedAt: time.Now(), + } + + // 添加 UserAgent + if input.UserAgent != "" { + usageLog.UserAgent = &input.UserAgent + } + + // 添加 IPAddress + if input.IPAddress != "" { + usageLog.IPAddress = &input.IPAddress + } + + // 添加分组和订阅关联 + if apiKey.GroupID != nil { + usageLog.GroupID = apiKey.GroupID + } + if subscription != nil { + usageLog.SubscriptionID = &subscription.ID + } + + inserted, err := s.usageLogRepo.Create(ctx, usageLog) + if err != nil { + log.Printf("Create usage log failed: %v", err) + } + + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + log.Printf("[SIMPLE MODE] Usage recorded (not billed): user=%d, tokens=%d", usageLog.UserID, usageLog.TotalTokens()) + s.deferredService.ScheduleLastUsedUpdate(account.ID) + return nil + } + + shouldBill := inserted || err != nil + + // 根据计费类型执行扣费 + if isSubscriptionBilling { + // 订阅模式:更新订阅用量(使用 TotalCost 原始费用,不考虑倍率) + if shouldBill && cost.TotalCost > 0 { + if err := s.userSubRepo.IncrementUsage(ctx, subscription.ID, cost.TotalCost); err != nil { + log.Printf("Increment subscription usage failed: %v", err) + } + // 异步更新订阅缓存 + s.billingCacheService.QueueUpdateSubscriptionUsage(user.ID, *apiKey.GroupID, cost.TotalCost) + } + } else { + // 余额模式:扣除用户余额(使用 ActualCost 考虑倍率后的费用) + if shouldBill && cost.ActualCost > 0 { + if err := s.userRepo.DeductBalance(ctx, user.ID, cost.ActualCost); err != nil { + log.Printf("Deduct balance failed: %v", err) + } + // 异步更新余额缓存 + s.billingCacheService.QueueDeductBalance(user.ID, cost.ActualCost) + } + } + + // Schedule batch update for account last_used_at + s.deferredService.ScheduleLastUsedUpdate(account.ID) + + return nil +} + +// ForwardCountTokens 转发 count_tokens 请求到上游 API +// 特点:不记录使用量、仅支持非流式响应 +func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, account *Account, parsed *ParsedRequest) error { + if parsed == nil { + s.countTokensError(c, http.StatusBadRequest, "invalid_request_error", "Request body is empty") + return fmt.Errorf("parse request: empty request") + } + + body := parsed.Body + reqModel := parsed.Model + + // Antigravity 账户不支持 count_tokens 转发,直接返回空值 + if account.Platform == PlatformAntigravity { + c.JSON(http.StatusOK, gin.H{"input_tokens": 0}) + return nil + } + + // 应用模型映射(仅对 apikey 类型账号) + if account.Type == AccountTypeAPIKey { + if reqModel != "" { + mappedModel := account.GetMappedModel(reqModel) + if 
mappedModel != reqModel { + body = s.replaceModelInBody(body, mappedModel) + reqModel = mappedModel + log.Printf("CountTokens model mapping applied: %s -> %s (account: %s)", parsed.Model, mappedModel, account.Name) + } + } + } + + // 获取凭证 + token, tokenType, err := s.GetAccessToken(ctx, account) + if err != nil { + s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Failed to get access token") + return err + } + + // 构建上游请求 + upstreamReq, err := s.buildCountTokensRequest(ctx, c, account, body, token, tokenType, reqModel) + if err != nil { + s.countTokensError(c, http.StatusInternalServerError, "api_error", "Failed to build request") + return err + } + + // 获取代理URL + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + // 发送请求 + resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + if err != nil { + setOpsUpstreamError(c, 0, sanitizeUpstreamErrorMessage(err.Error()), "") + s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Request failed") + return fmt.Errorf("upstream request failed: %w", err) + } + + // 读取响应体 + respBody, err := io.ReadAll(resp.Body) + _ = resp.Body.Close() + if err != nil { + s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Failed to read response") + return err + } + + // 检测 thinking block 签名错误(400)并重试一次(过滤 thinking blocks) + if resp.StatusCode == 400 && s.isThinkingBlockSignatureError(respBody) { + log.Printf("Account %d: detected thinking block signature error on count_tokens, retrying with filtered thinking blocks", account.ID) + + filteredBody := FilterThinkingBlocksForRetry(body) + retryReq, buildErr := s.buildCountTokensRequest(ctx, c, account, filteredBody, token, tokenType, reqModel) + if buildErr == nil { + retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) + if retryErr == nil { + resp = retryResp + respBody, err = io.ReadAll(resp.Body) + _ = resp.Body.Close() + if err != nil { + s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Failed to read response") + return err + } + } + } + } + + // 处理错误响应 + if resp.StatusCode >= 400 { + // 标记账号状态(429/529等) + s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + + // 记录上游错误摘要便于排障(不回显请求内容) + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "count_tokens upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + + // 返回简化的错误响应 + errMsg := "Upstream request failed" + switch resp.StatusCode { + case 429: + errMsg = "Rate limit exceeded" + case 529: + errMsg = "Service overloaded" + } + s.countTokensError(c, resp.StatusCode, "upstream_error", errMsg) + if upstreamMsg == "" { + return fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) + } + + // 透传成功响应 + c.Data(resp.StatusCode, "application/json", 
respBody) + return nil +} + +// buildCountTokensRequest 构建 count_tokens 上游请求 +func (s *GatewayService) buildCountTokensRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token, tokenType, modelID string) (*http.Request, error) { + // 确定目标 URL + targetURL := claudeAPICountTokensURL + if account.Type == AccountTypeAPIKey { + baseURL := account.GetBaseURL() + if baseURL != "" { + validatedURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, err + } + targetURL = validatedURL + "/v1/messages/count_tokens" + } + } + + // OAuth 账号:应用统一指纹和重写 userID + if account.IsOAuth() && s.identityService != nil { + fp, err := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + if err == nil { + accountUUID := account.GetExtraString("account_uuid") + if accountUUID != "" && fp.ClientID != "" { + if newBody, err := s.identityService.RewriteUserID(body, account.ID, accountUUID, fp.ClientID); err == nil && len(newBody) > 0 { + body = newBody + } + } + } + } + + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewReader(body)) + if err != nil { + return nil, err + } + + // 设置认证头 + if tokenType == "oauth" { + req.Header.Set("authorization", "Bearer "+token) + } else { + req.Header.Set("x-api-key", token) + } + + // 白名单透传 headers + for key, values := range c.Request.Header { + lowerKey := strings.ToLower(key) + if allowedHeaders[lowerKey] { + for _, v := range values { + req.Header.Add(key, v) + } + } + } + + // OAuth 账号:应用指纹到请求头 + if account.IsOAuth() && s.identityService != nil { + fp, _ := s.identityService.GetOrCreateFingerprint(ctx, account.ID, c.Request.Header) + if fp != nil { + s.identityService.ApplyFingerprint(req, fp) + } + } + + // 确保必要的 headers 存在 + if req.Header.Get("content-type") == "" { + req.Header.Set("content-type", "application/json") + } + if req.Header.Get("anthropic-version") == "" { + req.Header.Set("anthropic-version", "2023-06-01") + } + + // OAuth 账号:处理 anthropic-beta header + if tokenType == "oauth" { + req.Header.Set("anthropic-beta", s.getBetaHeader(modelID, c.GetHeader("anthropic-beta"))) + } else if s.cfg != nil && s.cfg.Gateway.InjectBetaForAPIKey && req.Header.Get("anthropic-beta") == "" { + // API-key:与 messages 同步的按需 beta 注入(默认关闭) + if requestNeedsBetaFeatures(body) { + if beta := defaultAPIKeyBetaHeader(body); beta != "" { + req.Header.Set("anthropic-beta", beta) + } + } + } + + return req, nil +} + +// countTokensError 返回 count_tokens 错误响应 +func (s *GatewayService) countTokensError(c *gin.Context, status int, errType, message string) { + c.JSON(status, gin.H{ + "type": "error", + "error": gin.H{ + "type": errType, + "message": message, + }, + }) +} + +func (s *GatewayService) validateUpstreamBaseURL(raw string) (string, error) { + if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { + normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.cfg.Security.URLAllowlist.UpstreamHosts, + RequireAllowlist: true, + AllowPrivate: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil +} + +// GetAvailableModels returns the list of models available for a group +// It aggregates model_mapping keys from all schedulable accounts in the group 
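+// Accounts without a model_mapping contribute nothing; if no schedulable account
+// in the group defines a mapping, nil is returned so callers can fall back to the
+// platform's default model list.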
+func (s *GatewayService) GetAvailableModels(ctx context.Context, groupID *int64, platform string) []string { + var accounts []Account + var err error + + if groupID != nil { + accounts, err = s.accountRepo.ListSchedulableByGroupID(ctx, *groupID) + } else { + accounts, err = s.accountRepo.ListSchedulable(ctx) + } + + if err != nil || len(accounts) == 0 { + return nil + } + + // Filter by platform if specified + if platform != "" { + filtered := make([]Account, 0) + for _, acc := range accounts { + if acc.Platform == platform { + filtered = append(filtered, acc) + } + } + accounts = filtered + } + + // Collect unique models from all accounts + modelSet := make(map[string]struct{}) + hasAnyMapping := false + + for _, acc := range accounts { + mapping := acc.GetModelMapping() + if len(mapping) > 0 { + hasAnyMapping = true + for model := range mapping { + modelSet[model] = struct{}{} + } + } + } + + // If no account has model_mapping, return nil (use default) + if !hasAnyMapping { + return nil + } + + // Convert to slice + models := make([]string, 0, len(modelSet)) + for model := range modelSet { + models = append(models, model) + } + + return models +} diff --git a/backend/internal/service/gateway_service_benchmark_test.go b/backend/internal/service/gateway_service_benchmark_test.go new file mode 100644 index 00000000..f15a85d6 --- /dev/null +++ b/backend/internal/service/gateway_service_benchmark_test.go @@ -0,0 +1,50 @@ +package service + +import ( + "strconv" + "testing" +) + +var benchmarkStringSink string + +// BenchmarkGenerateSessionHash_Metadata 关注 JSON 解析与正则匹配开销。 +func BenchmarkGenerateSessionHash_Metadata(b *testing.B) { + svc := &GatewayService{} + body := []byte(`{"metadata":{"user_id":"session_123e4567-e89b-12d3-a456-426614174000"},"messages":[{"content":"hello"}]}`) + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + parsed, err := ParseGatewayRequest(body) + if err != nil { + b.Fatalf("解析请求失败: %v", err) + } + benchmarkStringSink = svc.GenerateSessionHash(parsed) + } +} + +// BenchmarkExtractCacheableContent_System 关注字符串拼接路径的性能。 +func BenchmarkExtractCacheableContent_System(b *testing.B) { + svc := &GatewayService{} + req := buildSystemCacheableRequest(12) + + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkStringSink = svc.extractCacheableContent(req) + } +} + +func buildSystemCacheableRequest(parts int) *ParsedRequest { + systemParts := make([]any, 0, parts) + for i := 0; i < parts; i++ { + systemParts = append(systemParts, map[string]any{ + "text": "system_part_" + strconv.Itoa(i), + "cache_control": map[string]any{ + "type": "ephemeral", + }, + }) + } + return &ParsedRequest{ + System: systemParts, + HasSystem: true, + } +} diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go new file mode 100644 index 00000000..75de90f2 --- /dev/null +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -0,0 +1,2818 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "math" + mathrand "math/rand" + "net/http" + "regexp" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/pkg/googleapi" + "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" + "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" + + 
"github.com/gin-gonic/gin" +) + +const geminiStickySessionTTL = time.Hour + +const ( + geminiMaxRetries = 5 + geminiRetryBaseDelay = 1 * time.Second + geminiRetryMaxDelay = 16 * time.Second +) + +type GeminiMessagesCompatService struct { + accountRepo AccountRepository + groupRepo GroupRepository + cache GatewayCache + schedulerSnapshot *SchedulerSnapshotService + tokenProvider *GeminiTokenProvider + rateLimitService *RateLimitService + httpUpstream HTTPUpstream + antigravityGatewayService *AntigravityGatewayService + cfg *config.Config +} + +func NewGeminiMessagesCompatService( + accountRepo AccountRepository, + groupRepo GroupRepository, + cache GatewayCache, + schedulerSnapshot *SchedulerSnapshotService, + tokenProvider *GeminiTokenProvider, + rateLimitService *RateLimitService, + httpUpstream HTTPUpstream, + antigravityGatewayService *AntigravityGatewayService, + cfg *config.Config, +) *GeminiMessagesCompatService { + return &GeminiMessagesCompatService{ + accountRepo: accountRepo, + groupRepo: groupRepo, + cache: cache, + schedulerSnapshot: schedulerSnapshot, + tokenProvider: tokenProvider, + rateLimitService: rateLimitService, + httpUpstream: httpUpstream, + antigravityGatewayService: antigravityGatewayService, + cfg: cfg, + } +} + +// GetTokenProvider returns the token provider for OAuth accounts +func (s *GeminiMessagesCompatService) GetTokenProvider() *GeminiTokenProvider { + return s.tokenProvider +} + +func (s *GeminiMessagesCompatService) SelectAccountForModel(ctx context.Context, groupID *int64, sessionHash string, requestedModel string) (*Account, error) { + return s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, nil) +} + +func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) { + // 优先检查 context 中的强制平台(/antigravity 路由) + var platform string + forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) + if hasForcePlatform && forcePlatform != "" { + platform = forcePlatform + } else if groupID != nil { + // 根据分组 platform 决定查询哪种账号 + var group *Group + if ctxGroup, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(ctxGroup) && ctxGroup.ID == *groupID { + group = ctxGroup + } else { + var err error + group, err = s.groupRepo.GetByIDLite(ctx, *groupID) + if err != nil { + return nil, fmt.Errorf("get group failed: %w", err) + } + } + platform = group.Platform + } else { + // 无分组时只使用原生 gemini 平台 + platform = PlatformGemini + } + + // gemini 分组支持混合调度(包含启用了 mixed_scheduling 的 antigravity 账户) + // 注意:强制平台模式不走混合调度 + useMixedScheduling := platform == PlatformGemini && !hasForcePlatform + + cacheKey := "gemini:" + sessionHash + + if sessionHash != "" { + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + if err == nil && accountID > 0 { + if _, excluded := excludedIDs[accountID]; !excluded { + account, err := s.getSchedulableAccount(ctx, accountID) + // 检查账号是否有效:原生平台直接匹配,antigravity 需要启用混合调度 + if err == nil && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + valid := false + if account.Platform == platform { + valid = true + } else if useMixedScheduling && account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() { + valid = true + } + if valid { + usable := true + if s.rateLimitService != nil && requestedModel != "" { + ok, err := 
s.rateLimitService.PreCheckUsage(ctx, account, requestedModel)
+							if err != nil {
+								log.Printf("[Gemini PreCheck] Account %d precheck error: %v", account.ID, err)
+							}
+							if !ok {
+								usable = false
+							}
+						}
+						if usable {
+							_ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), cacheKey, geminiStickySessionTTL)
+							return account, nil
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Query schedulable accounts (forced-platform mode: try the group first, then fall back to all accounts)
+	accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, platform, hasForcePlatform)
+	if err != nil {
+		return nil, fmt.Errorf("query accounts failed: %w", err)
+	}
+	// In forced-platform mode, fall back to querying all accounts when the group has none
+	if len(accounts) == 0 && groupID != nil && hasForcePlatform {
+		accounts, err = s.listSchedulableAccountsOnce(ctx, nil, platform, hasForcePlatform)
+		if err != nil {
+			return nil, fmt.Errorf("query accounts failed: %w", err)
+		}
+	}
+
+	var selected *Account
+	for i := range accounts {
+		acc := &accounts[i]
+		if _, excluded := excludedIDs[acc.ID]; excluded {
+			continue
+		}
+		// Mixed scheduling: native-platform accounts pass straight through, while antigravity accounts must have mixed_scheduling enabled.
+		// Outside mixed scheduling (antigravity groups) no filtering is needed.
+		if useMixedScheduling && acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() {
+			continue
+		}
+		if !acc.IsSchedulableForModel(requestedModel) {
+			continue
+		}
+		if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) {
+			continue
+		}
+		if s.rateLimitService != nil && requestedModel != "" {
+			ok, err := s.rateLimitService.PreCheckUsage(ctx, acc, requestedModel)
+			if err != nil {
+				log.Printf("[Gemini PreCheck] Account %d precheck error: %v", acc.ID, err)
+			}
+			if !ok {
+				continue
+			}
+		}
+		if selected == nil {
+			selected = acc
+			continue
+		}
+		if acc.Priority < selected.Priority {
+			selected = acc
+		} else if acc.Priority == selected.Priority {
+			switch {
+			case acc.LastUsedAt == nil && selected.LastUsedAt != nil:
+				selected = acc
+			case acc.LastUsedAt != nil && selected.LastUsedAt == nil:
+				// keep selected (never used is preferred)
+			case acc.LastUsedAt == nil && selected.LastUsedAt == nil:
+				// Prefer OAuth accounts when both are unused (more compatible for Code Assist flows).
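+				// For reference, the complete ordering used by this loop matches the
+				// comparator sketched below (illustrative only; preferAccount is a
+				// hypothetical name, not defined in this file):
+				//
+				//	func preferAccount(a, b *Account) bool {
+				//		if a.Priority != b.Priority {
+				//			return a.Priority < b.Priority // lower value schedules first
+				//		}
+				//		switch {
+				//		case a.LastUsedAt == nil && b.LastUsedAt != nil:
+				//			return true // never-used accounts win
+				//		case a.LastUsedAt != nil && b.LastUsedAt == nil:
+				//			return false
+				//		case a.LastUsedAt == nil && b.LastUsedAt == nil:
+				//			return a.Type == AccountTypeOAuth && b.Type != AccountTypeOAuth
+				//		default:
+				//			return a.LastUsedAt.Before(*b.LastUsedAt) // least recently used wins
+				//		}
+				//	}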
+ if acc.Type == AccountTypeOAuth && selected.Type != AccountTypeOAuth { + selected = acc + } + default: + if acc.LastUsedAt.Before(*selected.LastUsedAt) { + selected = acc + } + } + } + } + + if selected == nil { + if requestedModel != "" { + return nil, fmt.Errorf("no available Gemini accounts supporting model: %s", requestedModel) + } + return nil, errors.New("no available Gemini accounts") + } + + if sessionHash != "" { + _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), cacheKey, selected.ID, geminiStickySessionTTL) + } + + return selected, nil +} + +// isModelSupportedByAccount 根据账户平台检查模型支持 +func (s *GeminiMessagesCompatService) isModelSupportedByAccount(account *Account, requestedModel string) bool { + if account.Platform == PlatformAntigravity { + return IsAntigravityModelSupported(requestedModel) + } + return account.IsModelSupported(requestedModel) +} + +// GetAntigravityGatewayService 返回 AntigravityGatewayService +func (s *GeminiMessagesCompatService) GetAntigravityGatewayService() *AntigravityGatewayService { + return s.antigravityGatewayService +} + +func (s *GeminiMessagesCompatService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) { + if s.schedulerSnapshot != nil { + return s.schedulerSnapshot.GetAccount(ctx, accountID) + } + return s.accountRepo.GetByID(ctx, accountID) +} + +func (s *GeminiMessagesCompatService) listSchedulableAccountsOnce(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, error) { + if s.schedulerSnapshot != nil { + accounts, _, err := s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + return accounts, err + } + + useMixedScheduling := platform == PlatformGemini && !hasForcePlatform + queryPlatforms := []string{platform} + if useMixedScheduling { + queryPlatforms = []string{platform, PlatformAntigravity} + } + + if groupID != nil { + return s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, queryPlatforms) + } + return s.accountRepo.ListSchedulableByPlatforms(ctx, queryPlatforms) +} + +func (s *GeminiMessagesCompatService) validateUpstreamBaseURL(raw string) (string, error) { + if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { + normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.cfg.Security.URLAllowlist.UpstreamHosts, + RequireAllowlist: true, + AllowPrivate: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil +} + +// HasAntigravityAccounts 检查是否有可用的 antigravity 账户 +func (s *GeminiMessagesCompatService) HasAntigravityAccounts(ctx context.Context, groupID *int64) (bool, error) { + accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, PlatformAntigravity, false) + if err != nil { + return false, err + } + return len(accounts) > 0, nil +} + +// SelectAccountForAIStudioEndpoints selects an account that is likely to succeed against +// generativelanguage.googleapis.com (e.g. GET /v1beta/models). 
+// +// Preference order: +// 1) API key accounts (AI Studio) +// 2) OAuth accounts without project_id (AI Studio OAuth) +// 3) OAuth accounts explicitly marked as ai_studio +// 4) Any remaining Gemini accounts (fallback) +func (s *GeminiMessagesCompatService) SelectAccountForAIStudioEndpoints(ctx context.Context, groupID *int64) (*Account, error) { + accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, PlatformGemini, true) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + if len(accounts) == 0 { + return nil, errors.New("no available Gemini accounts") + } + + rank := func(a *Account) int { + if a == nil { + return 999 + } + switch a.Type { + case AccountTypeAPIKey: + if strings.TrimSpace(a.GetCredential("api_key")) != "" { + return 0 + } + return 9 + case AccountTypeOAuth: + if strings.TrimSpace(a.GetCredential("project_id")) == "" { + return 1 + } + if strings.TrimSpace(a.GetCredential("oauth_type")) == "ai_studio" { + return 2 + } + // Code Assist OAuth tokens often lack AI Studio scopes for models listing. + return 3 + default: + return 10 + } + } + + var selected *Account + for i := range accounts { + acc := &accounts[i] + if selected == nil { + selected = acc + continue + } + + r1, r2 := rank(acc), rank(selected) + if r1 < r2 { + selected = acc + continue + } + if r1 > r2 { + continue + } + + if acc.Priority < selected.Priority { + selected = acc + } else if acc.Priority == selected.Priority { + switch { + case acc.LastUsedAt == nil && selected.LastUsedAt != nil: + selected = acc + case acc.LastUsedAt != nil && selected.LastUsedAt == nil: + // keep selected + case acc.LastUsedAt == nil && selected.LastUsedAt == nil: + if acc.Type == AccountTypeOAuth && selected.Type != AccountTypeOAuth { + selected = acc + } + default: + if acc.LastUsedAt.Before(*selected.LastUsedAt) { + selected = acc + } + } + } + } + + if selected == nil { + return nil, errors.New("no available Gemini accounts") + } + return selected, nil +} + +func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Context, account *Account, body []byte) (*ForwardResult, error) { + startTime := time.Now() + + var req struct { + Model string `json:"model"` + Stream bool `json:"stream"` + } + if err := json.Unmarshal(body, &req); err != nil { + return nil, fmt.Errorf("parse request: %w", err) + } + if strings.TrimSpace(req.Model) == "" { + return nil, fmt.Errorf("missing model") + } + + originalModel := req.Model + mappedModel := req.Model + if account.Type == AccountTypeAPIKey { + mappedModel = account.GetMappedModel(req.Model) + } + + geminiReq, err := convertClaudeMessagesToGeminiGenerateContent(body) + if err != nil { + return nil, s.writeClaudeError(c, http.StatusBadRequest, "invalid_request_error", err.Error()) + } + originalClaudeBody := body + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + var requestIDHeader string + var buildReq func(ctx context.Context) (*http.Request, string, error) + useUpstreamStream := req.Stream + if account.Type == AccountTypeOAuth && !req.Stream && strings.TrimSpace(account.GetCredential("project_id")) != "" { + // Code Assist's non-streaming generateContent may return no content; use streaming upstream and aggregate. 
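+		// Concretely: the upstream request is issued with ?alt=sse and the SSE
+		// chunks are re-assembled into a single message via collectGeminiSSE before
+		// the Claude-format conversion (see the non-streaming branch below).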
+ useUpstreamStream = true + } + + switch account.Type { + case AccountTypeAPIKey: + buildReq = func(ctx context.Context) (*http.Request, string, error) { + apiKey := account.GetCredential("api_key") + if strings.TrimSpace(apiKey) == "" { + return nil, "", errors.New("gemini api_key not configured") + } + + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + if baseURL == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, "", err + } + + action := "generateContent" + if req.Stream { + action = "streamGenerateContent" + } + fullURL := fmt.Sprintf("%s/v1beta/models/%s:%s", strings.TrimRight(normalizedBaseURL, "/"), mappedModel, action) + if req.Stream { + fullURL += "?alt=sse" + } + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(geminiReq)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", "application/json") + upstreamReq.Header.Set("x-goog-api-key", apiKey) + return upstreamReq, "x-request-id", nil + } + requestIDHeader = "x-request-id" + + case AccountTypeOAuth: + buildReq = func(ctx context.Context) (*http.Request, string, error) { + if s.tokenProvider == nil { + return nil, "", errors.New("gemini token provider not configured") + } + accessToken, err := s.tokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, "", err + } + + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + action := "generateContent" + if useUpstreamStream { + action = "streamGenerateContent" + } + + // Two modes for OAuth: + // 1. With project_id -> Code Assist API (wrapped request) + // 2. Without project_id -> AI Studio API (direct OAuth, like API key but with Bearer token) + if projectID != "" { + // Mode 1: Code Assist API + baseURL, err := s.validateUpstreamBaseURL(geminicli.GeminiCliBaseURL) + if err != nil { + return nil, "", err + } + fullURL := fmt.Sprintf("%s/v1internal:%s", strings.TrimRight(baseURL, "/"), action) + if useUpstreamStream { + fullURL += "?alt=sse" + } + + wrapped := map[string]any{ + "model": mappedModel, + "project": projectID, + } + var inner any + if err := json.Unmarshal(geminiReq, &inner); err != nil { + return nil, "", fmt.Errorf("failed to parse gemini request: %w", err) + } + wrapped["request"] = inner + wrappedBytes, _ := json.Marshal(wrapped) + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(wrappedBytes)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", "application/json") + upstreamReq.Header.Set("Authorization", "Bearer "+accessToken) + upstreamReq.Header.Set("User-Agent", geminicli.GeminiCLIUserAgent) + return upstreamReq, "x-request-id", nil + } else { + // Mode 2: AI Studio API with OAuth (like API key mode, but using Bearer token) + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + if baseURL == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, "", err + } + + fullURL := fmt.Sprintf("%s/v1beta/models/%s:%s", strings.TrimRight(normalizedBaseURL, "/"), mappedModel, action) + if useUpstreamStream { + fullURL += "?alt=sse" + } + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(geminiReq)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", "application/json") + 
upstreamReq.Header.Set("Authorization", "Bearer "+accessToken) + return upstreamReq, "x-request-id", nil + } + } + requestIDHeader = "x-request-id" + + default: + return nil, fmt.Errorf("unsupported account type: %s", account.Type) + } + + var resp *http.Response + signatureRetryStage := 0 + for attempt := 1; attempt <= geminiMaxRetries; attempt++ { + upstreamReq, idHeader, err := buildReq(ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } + // Local build error: don't retry. + if strings.Contains(err.Error(), "missing project_id") { + return nil, s.writeClaudeError(c, http.StatusBadRequest, "invalid_request_error", err.Error()) + } + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", err.Error()) + } + requestIDHeader = idHeader + + // Capture upstream request body for ops retry of this attempt. + if c != nil { + // In this code path `body` is already the JSON sent to upstream. + c.Set(OpsUpstreamRequestBodyKey, string(body)) + } + + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + if attempt < geminiMaxRetries { + log.Printf("Gemini account %d: upstream request failed, retry %d/%d: %v", account.ID, attempt, geminiMaxRetries, err) + sleepGeminiBackoff(attempt) + continue + } + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries: "+safeErr) + } + + // Special-case: signature/thought_signature validation errors are not transient, but may be fixed by + // downgrading Claude thinking/tool history to plain text (conservative two-stage retry). 
+ if resp.StatusCode == http.StatusBadRequest && signatureRetryStage < 2 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + + if isGeminiSignatureRelatedError(respBody) { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "signature_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + var strippedClaudeBody []byte + stageName := "" + switch signatureRetryStage { + case 0: + // Stage 1: disable thinking + thinking->text + strippedClaudeBody = FilterThinkingBlocksForRetry(originalClaudeBody) + stageName = "thinking-only" + signatureRetryStage = 1 + default: + // Stage 2: additionally downgrade tool_use/tool_result blocks to text + strippedClaudeBody = FilterSignatureSensitiveBlocksForRetry(originalClaudeBody) + stageName = "thinking+tools" + signatureRetryStage = 2 + } + retryGeminiReq, txErr := convertClaudeMessagesToGeminiGenerateContent(strippedClaudeBody) + if txErr == nil { + log.Printf("Gemini account %d: detected signature-related 400, retrying with downgraded Claude blocks (%s)", account.ID, stageName) + geminiReq = retryGeminiReq + // Consume one retry budget attempt and continue with the updated request payload. + sleepGeminiBackoff(1) + continue + } + } + + // Restore body for downstream error handling. + resp = &http.Response{ + StatusCode: http.StatusBadRequest, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break + } + + if resp.StatusCode >= 400 && s.shouldRetryGeminiUpstreamError(account, resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + // Don't treat insufficient-scope as transient. + if resp.StatusCode == 403 && isGeminiInsufficientScope(resp.Header, respBody) { + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break + } + if resp.StatusCode == 429 { + // Mark as rate-limited early so concurrent requests avoid this account. 
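+				// Flagging the account before this retry loop finishes lets the
+				// scheduler steer concurrent requests away from it while this one
+				// backs off and retries.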
+ s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + } + if attempt < geminiMaxRetries { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + log.Printf("Gemini account %d: upstream status %d, retry %d/%d", account.ID, resp.StatusCode, attempt, geminiMaxRetries) + sleepGeminiBackoff(attempt) + continue + } + // Final attempt: surface the upstream error body (mapped below) instead of a generic retry error. + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break + } + + break + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + tempMatched := false + if s.rateLimitService != nil { + tempMatched = s.rateLimitService.HandleTempUnschedulable(ctx, account, resp.StatusCode, respBody) + } + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + if tempMatched { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: 
resp.StatusCode} + } + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + return nil, s.writeGeminiMappedError(c, account, resp.StatusCode, upstreamReqID, respBody) + } + + requestID := resp.Header.Get(requestIDHeader) + if requestID == "" { + requestID = resp.Header.Get("x-goog-request-id") + } + if requestID != "" { + c.Header("x-request-id", requestID) + } + + var usage *ClaudeUsage + var firstTokenMs *int + if req.Stream { + streamRes, err := s.handleStreamingResponse(c, resp, startTime, originalModel) + if err != nil { + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs + } else { + if useUpstreamStream { + collected, usageObj, err := collectGeminiSSE(resp.Body, true) + if err != nil { + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Failed to read upstream stream") + } + claudeResp, usageObj2 := convertGeminiToClaudeMessage(collected, originalModel) + c.JSON(http.StatusOK, claudeResp) + usage = usageObj2 + if usageObj != nil && (usageObj.InputTokens > 0 || usageObj.OutputTokens > 0) { + usage = usageObj + } + } else { + usage, err = s.handleNonStreamingResponse(c, resp, originalModel) + if err != nil { + return nil, err + } + } + } + + return &ForwardResult{ + RequestID: requestID, + Usage: *usage, + Model: originalModel, + Stream: req.Stream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + }, nil +} + +func isGeminiSignatureRelatedError(respBody []byte) bool { + msg := strings.ToLower(strings.TrimSpace(extractAntigravityErrorMessage(respBody))) + if msg == "" { + msg = strings.ToLower(string(respBody)) + } + return strings.Contains(msg, "thought_signature") || strings.Contains(msg, "signature") +} + +func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.Context, account *Account, originalModel string, action string, stream bool, body []byte) (*ForwardResult, error) { + startTime := time.Now() + + if strings.TrimSpace(originalModel) == "" { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Missing model in URL") + } + if strings.TrimSpace(action) == "" { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Missing action in URL") + } + if len(body) == 0 { + return nil, s.writeGoogleError(c, http.StatusBadRequest, "Request body is empty") + } + + switch action { + case "generateContent", "streamGenerateContent", "countTokens": + // ok + default: + return nil, s.writeGoogleError(c, http.StatusNotFound, "Unsupported action: "+action) + } + + mappedModel := originalModel + if account.Type == AccountTypeAPIKey { + mappedModel = account.GetMappedModel(originalModel) + } + + proxyURL := "" + if account.ProxyID != nil && account.Proxy != nil { + proxyURL = account.Proxy.URL() + } + + useUpstreamStream := stream + upstreamAction := action + if account.Type == AccountTypeOAuth && !stream && action == "generateContent" && strings.TrimSpace(account.GetCredential("project_id")) != "" { + // Code Assist's non-streaming generateContent may return no content; use streaming upstream and aggregate. 
+ useUpstreamStream = true + upstreamAction = "streamGenerateContent" + } + forceAIStudio := action == "countTokens" + + var requestIDHeader string + var buildReq func(ctx context.Context) (*http.Request, string, error) + + switch account.Type { + case AccountTypeAPIKey: + buildReq = func(ctx context.Context) (*http.Request, string, error) { + apiKey := account.GetCredential("api_key") + if strings.TrimSpace(apiKey) == "" { + return nil, "", errors.New("gemini api_key not configured") + } + + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + if baseURL == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, "", err + } + + fullURL := fmt.Sprintf("%s/v1beta/models/%s:%s", strings.TrimRight(normalizedBaseURL, "/"), mappedModel, upstreamAction) + if useUpstreamStream { + fullURL += "?alt=sse" + } + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(body)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", "application/json") + upstreamReq.Header.Set("x-goog-api-key", apiKey) + return upstreamReq, "x-request-id", nil + } + requestIDHeader = "x-request-id" + + case AccountTypeOAuth: + buildReq = func(ctx context.Context) (*http.Request, string, error) { + if s.tokenProvider == nil { + return nil, "", errors.New("gemini token provider not configured") + } + accessToken, err := s.tokenProvider.GetAccessToken(ctx, account) + if err != nil { + return nil, "", err + } + + projectID := strings.TrimSpace(account.GetCredential("project_id")) + + // Two modes for OAuth: + // 1. With project_id -> Code Assist API (wrapped request) + // 2. Without project_id -> AI Studio API (direct OAuth, like API key but with Bearer token) + if projectID != "" && !forceAIStudio { + // Mode 1: Code Assist API + baseURL, err := s.validateUpstreamBaseURL(geminicli.GeminiCliBaseURL) + if err != nil { + return nil, "", err + } + fullURL := fmt.Sprintf("%s/v1internal:%s", strings.TrimRight(baseURL, "/"), upstreamAction) + if useUpstreamStream { + fullURL += "?alt=sse" + } + + wrapped := map[string]any{ + "model": mappedModel, + "project": projectID, + } + var inner any + if err := json.Unmarshal(body, &inner); err != nil { + return nil, "", fmt.Errorf("failed to parse gemini request: %w", err) + } + wrapped["request"] = inner + wrappedBytes, _ := json.Marshal(wrapped) + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(wrappedBytes)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", "application/json") + upstreamReq.Header.Set("Authorization", "Bearer "+accessToken) + upstreamReq.Header.Set("User-Agent", geminicli.GeminiCLIUserAgent) + return upstreamReq, "x-request-id", nil + } else { + // Mode 2: AI Studio API with OAuth (like API key mode, but using Bearer token) + baseURL := strings.TrimSpace(account.GetCredential("base_url")) + if baseURL == "" { + baseURL = geminicli.AIStudioBaseURL + } + normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return nil, "", err + } + + fullURL := fmt.Sprintf("%s/v1beta/models/%s:%s", strings.TrimRight(normalizedBaseURL, "/"), mappedModel, upstreamAction) + if useUpstreamStream { + fullURL += "?alt=sse" + } + + upstreamReq, err := http.NewRequestWithContext(ctx, http.MethodPost, fullURL, bytes.NewReader(body)) + if err != nil { + return nil, "", err + } + upstreamReq.Header.Set("Content-Type", 
"application/json") + upstreamReq.Header.Set("Authorization", "Bearer "+accessToken) + return upstreamReq, "x-request-id", nil + } + } + requestIDHeader = "x-request-id" + + default: + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Unsupported account type: "+account.Type) + } + + var resp *http.Response + for attempt := 1; attempt <= geminiMaxRetries; attempt++ { + upstreamReq, idHeader, err := buildReq(ctx) + if err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err + } + // Local build error: don't retry. + if strings.Contains(err.Error(), "missing project_id") { + return nil, s.writeGoogleError(c, http.StatusBadRequest, err.Error()) + } + return nil, s.writeGoogleError(c, http.StatusBadGateway, err.Error()) + } + requestIDHeader = idHeader + + // Capture upstream request body for ops retry of this attempt. + if c != nil { + // In this code path `body` is already the JSON sent to upstream. + c.Set(OpsUpstreamRequestBodyKey, string(body)) + } + + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) + if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + if attempt < geminiMaxRetries { + log.Printf("Gemini account %d: upstream request failed, retry %d/%d: %v", account.ID, attempt, geminiMaxRetries, err) + sleepGeminiBackoff(attempt) + continue + } + if action == "countTokens" { + estimated := estimateGeminiCountTokens(body) + c.JSON(http.StatusOK, map[string]any{"totalTokens": estimated}) + return &ForwardResult{ + RequestID: "", + Usage: ClaudeUsage{}, + Model: originalModel, + Stream: false, + Duration: time.Since(startTime), + FirstTokenMs: nil, + }, nil + } + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries: "+safeErr) + } + + if resp.StatusCode >= 400 && s.shouldRetryGeminiUpstreamError(account, resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + // Don't treat insufficient-scope as transient. 
+ if resp.StatusCode == 403 && isGeminiInsufficientScope(resp.Header, respBody) { + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break + } + if resp.StatusCode == 429 { + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + } + if attempt < geminiMaxRetries { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + log.Printf("Gemini account %d: upstream status %d, retry %d/%d", account.ID, resp.StatusCode, attempt, geminiMaxRetries) + sleepGeminiBackoff(attempt) + continue + } + if action == "countTokens" { + estimated := estimateGeminiCountTokens(body) + c.JSON(http.StatusOK, map[string]any{"totalTokens": estimated}) + return &ForwardResult{ + RequestID: "", + Usage: ClaudeUsage{}, + Model: originalModel, + Stream: false, + Duration: time.Since(startTime), + FirstTokenMs: nil, + }, nil + } + // Final attempt: surface the upstream error body (passed through below) instead of a generic retry error. + resp = &http.Response{ + StatusCode: resp.StatusCode, + Header: resp.Header.Clone(), + Body: io.NopCloser(bytes.NewReader(respBody)), + } + break + } + + break + } + defer func() { _ = resp.Body.Close() }() + + requestID := resp.Header.Get(requestIDHeader) + if requestID == "" { + requestID = resp.Header.Get("x-goog-request-id") + } + if requestID != "" { + c.Header("x-request-id", requestID) + } + + isOAuth := account.Type == AccountTypeOAuth + + if resp.StatusCode >= 400 { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + tempMatched := false + if s.rateLimitService != nil { + tempMatched = s.rateLimitService.HandleTempUnschedulable(ctx, account, resp.StatusCode, respBody) + } + s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + + // Best-effort fallback for OAuth tokens missing AI Studio scopes when calling countTokens. + // This avoids Gemini SDKs failing hard during preflight token counting. 
+ if action == "countTokens" && isOAuth && isGeminiInsufficientScope(resp.Header, respBody) { + estimated := estimateGeminiCountTokens(body) + c.JSON(http.StatusOK, map[string]any{"totalTokens": estimated}) + return &ForwardResult{ + RequestID: requestID, + Usage: ClaudeUsage{}, + Model: originalModel, + Stream: false, + Duration: time.Since(startTime), + FirstTokenMs: nil, + }, nil + } + + if tempMatched { + evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { + evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + + respBody = unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + log.Printf("[Gemini] native upstream error %d: %s", resp.StatusCode, truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "application/json" + } + c.Data(resp.StatusCode, contentType, respBody) + if upstreamMsg == "" { + return nil, fmt.Errorf("gemini upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("gemini upstream error: %d message=%s", resp.StatusCode, upstreamMsg) + } + + var usage *ClaudeUsage + var firstTokenMs *int + + if stream { + streamRes, err := s.handleNativeStreamingResponse(c, resp, startTime, isOAuth) + if err != nil { + return nil, err + } + usage = streamRes.usage + firstTokenMs = streamRes.firstTokenMs 
+	} else {
+		if useUpstreamStream {
+			collected, usageObj, err := collectGeminiSSE(resp.Body, isOAuth)
+			if err != nil {
+				return nil, s.writeGoogleError(c, http.StatusBadGateway, "Failed to read upstream stream")
+			}
+			b, _ := json.Marshal(collected)
+			c.Data(http.StatusOK, "application/json", b)
+			usage = usageObj
+		} else {
+			usageResp, err := s.handleNativeNonStreamingResponse(c, resp, isOAuth)
+			if err != nil {
+				return nil, err
+			}
+			usage = usageResp
+		}
+	}
+
+	if usage == nil {
+		usage = &ClaudeUsage{}
+	}
+
+	return &ForwardResult{
+		RequestID:    requestID,
+		Usage:        *usage,
+		Model:        originalModel,
+		Stream:       stream,
+		Duration:     time.Since(startTime),
+		FirstTokenMs: firstTokenMs,
+	}, nil
+}
+
+func (s *GeminiMessagesCompatService) shouldRetryGeminiUpstreamError(account *Account, statusCode int) bool {
+	switch statusCode {
+	case 429, 500, 502, 503, 504, 529:
+		return true
+	case 403:
+		// GeminiCli OAuth occasionally returns 403 transiently (activation/quota propagation); allow retry.
+		if account == nil || account.Type != AccountTypeOAuth {
+			return false
+		}
+		oauthType := strings.ToLower(strings.TrimSpace(account.GetCredential("oauth_type")))
+		if oauthType == "" && strings.TrimSpace(account.GetCredential("project_id")) != "" {
+			// Legacy/implicit Code Assist OAuth accounts.
+			oauthType = "code_assist"
+		}
+		return oauthType == "code_assist"
+	default:
+		return false
+	}
+}
+
+func (s *GeminiMessagesCompatService) shouldFailoverGeminiUpstreamError(statusCode int) bool {
+	switch statusCode {
+	case 401, 403, 429, 529:
+		return true
+	default:
+		return statusCode >= 500
+	}
+}
+
+func sleepGeminiBackoff(attempt int) {
+	delay := geminiRetryBaseDelay * time.Duration(1<<(attempt-1))
+	if delay > geminiRetryMaxDelay {
+		delay = geminiRetryMaxDelay
+	}
+
+	// +/- 20% jitter
+	r := mathrand.New(mathrand.NewSource(time.Now().UnixNano()))
+	jitter := time.Duration(float64(delay) * 0.2 * (r.Float64()*2 - 1))
+	sleepFor := delay + jitter
+	if sleepFor < 0 {
+		sleepFor = 0
+	}
+	time.Sleep(sleepFor)
+}
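+// Illustrative backoff schedule, assuming geminiRetryBaseDelay = 500ms and
+// geminiRetryMaxDelay = 8s (the actual constants are defined elsewhere in
+// this file): attempt 1 -> ~500ms, 2 -> ~1s, 3 -> ~2s, 4 -> ~4s, 5+ -> capped
+// at ~8s, each randomized by +/- 20% jitter.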
+var (
+	sensitiveQueryParamRegex = regexp.MustCompile(`(?i)([?&](?:key|client_secret|access_token|refresh_token)=)[^&"\s]+`)
+	retryInRegex             = regexp.MustCompile(`Please retry in ([0-9.]+)s`)
+)
+
+func sanitizeUpstreamErrorMessage(msg string) string {
+	if msg == "" {
+		return msg
+	}
+	return sensitiveQueryParamRegex.ReplaceAllString(msg, `$1***`)
+}
+
+func (s *GeminiMessagesCompatService) writeGeminiMappedError(c *gin.Context, account *Account, upstreamStatus int, upstreamRequestID string, body []byte) error {
+	upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body))
+	upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
+	upstreamDetail := ""
+	if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+		maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes
+		if maxBytes <= 0 {
+			maxBytes = 2048
+		}
+		upstreamDetail = truncateString(string(body), maxBytes)
+	}
+	setOpsUpstreamError(c, upstreamStatus, upstreamMsg, upstreamDetail)
+	appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+		Platform:           account.Platform,
+		AccountID:          account.ID,
+		AccountName:        account.Name,
+		UpstreamStatusCode: upstreamStatus,
+		UpstreamRequestID:  upstreamRequestID,
+		Kind:               "http_error",
+		Message:            upstreamMsg,
+		Detail:             upstreamDetail,
+	})
+
+	if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+		log.Printf("[Gemini] upstream error %d: %s", upstreamStatus, truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes))
+	}
+
+	var statusCode int
+	var errType, errMsg string
+
+	if mapped := mapGeminiErrorBodyToClaudeError(body); mapped != nil {
+		errType = mapped.Type
+		if mapped.Message != "" {
+			errMsg = mapped.Message
+		}
+		if mapped.StatusCode > 0 {
+			statusCode = mapped.StatusCode
+		}
+	}
+
+	switch upstreamStatus {
+	case 400:
+		if statusCode == 0 {
+			statusCode = http.StatusBadRequest
+		}
+		if errType == "" {
+			errType = "invalid_request_error"
+		}
+		if errMsg == "" {
+			errMsg = "Invalid request"
+		}
+	case 401:
+		if statusCode == 0 {
+			statusCode = http.StatusBadGateway
+		}
+		if errType == "" {
+			errType = "authentication_error"
+		}
+		if errMsg == "" {
+			errMsg = "Upstream authentication failed, please contact administrator"
+		}
+	case 403:
+		if statusCode == 0 {
+			statusCode = http.StatusBadGateway
+		}
+		if errType == "" {
+			errType = "permission_error"
+		}
+		if errMsg == "" {
+			errMsg = "Upstream access forbidden, please contact administrator"
+		}
+	case 404:
+		if statusCode == 0 {
+			statusCode = http.StatusNotFound
+		}
+		if errType == "" {
+			errType = "not_found_error"
+		}
+		if errMsg == "" {
+			errMsg = "Resource not found"
+		}
+	case 429:
+		if statusCode == 0 {
+			statusCode = http.StatusTooManyRequests
+		}
+		if errType == "" {
+			errType = "rate_limit_error"
+		}
+		if errMsg == "" {
+			errMsg = "Upstream rate limit exceeded, please retry later"
+		}
+	case 529:
+		if statusCode == 0 {
+			statusCode = http.StatusServiceUnavailable
+		}
+		if errType == "" {
+			errType = "overloaded_error"
+		}
+		if errMsg == "" {
+			errMsg = "Upstream service overloaded, please retry later"
+		}
+	case 500, 502, 503, 504:
+		if statusCode == 0 {
+			statusCode = http.StatusBadGateway
+		}
+		if errType == "" {
+			switch upstreamStatus {
+			case 504:
+				errType = "timeout_error"
+			case 503:
+				errType = "overloaded_error"
+			default:
+				errType = "api_error"
+			}
+		}
+		if errMsg == "" {
+			errMsg = "Upstream service temporarily unavailable"
+		}
+	default:
+		if statusCode == 0 {
+			statusCode = http.StatusBadGateway
+		}
+		if errType == "" {
+			errType = "upstream_error"
+		}
+		if errMsg == "" {
+			errMsg = "Upstream request failed"
+		}
+	}
+
+	c.JSON(statusCode, gin.H{
+		"type":  "error",
+		"error": gin.H{"type": errType, "message": errMsg},
+	})
+	if upstreamMsg == "" {
+		return fmt.Errorf("upstream error: %d", upstreamStatus)
+	}
+	return fmt.Errorf("upstream error: %d message=%s", upstreamStatus, upstreamMsg)
+}
+
+type claudeErrorMapping struct {
+	Type       string
+	Message    string
+	StatusCode int
+}
+
+func mapGeminiErrorBodyToClaudeError(body []byte) *claudeErrorMapping {
+	if len(body) == 0 {
+		return nil
+	}
+
+	var parsed struct {
+		Error struct {
+			Code    int    `json:"code"`
+			Message string `json:"message"`
+			Status  string `json:"status"`
+		} `json:"error"`
+	}
+	if err := json.Unmarshal(body, &parsed); err != nil {
+		return nil
+	}
+	if strings.TrimSpace(parsed.Error.Status) == "" && parsed.Error.Code == 0 && strings.TrimSpace(parsed.Error.Message) == "" {
+		return nil
+	}
+
+	mapped := &claudeErrorMapping{
+		Type:    mapGeminiStatusToClaudeErrorType(parsed.Error.Status),
+		Message: "",
+	}
+	if mapped.Type == "" {
+		mapped.Type = "upstream_error"
+	}
+
+	switch strings.ToUpper(strings.TrimSpace(parsed.Error.Status)) {
+	case "INVALID_ARGUMENT":
+		mapped.StatusCode = http.StatusBadRequest
+	case "NOT_FOUND":
+		mapped.StatusCode = http.StatusNotFound
+	case "RESOURCE_EXHAUSTED":
+		mapped.StatusCode = http.StatusTooManyRequests
+	default:
+		// Keep StatusCode unset and let HTTP status mapping decide.
+	}
+
+	// Keep messages generic by default; upstream error message can be long or include sensitive fragments.
+ return mapped +} + +func mapGeminiStatusToClaudeErrorType(status string) string { + switch strings.ToUpper(strings.TrimSpace(status)) { + case "INVALID_ARGUMENT": + return "invalid_request_error" + case "PERMISSION_DENIED": + return "permission_error" + case "NOT_FOUND": + return "not_found_error" + case "RESOURCE_EXHAUSTED": + return "rate_limit_error" + case "UNAUTHENTICATED": + return "authentication_error" + case "UNAVAILABLE": + return "overloaded_error" + case "INTERNAL": + return "api_error" + case "DEADLINE_EXCEEDED": + return "timeout_error" + default: + return "" + } +} + +type geminiStreamResult struct { + usage *ClaudeUsage + firstTokenMs *int +} + +func (s *GeminiMessagesCompatService) handleNonStreamingResponse(c *gin.Context, resp *http.Response, originalModel string) (*ClaudeUsage, error) { + body, err := io.ReadAll(io.LimitReader(resp.Body, 8<<20)) + if err != nil { + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Failed to read upstream response") + } + + geminiResp, err := unwrapGeminiResponse(body) + if err != nil { + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Failed to parse upstream response") + } + + claudeResp, usage := convertGeminiToClaudeMessage(geminiResp, originalModel) + c.JSON(http.StatusOK, claudeResp) + + return usage, nil +} + +func (s *GeminiMessagesCompatService) handleStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time, originalModel string) (*geminiStreamResult, error) { + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + c.Status(http.StatusOK) + + flusher, ok := c.Writer.(http.Flusher) + if !ok { + return nil, errors.New("streaming not supported") + } + + messageID := "msg_" + randomHex(12) + messageStart := map[string]any{ + "type": "message_start", + "message": map[string]any{ + "id": messageID, + "type": "message", + "role": "assistant", + "model": originalModel, + "content": []any{}, + "stop_reason": nil, + "stop_sequence": nil, + "usage": map[string]any{ + "input_tokens": 0, + "output_tokens": 0, + }, + }, + } + writeSSE(c.Writer, "message_start", messageStart) + flusher.Flush() + + var firstTokenMs *int + var usage ClaudeUsage + finishReason := "" + sawToolUse := false + + nextBlockIndex := 0 + openBlockIndex := -1 + openBlockType := "" + seenText := "" + openToolIndex := -1 + openToolID := "" + openToolName := "" + seenToolJSON := "" + + reader := bufio.NewReader(resp.Body) + for { + line, err := reader.ReadString('\n') + if err != nil && !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("stream read error: %w", err) + } + + if !strings.HasPrefix(line, "data:") { + if errors.Is(err, io.EOF) { + break + } + continue + } + payload := strings.TrimSpace(strings.TrimPrefix(line, "data:")) + if payload == "" || payload == "[DONE]" { + if errors.Is(err, io.EOF) { + break + } + continue + } + + geminiResp, err := unwrapGeminiResponse([]byte(payload)) + if err != nil { + continue + } + + if fr := extractGeminiFinishReason(geminiResp); fr != "" { + finishReason = fr + } + + parts := extractGeminiParts(geminiResp) + for _, part := range parts { + if text, ok := part["text"].(string); ok && text != "" { + delta, newSeen := computeGeminiTextDelta(seenText, text) + seenText = newSeen + if delta == "" { + continue + } + + if openBlockType != "text" { + if openBlockIndex >= 0 { + writeSSE(c.Writer, "content_block_stop", map[string]any{ + "type": 
"content_block_stop", + "index": openBlockIndex, + }) + } + openBlockType = "text" + openBlockIndex = nextBlockIndex + nextBlockIndex++ + writeSSE(c.Writer, "content_block_start", map[string]any{ + "type": "content_block_start", + "index": openBlockIndex, + "content_block": map[string]any{ + "type": "text", + "text": "", + }, + }) + } + + if firstTokenMs == nil { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + writeSSE(c.Writer, "content_block_delta", map[string]any{ + "type": "content_block_delta", + "index": openBlockIndex, + "delta": map[string]any{ + "type": "text_delta", + "text": delta, + }, + }) + flusher.Flush() + continue + } + + if fc, ok := part["functionCall"].(map[string]any); ok && fc != nil { + name, _ := fc["name"].(string) + args := fc["args"] + if strings.TrimSpace(name) == "" { + name = "tool" + } + + // Close any open text block before tool_use. + if openBlockIndex >= 0 { + writeSSE(c.Writer, "content_block_stop", map[string]any{ + "type": "content_block_stop", + "index": openBlockIndex, + }) + openBlockIndex = -1 + openBlockType = "" + } + + // If we receive streamed tool args in pieces, keep a single tool block open and emit deltas. + if openToolIndex >= 0 && openToolName != name { + writeSSE(c.Writer, "content_block_stop", map[string]any{ + "type": "content_block_stop", + "index": openToolIndex, + }) + openToolIndex = -1 + openToolName = "" + seenToolJSON = "" + } + + if openToolIndex < 0 { + openToolID = "toolu_" + randomHex(8) + openToolIndex = nextBlockIndex + openToolName = name + nextBlockIndex++ + sawToolUse = true + + writeSSE(c.Writer, "content_block_start", map[string]any{ + "type": "content_block_start", + "index": openToolIndex, + "content_block": map[string]any{ + "type": "tool_use", + "id": openToolID, + "name": name, + "input": map[string]any{}, + }, + }) + } + + argsJSONText := "{}" + switch v := args.(type) { + case nil: + // keep default "{}" + case string: + if strings.TrimSpace(v) != "" { + argsJSONText = v + } + default: + if b, err := json.Marshal(args); err == nil && len(b) > 0 { + argsJSONText = string(b) + } + } + + delta, newSeen := computeGeminiTextDelta(seenToolJSON, argsJSONText) + seenToolJSON = newSeen + if delta != "" { + writeSSE(c.Writer, "content_block_delta", map[string]any{ + "type": "content_block_delta", + "index": openToolIndex, + "delta": map[string]any{ + "type": "input_json_delta", + "partial_json": delta, + }, + }) + } + flusher.Flush() + } + } + + if u := extractGeminiUsage(geminiResp); u != nil { + usage = *u + } + + // Process the final unterminated line at EOF as well. 
+		if errors.Is(err, io.EOF) {
+			break
+		}
+	}
+
+	if openBlockIndex >= 0 {
+		writeSSE(c.Writer, "content_block_stop", map[string]any{
+			"type":  "content_block_stop",
+			"index": openBlockIndex,
+		})
+	}
+	if openToolIndex >= 0 {
+		writeSSE(c.Writer, "content_block_stop", map[string]any{
+			"type":  "content_block_stop",
+			"index": openToolIndex,
+		})
+	}
+
+	stopReason := mapGeminiFinishReasonToClaudeStopReason(finishReason)
+	if sawToolUse {
+		stopReason = "tool_use"
+	}
+
+	usageObj := map[string]any{
+		"output_tokens": usage.OutputTokens,
+	}
+	if usage.InputTokens > 0 {
+		usageObj["input_tokens"] = usage.InputTokens
+	}
+	writeSSE(c.Writer, "message_delta", map[string]any{
+		"type": "message_delta",
+		"delta": map[string]any{
+			"stop_reason":   stopReason,
+			"stop_sequence": nil,
+		},
+		"usage": usageObj,
+	})
+	writeSSE(c.Writer, "message_stop", map[string]any{
+		"type": "message_stop",
+	})
+	flusher.Flush()
+
+	return &geminiStreamResult{usage: &usage, firstTokenMs: firstTokenMs}, nil
+}
+
+func writeSSE(w io.Writer, event string, data any) {
+	if event != "" {
+		_, _ = fmt.Fprintf(w, "event: %s\n", event)
+	}
+	b, _ := json.Marshal(data)
+	_, _ = fmt.Fprintf(w, "data: %s\n\n", string(b))
+}
+
+func randomHex(nBytes int) string {
+	b := make([]byte, nBytes)
+	_, _ = rand.Read(b)
+	return hex.EncodeToString(b)
+}
+
+func (s *GeminiMessagesCompatService) writeClaudeError(c *gin.Context, status int, errType, message string) error {
+	c.JSON(status, gin.H{
+		"type":  "error",
+		"error": gin.H{"type": errType, "message": message},
+	})
+	return fmt.Errorf("%s", message)
+}
+
+func (s *GeminiMessagesCompatService) writeGoogleError(c *gin.Context, status int, message string) error {
+	c.JSON(status, gin.H{
+		"error": gin.H{
+			"code":    status,
+			"message": message,
+			"status":  googleapi.HTTPStatusToGoogleStatus(status),
+		},
+	})
+	return fmt.Errorf("%s", message)
+}
+
+func unwrapIfNeeded(isOAuth bool, raw []byte) []byte {
+	if !isOAuth {
+		return raw
+	}
+	inner, err := unwrapGeminiResponse(raw)
+	if err != nil {
+		return raw
+	}
+	b, err := json.Marshal(inner)
+	if err != nil {
+		return raw
+	}
+	return b
+}
+
+func collectGeminiSSE(body io.Reader, isOAuth bool) (map[string]any, *ClaudeUsage, error) {
+	reader := bufio.NewReader(body)
+
+	var last map[string]any
+	var lastWithParts map[string]any
+	usage := &ClaudeUsage{}
+
+	for {
+		line, err := reader.ReadString('\n')
+		if len(line) > 0 {
+			trimmed := strings.TrimRight(line, "\r\n")
+			if strings.HasPrefix(trimmed, "data:") {
+				payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
+				switch payload {
+				case "", "[DONE]":
+					if payload == "[DONE]" {
+						return pickGeminiCollectResult(last, lastWithParts), usage, nil
+					}
+				default:
+					var parsed map[string]any
+					if isOAuth {
+						inner, err := unwrapGeminiResponse([]byte(payload))
+						if err == nil && inner != nil {
+							parsed = inner
+						}
+					} else {
+						_ = json.Unmarshal([]byte(payload), &parsed)
+					}
+					if parsed != nil {
+						last = parsed
+						if u := extractGeminiUsage(parsed); u != nil {
+							usage = u
+						}
+						if parts := extractGeminiParts(parsed); len(parts) > 0 {
+							lastWithParts = parsed
+						}
+					}
+				}
+			}
+		}
+
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return pickGeminiCollectResult(last, lastWithParts), usage, nil
+}
+
+func pickGeminiCollectResult(last map[string]any, lastWithParts map[string]any) map[string]any {
+	if lastWithParts != nil {
+		return lastWithParts
+	}
+	if last != nil {
+		return last
+	}
+	return map[string]any{}
+}
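+// For reference, writeSSE above emits standard SSE frames, e.g. (illustrative
+// payload, not captured output):
+//
+//	event: content_block_delta
+//	data: {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}}
+//
+// with a trailing blank line, which is what separates SSE events.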
+type geminiNativeStreamResult struct {
+	usage        *ClaudeUsage
+	firstTokenMs *int
+}
+
+func isGeminiInsufficientScope(headers http.Header, body []byte) bool {
+	if strings.Contains(strings.ToLower(headers.Get("Www-Authenticate")), "insufficient_scope") {
+		return true
+	}
+	lower := strings.ToLower(string(body))
+	return strings.Contains(lower, "insufficient authentication scopes") || strings.Contains(lower, "access_token_scope_insufficient")
+}
+
+func estimateGeminiCountTokens(reqBody []byte) int {
+	var obj map[string]any
+	if err := json.Unmarshal(reqBody, &obj); err != nil {
+		return 0
+	}
+
+	var texts []string
+
+	// systemInstruction.parts[].text
+	if si, ok := obj["systemInstruction"].(map[string]any); ok {
+		if parts, ok := si["parts"].([]any); ok {
+			for _, p := range parts {
+				if pm, ok := p.(map[string]any); ok {
+					if t, ok := pm["text"].(string); ok && strings.TrimSpace(t) != "" {
+						texts = append(texts, t)
+					}
+				}
+			}
+		}
+	}
+
+	// contents[].parts[].text
+	if contents, ok := obj["contents"].([]any); ok {
+		for _, c := range contents {
+			cm, ok := c.(map[string]any)
+			if !ok {
+				continue
+			}
+			parts, ok := cm["parts"].([]any)
+			if !ok {
+				continue
+			}
+			for _, p := range parts {
+				pm, ok := p.(map[string]any)
+				if !ok {
+					continue
+				}
+				if t, ok := pm["text"].(string); ok && strings.TrimSpace(t) != "" {
+					texts = append(texts, t)
+				}
+			}
+		}
+	}
+
+	total := 0
+	for _, t := range texts {
+		total += estimateTokensForText(t)
+	}
+	if total < 0 {
+		return 0
+	}
+	return total
+}
+
+func estimateTokensForText(s string) int {
+	s = strings.TrimSpace(s)
+	if s == "" {
+		return 0
+	}
+	runes := []rune(s)
+	if len(runes) == 0 {
+		return 0
+	}
+	ascii := 0
+	for _, r := range runes {
+		if r <= 0x7f {
+			ascii++
+		}
+	}
+	asciiRatio := float64(ascii) / float64(len(runes))
+	if asciiRatio >= 0.8 {
+		// Roughly 4 chars per token for English-like text.
+		return (len(runes) + 3) / 4
+	}
+	// For CJK-heavy text, approximate 1 rune per token.
+	return len(runes)
+}
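+// Rough sanity check of the heuristic above (illustrative, not asserted in tests):
+//
+//	estimateTokensForText("hello world") -> (11+3)/4 = 3 tokens (ASCII-heavy path)
+//	estimateTokensForText("你好世界")     -> 4 tokens (one token per rune on the CJK path)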
+type UpstreamHTTPResult struct {
+	StatusCode int
+	Headers    http.Header
+	Body       []byte
+}
+
+func (s *GeminiMessagesCompatService) handleNativeNonStreamingResponse(c *gin.Context, resp *http.Response, isOAuth bool) (*ClaudeUsage, error) {
+	// Log response headers for debugging
+	log.Printf("[GeminiAPI] ========== Response Headers ==========")
+	for key, values := range resp.Header {
+		if strings.HasPrefix(strings.ToLower(key), "x-ratelimit") {
+			log.Printf("[GeminiAPI] %s: %v", key, values)
+		}
+	}
+	log.Printf("[GeminiAPI] ========================================")
+
+	respBody, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var parsed map[string]any
+	if isOAuth {
+		parsed, err = unwrapGeminiResponse(respBody)
+		if err == nil && parsed != nil {
+			respBody, _ = json.Marshal(parsed)
+		}
+	} else {
+		_ = json.Unmarshal(respBody, &parsed)
+	}
+
+	responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders)
+
+	contentType := resp.Header.Get("Content-Type")
+	if contentType == "" {
+		contentType = "application/json"
+	}
+	c.Data(resp.StatusCode, contentType, respBody)
+
+	if parsed != nil {
+		if u := extractGeminiUsage(parsed); u != nil {
+			return u, nil
+		}
+	}
+	return &ClaudeUsage{}, nil
+}
+
+func (s *GeminiMessagesCompatService) handleNativeStreamingResponse(c *gin.Context, resp *http.Response, startTime time.Time, isOAuth bool) (*geminiNativeStreamResult, error) {
+	// Log response headers for debugging
+	log.Printf("[GeminiAPI] ========== Streaming Response Headers ==========")
+	for key, values := range resp.Header {
+		if strings.HasPrefix(strings.ToLower(key), "x-ratelimit") {
+			log.Printf("[GeminiAPI] %s: %v", key, values)
+		}
+	}
+	log.Printf("[GeminiAPI] ====================================================")
+
+	if s.cfg != nil {
+		responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders)
+	}
+
+	c.Status(resp.StatusCode)
+	c.Header("Cache-Control", "no-cache")
+	c.Header("Connection", "keep-alive")
+	c.Header("X-Accel-Buffering", "no")
+
+	contentType := resp.Header.Get("Content-Type")
+	if contentType == "" {
+		contentType = "text/event-stream; charset=utf-8"
+	}
+	c.Header("Content-Type", contentType)
+
+	flusher, ok := c.Writer.(http.Flusher)
+	if !ok {
+		return nil, errors.New("streaming not supported")
+	}
+
+	reader := bufio.NewReader(resp.Body)
+	usage := &ClaudeUsage{}
+	var firstTokenMs *int
+
+	for {
+		line, err := reader.ReadString('\n')
+		if len(line) > 0 {
+			trimmed := strings.TrimRight(line, "\r\n")
+			if strings.HasPrefix(trimmed, "data:") {
+				payload := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
+				// Keepalive / done markers
+				if payload == "" || payload == "[DONE]" {
+					_, _ = io.WriteString(c.Writer, line)
+					flusher.Flush()
+				} else {
+					rawToWrite := payload
+
+					var parsed map[string]any
+					if isOAuth {
+						inner, err := unwrapGeminiResponse([]byte(payload))
+						if err == nil && inner != nil {
+							parsed = inner
+							if b, err := json.Marshal(inner); err == nil {
+								rawToWrite = string(b)
+							}
+						}
+					} else {
+						_ = json.Unmarshal([]byte(payload), &parsed)
+					}
+
+					if parsed != nil {
+						if u := extractGeminiUsage(parsed); u != nil {
+							usage = u
+						}
+					}
+
+					if firstTokenMs == nil {
+						ms := int(time.Since(startTime).Milliseconds())
+						firstTokenMs = &ms
+					}
+
+					if isOAuth {
+						// SSE format requires double newline (\n\n) to separate events
+						_, _ = fmt.Fprintf(c.Writer, "data: %s\n\n", rawToWrite)
+					} else {
+						// Pass-through for AI Studio responses.
+						_, _ = io.WriteString(c.Writer, line)
+					}
+					flusher.Flush()
+				}
+			} else {
+				_, _ = io.WriteString(c.Writer, line)
+				flusher.Flush()
+			}
+		}
+
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &geminiNativeStreamResult{usage: usage, firstTokenMs: firstTokenMs}, nil
+}
+// ForwardAIStudioGET forwards a GET request to AI Studio (generativelanguage.googleapis.com) for
+// endpoints like /v1beta/models and /v1beta/models/{model}.
+//
+// This is used to support Gemini SDKs that call models listing endpoints before generation.
+func (s *GeminiMessagesCompatService) ForwardAIStudioGET(ctx context.Context, account *Account, path string) (*UpstreamHTTPResult, error) {
+	if account == nil {
+		return nil, errors.New("account is nil")
+	}
+	path = strings.TrimSpace(path)
+	if path == "" || !strings.HasPrefix(path, "/") {
+		return nil, errors.New("invalid path")
+	}
+
+	baseURL := strings.TrimSpace(account.GetCredential("base_url"))
+	if baseURL == "" {
+		baseURL = geminicli.AIStudioBaseURL
+	}
+	normalizedBaseURL, err := s.validateUpstreamBaseURL(baseURL)
+	if err != nil {
+		return nil, err
+	}
+	fullURL := strings.TrimRight(normalizedBaseURL, "/") + path
+
+	var proxyURL string
+	if account.ProxyID != nil && account.Proxy != nil {
+		proxyURL = account.Proxy.URL()
+	}
+
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, fullURL, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	switch account.Type {
+	case AccountTypeAPIKey:
+		apiKey := strings.TrimSpace(account.GetCredential("api_key"))
+		if apiKey == "" {
+			return nil, errors.New("gemini api_key not configured")
+		}
+		req.Header.Set("x-goog-api-key", apiKey)
+	case AccountTypeOAuth:
+		if s.tokenProvider == nil {
+			return nil, errors.New("gemini token provider not configured")
+		}
+		accessToken, err := s.tokenProvider.GetAccessToken(ctx, account)
+		if err != nil {
+			return nil, err
+		}
+		req.Header.Set("Authorization", "Bearer "+accessToken)
+	default:
+		return nil, fmt.Errorf("unsupported account type: %s", account.Type)
+	}
+
+	resp, err := s.httpUpstream.Do(req, proxyURL, account.ID, account.Concurrency)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = resp.Body.Close() }()
+
+	body, _ := io.ReadAll(io.LimitReader(resp.Body, 8<<20))
+	wwwAuthenticate := resp.Header.Get("Www-Authenticate")
+	filteredHeaders := responseheaders.FilterHeaders(resp.Header, s.cfg.Security.ResponseHeaders)
+	if wwwAuthenticate != "" {
+		filteredHeaders.Set("Www-Authenticate", wwwAuthenticate)
+	}
+	return &UpstreamHTTPResult{
+		StatusCode: resp.StatusCode,
+		Headers:    filteredHeaders,
+		Body:       body,
+	}, nil
+}
+
+func unwrapGeminiResponse(raw []byte) (map[string]any, error) {
+	var outer map[string]any
+	if err := json.Unmarshal(raw, &outer); err != nil {
+		return nil, err
+	}
+	if resp, ok := outer["response"].(map[string]any); ok && resp != nil {
+		return resp, nil
+	}
+	return outer, nil
+}
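+// Code Assist wraps the standard Gemini payload in a "response" envelope,
+// e.g. (illustrative): {"response":{"candidates":[...],"usageMetadata":{...}}}.
+// unwrapGeminiResponse (above) returns the inner object in that case and the
+// body unchanged otherwise, so callers can treat both upstream shapes uniformly.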
+func convertGeminiToClaudeMessage(geminiResp map[string]any, originalModel string) (map[string]any, *ClaudeUsage) {
+	usage := extractGeminiUsage(geminiResp)
+	if usage == nil {
+		usage = &ClaudeUsage{}
+	}
+
+	contentBlocks := make([]any, 0)
+	sawToolUse := false
+	if candidates, ok := geminiResp["candidates"].([]any); ok && len(candidates) > 0 {
+		if cand, ok := candidates[0].(map[string]any); ok {
+			if content, ok := cand["content"].(map[string]any); ok {
+				if parts, ok := content["parts"].([]any); ok {
+					for _, part := range parts {
+						pm, ok := part.(map[string]any)
+						if !ok {
+							continue
+						}
+						if text, ok := pm["text"].(string); ok && text != "" {
+							contentBlocks = append(contentBlocks, map[string]any{
+								"type": "text",
+								"text": text,
+							})
+						}
+						if fc, ok := pm["functionCall"].(map[string]any); ok {
+							name, _ := fc["name"].(string)
+							if strings.TrimSpace(name) == "" {
+								name = "tool"
+							}
+							args := fc["args"]
+							sawToolUse = true
+							contentBlocks = append(contentBlocks, map[string]any{
+								"type":  "tool_use",
+								"id":    "toolu_" + randomHex(8),
+								"name":  name,
+								"input": args,
+							})
+						}
+					}
+				}
+			}
+		}
+	}
+
+	stopReason := mapGeminiFinishReasonToClaudeStopReason(extractGeminiFinishReason(geminiResp))
+	if sawToolUse {
+		stopReason = "tool_use"
+	}
+
+	resp := map[string]any{
+		"id":            "msg_" + randomHex(12),
+		"type":          "message",
+		"role":          "assistant",
+		"model":         originalModel,
+		"content":       contentBlocks,
+		"stop_reason":   stopReason,
+		"stop_sequence": nil,
+		"usage": map[string]any{
+			"input_tokens":  usage.InputTokens,
+			"output_tokens": usage.OutputTokens,
+		},
+	}
+
+	return resp, usage
+}
+
+func extractGeminiUsage(geminiResp map[string]any) *ClaudeUsage {
+	usageMeta, ok := geminiResp["usageMetadata"].(map[string]any)
+	if !ok || usageMeta == nil {
+		return nil
+	}
+	prompt, _ := asInt(usageMeta["promptTokenCount"])
+	cand, _ := asInt(usageMeta["candidatesTokenCount"])
+	return &ClaudeUsage{
+		InputTokens:  prompt,
+		OutputTokens: cand,
+	}
+}
+
+func asInt(v any) (int, bool) {
+	switch t := v.(type) {
+	case float64:
+		return int(t), true
+	case int:
+		return t, true
+	case int64:
+		return int(t), true
+	case json.Number:
+		i, err := t.Int64()
+		if err != nil {
+			return 0, false
+		}
+		return int(i), true
+	default:
+		return 0, false
+	}
+}
+
+func (s *GeminiMessagesCompatService) handleGeminiUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, body []byte) {
+	if s.rateLimitService != nil && (statusCode == 401 || statusCode == 403 || statusCode == 529) {
+		s.rateLimitService.HandleUpstreamError(ctx, account, statusCode, headers, body)
+		return
+	}
+	if statusCode != 429 {
+		return
+	}
+
+	oauthType := account.GeminiOAuthType()
+	tierID := account.GeminiTierID()
+	projectID := strings.TrimSpace(account.GetCredential("project_id"))
+	isCodeAssist := account.IsGeminiCodeAssist()
+
+	resetAt := ParseGeminiRateLimitResetTime(body)
+	if resetAt == nil {
+		// Pick a default reset time based on the account type.
+		var ra time.Time
+		if isCodeAssist {
+			// Code Assist: fallback cooldown by tier
+			cooldown := geminiCooldownForTier(tierID)
+			if s.rateLimitService != nil {
+				cooldown = s.rateLimitService.GeminiCooldown(ctx, account)
+			}
+			ra = time.Now().Add(cooldown)
+			log.Printf("[Gemini 429] Account %d (Code Assist, tier=%s, project=%s) rate limited, cooldown=%v", account.ID, tierID, projectID, time.Until(ra).Truncate(time.Second))
+		} else {
+			// API Key / AI Studio OAuth: reset at PST midnight.
+			if ts := nextGeminiDailyResetUnix(); ts != nil {
+				ra = time.Unix(*ts, 0)
+				log.Printf("[Gemini 429] Account %d (API Key/AI Studio, type=%s) rate limited, reset at PST midnight (%v)", account.ID, account.Type, ra)
+			} else {
+				// Last resort: 5 minutes.
+				ra = time.Now().Add(5 * time.Minute)
+				log.Printf("[Gemini 429] Account %d rate limited, fallback to 5min", account.ID)
+			}
+		}
+		_ = s.accountRepo.SetRateLimited(ctx, account.ID, ra)
+		return
+	}
+
+	// Use the reset time parsed from the response body.
+	resetTime := time.Unix(*resetAt, 0)
+	_ = s.accountRepo.SetRateLimited(ctx, account.ID, resetTime)
+	log.Printf("[Gemini 429] Account %d rate limited until %v (oauth_type=%s, tier=%s)",
+		account.ID, resetTime, oauthType, tierID)
+}
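+// Example of a quota-style 429 body the parser below understands (illustrative,
+// not a captured upstream response):
+//
+//	{
+//	  "error": {
+//	    "code": 429,
+//	    "status": "RESOURCE_EXHAUSTED",
+//	    "message": "Quota exceeded for quota metric 'requests per day'",
+//	    "details": [{"metadata": {"quotaResetDelay": "12.345s"}}]
+//	  }
+//	}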
+// ParseGeminiRateLimitResetTime parses a Gemini-style 429 response body and
+// returns the quota reset time as a Unix timestamp, or nil if none can be derived.
+func ParseGeminiRateLimitResetTime(body []byte) *int64 {
+	// Try to parse metadata.quotaResetDelay like "12.345s"
+	var parsed map[string]any
+	if err := json.Unmarshal(body, &parsed); err == nil {
+		if errObj, ok := parsed["error"].(map[string]any); ok {
+			if msg, ok := errObj["message"].(string); ok {
+				if looksLikeGeminiDailyQuota(msg) {
+					if ts := nextGeminiDailyResetUnix(); ts != nil {
+						return ts
+					}
+				}
+			}
+			if details, ok := errObj["details"].([]any); ok {
+				for _, d := range details {
+					dm, ok := d.(map[string]any)
+					if !ok {
+						continue
+					}
+					if meta, ok := dm["metadata"].(map[string]any); ok {
+						if v, ok := meta["quotaResetDelay"].(string); ok {
+							if dur, err := time.ParseDuration(v); err == nil {
+								ts := time.Now().Unix() + int64(dur.Seconds())
+								return &ts
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Match "Please retry in Xs"
+	matches := retryInRegex.FindStringSubmatch(string(body))
+	if len(matches) == 2 {
+		if dur, err := time.ParseDuration(matches[1] + "s"); err == nil {
+			ts := time.Now().Unix() + int64(math.Ceil(dur.Seconds()))
+			return &ts
+		}
+	}
+
+	return nil
+}
+
+func looksLikeGeminiDailyQuota(message string) bool {
+	// Daily-quota messages mention "per day" (e.g. "requests per day"); the
+	// previous variants of this check all reduced to this substring test.
+	return strings.Contains(strings.ToLower(message), "per day")
+}
+
+func nextGeminiDailyResetUnix() *int64 {
+	reset := geminiDailyResetTime(time.Now())
+	ts := reset.Unix()
+	return &ts
+}
+
+func extractGeminiFinishReason(geminiResp map[string]any) string {
+	if candidates, ok := geminiResp["candidates"].([]any); ok && len(candidates) > 0 {
+		if cand, ok := candidates[0].(map[string]any); ok {
+			if fr, ok := cand["finishReason"].(string); ok {
+				return fr
+			}
+		}
+	}
+	return ""
+}
+
+func extractGeminiParts(geminiResp map[string]any) []map[string]any {
+	if candidates, ok := geminiResp["candidates"].([]any); ok && len(candidates) > 0 {
+		if cand, ok := candidates[0].(map[string]any); ok {
+			if content, ok := cand["content"].(map[string]any); ok {
+				if partsAny, ok := content["parts"].([]any); ok && len(partsAny) > 0 {
+					out := make([]map[string]any, 0, len(partsAny))
+					for _, p := range partsAny {
+						pm, ok := p.(map[string]any)
+						if !ok {
+							continue
+						}
+						out = append(out, pm)
+					}
+					return out
+				}
+			}
+		}
+	}
+	return nil
+}
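+// computeGeminiTextDelta below tolerates both upstream streaming styles.
+// Illustrative sequences (assumed shapes, not captured traffic):
+//
+//	cumulative chunks:  seen="Hel", incoming="Hello" -> delta "lo"
+//	incremental chunks: seen="Hel", incoming="lo"    -> delta "lo"
+//	duplicate/rewind:   seen="Hello", incoming="Hel" -> delta "" (ignored)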
+func computeGeminiTextDelta(seen, incoming string) (delta, newSeen string) {
+	incoming = strings.TrimSuffix(incoming, "\u0000")
+	if incoming == "" {
+		return "", seen
+	}
+
+	// Cumulative mode: incoming contains full text so far.
+	if strings.HasPrefix(incoming, seen) {
+		return strings.TrimPrefix(incoming, seen), incoming
+	}
+	// Duplicate/rewind: ignore.
+	if strings.HasPrefix(seen, incoming) {
+		return "", seen
+	}
+	// Delta mode: treat incoming as incremental chunk.
+	return incoming, seen + incoming
+}
+
+func mapGeminiFinishReasonToClaudeStopReason(finishReason string) string {
+	switch strings.ToUpper(strings.TrimSpace(finishReason)) {
+	case "MAX_TOKENS":
+		return "max_tokens"
+	case "STOP":
+		return "end_turn"
+	default:
+		return "end_turn"
+	}
+}
+
+func convertClaudeMessagesToGeminiGenerateContent(body []byte) ([]byte, error) {
+	var req map[string]any
+	if err := json.Unmarshal(body, &req); err != nil {
+		return nil, err
+	}
+
+	toolUseIDToName := make(map[string]string)
+
+	systemText := extractClaudeSystemText(req["system"])
+	contents, err := convertClaudeMessagesToGeminiContents(req["messages"], toolUseIDToName)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]any)
+	if systemText != "" {
+		out["systemInstruction"] = map[string]any{
+			"parts": []any{map[string]any{"text": systemText}},
+		}
+	}
+	out["contents"] = contents
+
+	if tools := convertClaudeToolsToGeminiTools(req["tools"]); tools != nil {
+		out["tools"] = tools
+	}
+
+	generationConfig := convertClaudeGenerationConfig(req)
+	if generationConfig != nil {
+		out["generationConfig"] = generationConfig
+	}
+
+	stripGeminiFunctionIDs(out)
+	return json.Marshal(out)
+}
+
+func stripGeminiFunctionIDs(req map[string]any) {
+	// Defensive cleanup: some upstreams reject unexpected `id` fields in functionCall/functionResponse.
+	contents, ok := req["contents"].([]any)
+	if !ok {
+		return
+	}
+	for _, c := range contents {
+		cm, ok := c.(map[string]any)
+		if !ok {
+			continue
+		}
+		contentParts, ok := cm["parts"].([]any)
+		if !ok {
+			continue
+		}
+		for _, p := range contentParts {
+			pm, ok := p.(map[string]any)
+			if !ok {
+				continue
+			}
+			if fc, ok := pm["functionCall"].(map[string]any); ok && fc != nil {
+				delete(fc, "id")
+			}
+			if fr, ok := pm["functionResponse"].(map[string]any); ok && fr != nil {
+				delete(fr, "id")
+			}
+		}
+	}
+}
+
+func extractClaudeSystemText(system any) string {
+	switch v := system.(type) {
+	case string:
+		return strings.TrimSpace(v)
+	case []any:
+		var parts []string
+		for _, p := range v {
+			pm, ok := p.(map[string]any)
+			if !ok {
+				continue
+			}
+			if t, _ := pm["type"].(string); t != "text" {
+				continue
+			}
+			if text, ok := pm["text"].(string); ok && strings.TrimSpace(text) != "" {
+				parts = append(parts, text)
+			}
+		}
+		return strings.TrimSpace(strings.Join(parts, "\n"))
+	default:
+		return ""
+	}
+}
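+// Sketch of the overall request mapping performed by
+// convertClaudeMessagesToGeminiGenerateContent (field names are real, the
+// values are made up):
+//
+//	Claude: {"system":"Be terse","messages":[{"role":"user","content":"Hi"}]}
+//	Gemini: {"systemInstruction":{"parts":[{"text":"Be terse"}]},
+//	         "contents":[{"role":"user","parts":[{"text":"Hi"}]}]}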
bm["name"].(string) + if strings.TrimSpace(id) != "" && strings.TrimSpace(name) != "" { + toolUseIDToName[id] = name + } + parts = append(parts, map[string]any{ + "functionCall": map[string]any{ + "name": name, + "args": bm["input"], + }, + }) + case "tool_result": + toolUseID, _ := bm["tool_use_id"].(string) + name := toolUseIDToName[toolUseID] + if name == "" { + name = "tool" + } + parts = append(parts, map[string]any{ + "functionResponse": map[string]any{ + "name": name, + "response": map[string]any{ + "content": extractClaudeContentText(bm["content"]), + }, + }, + }) + case "image": + if src, ok := bm["source"].(map[string]any); ok { + if srcType, _ := src["type"].(string); srcType == "base64" { + mediaType, _ := src["media_type"].(string) + data, _ := src["data"].(string) + if mediaType != "" && data != "" { + parts = append(parts, map[string]any{ + "inlineData": map[string]any{ + "mimeType": mediaType, + "data": data, + }, + }) + } + } + } + default: + // best-effort: preserve unknown blocks as text + if b, err := json.Marshal(bm); err == nil { + parts = append(parts, map[string]any{"text": string(b)}) + } + } + } + default: + // ignore + } + + out = append(out, map[string]any{ + "role": gRole, + "parts": parts, + }) + } + return out, nil +} + +func extractClaudeContentText(v any) string { + switch t := v.(type) { + case string: + return t + case []any: + var sb strings.Builder + for _, part := range t { + pm, ok := part.(map[string]any) + if !ok { + continue + } + if pm["type"] == "text" { + if text, ok := pm["text"].(string); ok { + _, _ = sb.WriteString(text) + } + } + } + return sb.String() + default: + b, _ := json.Marshal(t) + return string(b) + } +} + +func convertClaudeToolsToGeminiTools(tools any) []any { + arr, ok := tools.([]any) + if !ok || len(arr) == 0 { + return nil + } + + funcDecls := make([]any, 0, len(arr)) + for _, t := range arr { + tm, ok := t.(map[string]any) + if !ok { + continue + } + + var name, desc string + var params any + + // 检查是否为 custom 类型工具 (MCP) + toolType, _ := tm["type"].(string) + if toolType == "custom" { + // Custom 格式: 从 custom 字段获取 description 和 input_schema + custom, ok := tm["custom"].(map[string]any) + if !ok { + continue + } + name, _ = tm["name"].(string) + desc, _ = custom["description"].(string) + params = custom["input_schema"] + } else { + // 标准格式: 从顶层字段获取 + name, _ = tm["name"].(string) + desc, _ = tm["description"].(string) + params = tm["input_schema"] + } + + if name == "" { + continue + } + + // 为 nil params 提供默认值 + if params == nil { + params = map[string]any{ + "type": "object", + "properties": map[string]any{}, + } + } + // 清理 JSON Schema + cleanedParams := cleanToolSchema(params) + + funcDecls = append(funcDecls, map[string]any{ + "name": name, + "description": desc, + "parameters": cleanedParams, + }) + } + + if len(funcDecls) == 0 { + return nil + } + return []any{ + map[string]any{ + "functionDeclarations": funcDecls, + }, + } +} + +// cleanToolSchema 清理工具的 JSON Schema,移除 Gemini 不支持的字段 +func cleanToolSchema(schema any) any { + if schema == nil { + return nil + } + + switch v := schema.(type) { + case map[string]any: + cleaned := make(map[string]any) + for key, value := range v { + // 跳过不支持的字段 + if key == "$schema" || key == "$id" || key == "$ref" || + key == "additionalProperties" || key == "minLength" || + key == "maxLength" || key == "minItems" || key == "maxItems" { + continue + } + // 递归清理嵌套对象 + cleaned[key] = cleanToolSchema(value) + } + // 规范化 type 字段为大写 + if typeVal, ok := cleaned["type"].(string); ok { + 
cleaned["type"] = strings.ToUpper(typeVal) + } + return cleaned + case []any: + cleaned := make([]any, len(v)) + for i, item := range v { + cleaned[i] = cleanToolSchema(item) + } + return cleaned + default: + return v + } +} + +func convertClaudeGenerationConfig(req map[string]any) map[string]any { + out := make(map[string]any) + if mt, ok := asInt(req["max_tokens"]); ok && mt > 0 { + out["maxOutputTokens"] = mt + } + if temp, ok := req["temperature"].(float64); ok { + out["temperature"] = temp + } + if topP, ok := req["top_p"].(float64); ok { + out["topP"] = topP + } + if stopSeq, ok := req["stop_sequences"].([]any); ok && len(stopSeq) > 0 { + out["stopSequences"] = stopSeq + } + if len(out) == 0 { + return nil + } + return out +} diff --git a/backend/internal/service/gemini_messages_compat_service_test.go b/backend/internal/service/gemini_messages_compat_service_test.go new file mode 100644 index 00000000..d49f2eb3 --- /dev/null +++ b/backend/internal/service/gemini_messages_compat_service_test.go @@ -0,0 +1,128 @@ +package service + +import ( + "testing" +) + +// TestConvertClaudeToolsToGeminiTools_CustomType 测试custom类型工具转换 +func TestConvertClaudeToolsToGeminiTools_CustomType(t *testing.T) { + tests := []struct { + name string + tools any + expectedLen int + description string + }{ + { + name: "Standard tools", + tools: []any{ + map[string]any{ + "name": "get_weather", + "description": "Get weather info", + "input_schema": map[string]any{"type": "object"}, + }, + }, + expectedLen: 1, + description: "标准工具格式应该正常转换", + }, + { + name: "Custom type tool (MCP format)", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "mcp_tool", + "custom": map[string]any{ + "description": "MCP tool description", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "Custom类型工具应该从custom字段读取", + }, + { + name: "Mixed standard and custom tools", + tools: []any{ + map[string]any{ + "name": "standard_tool", + "description": "Standard", + "input_schema": map[string]any{"type": "object"}, + }, + map[string]any{ + "type": "custom", + "name": "custom_tool", + "custom": map[string]any{ + "description": "Custom", + "input_schema": map[string]any{"type": "object"}, + }, + }, + }, + expectedLen: 1, + description: "混合工具应该都能正确转换", + }, + { + name: "Custom tool without custom field", + tools: []any{ + map[string]any{ + "type": "custom", + "name": "invalid_custom", + // 缺少 custom 字段 + }, + }, + expectedLen: 0, // 应该被跳过 + description: "缺少custom字段的custom工具应该被跳过", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := convertClaudeToolsToGeminiTools(tt.tools) + + if tt.expectedLen == 0 { + if result != nil { + t.Errorf("%s: expected nil result, got %v", tt.description, result) + } + return + } + + if result == nil { + t.Fatalf("%s: expected non-nil result", tt.description) + } + + if len(result) != 1 { + t.Errorf("%s: expected 1 tool declaration, got %d", tt.description, len(result)) + return + } + + toolDecl, ok := result[0].(map[string]any) + if !ok { + t.Fatalf("%s: result[0] is not map[string]any", tt.description) + } + + funcDecls, ok := toolDecl["functionDeclarations"].([]any) + if !ok { + t.Fatalf("%s: functionDeclarations is not []any", tt.description) + } + + toolsArr, _ := tt.tools.([]any) + expectedFuncCount := 0 + for _, tool := range toolsArr { + toolMap, _ := tool.(map[string]any) + if toolMap["name"] != "" { + // 检查是否为有效的custom工具 + if toolMap["type"] == "custom" { + if toolMap["custom"] != nil { + expectedFuncCount++ + 
+						}
+					} else {
+						expectedFuncCount++
+					}
+				}
+			}
+
+			if len(funcDecls) != expectedFuncCount {
+				t.Errorf("%s: expected %d function declarations, got %d",
+					tt.description, expectedFuncCount, len(funcDecls))
+			}
+		})
+	}
+}
diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go
new file mode 100644
index 00000000..c99cb87d
--- /dev/null
+++ b/backend/internal/service/gemini_multiplatform_test.go
@@ -0,0 +1,609 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/stretchr/testify/require"
+)
+
+// mockAccountRepoForGemini is a mock account repository for the Gemini tests.
+type mockAccountRepoForGemini struct {
+	accounts     []Account
+	accountsByID map[int64]*Account
+}
+
+func (m *mockAccountRepoForGemini) GetByID(ctx context.Context, id int64) (*Account, error) {
+	if acc, ok := m.accountsByID[id]; ok {
+		return acc, nil
+	}
+	return nil, errors.New("account not found")
+}
+
+func (m *mockAccountRepoForGemini) GetByIDs(ctx context.Context, ids []int64) ([]*Account, error) {
+	var result []*Account
+	for _, id := range ids {
+		if acc, ok := m.accountsByID[id]; ok {
+			result = append(result, acc)
+		}
+	}
+	return result, nil
+}
+
+func (m *mockAccountRepoForGemini) ExistsByID(ctx context.Context, id int64) (bool, error) {
+	if m.accountsByID == nil {
+		return false, nil
+	}
+	_, ok := m.accountsByID[id]
+	return ok, nil
+}
+
+func (m *mockAccountRepoForGemini) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) {
+	var result []Account
+	for _, acc := range m.accounts {
+		if acc.Platform == platform && acc.IsSchedulable() {
+			result = append(result, acc)
+		}
+	}
+	return result, nil
+}
+
+func (m *mockAccountRepoForGemini) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) {
+	// The tests do not distinguish groupID; filter by platform only.
+	return m.ListSchedulableByPlatform(ctx, platform)
+}
+
+// Stub methods to implement AccountRepository interface
+func (m *mockAccountRepoForGemini) Create(ctx context.Context, account *Account) error { return nil }
+func (m *mockAccountRepoForGemini) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) Update(ctx context.Context, account *Account) error { return nil }
+func (m *mockAccountRepoForGemini) Delete(ctx context.Context, id int64) error { return nil }
+func (m *mockAccountRepoForGemini) List(ctx context.Context, params pagination.PaginationParams) ([]Account, *pagination.PaginationResult, error) {
+	return nil, nil, nil
+}
+func (m *mockAccountRepoForGemini) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]Account, *pagination.PaginationResult, error) {
+	return nil, nil, nil
+}
+func (m *mockAccountRepoForGemini) ListByGroup(ctx context.Context, groupID int64) ([]Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) ListActive(ctx context.Context) ([]Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) ListByPlatform(ctx context.Context, platform string) ([]Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) UpdateLastUsed(ctx context.Context, id int64) error { return nil }
+func (m *mockAccountRepoForGemini) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) SetError(ctx context.Context, id int64, errorMsg string) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) SetSchedulable(ctx context.Context, id int64, schedulable bool) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) {
+	return 0, nil
+}
+func (m *mockAccountRepoForGemini) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) ListSchedulable(ctx context.Context) ([]Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]Account, error) {
+	return nil, nil
+}
+func (m *mockAccountRepoForGemini) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]Account, error) {
+	var result []Account
+	platformSet := make(map[string]bool)
+	for _, p := range platforms {
+		platformSet[p] = true
+	}
+	for _, acc := range m.accounts {
+		if platformSet[acc.Platform] && acc.IsSchedulable() {
+			result = append(result, acc)
+		}
+	}
+	return result, nil
+}
+func (m *mockAccountRepoForGemini) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]Account, error) {
+	return m.ListSchedulableByPlatforms(ctx, platforms)
+}
+func (m *mockAccountRepoForGemini) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) SetOverloaded(ctx context.Context, id int64, until time.Time) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) ClearTempUnschedulable(ctx context.Context, id int64) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) ClearRateLimit(ctx context.Context, id int64) error { return nil }
+func (m *mockAccountRepoForGemini) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error {
+	return nil
+}
+func (m *mockAccountRepoForGemini) BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) {
+	return 0, nil
+}
+
+// Verify interface implementation
+var _ AccountRepository = (*mockAccountRepoForGemini)(nil)
+
+// mockGroupRepoForGemini is a mock group repository for the Gemini tests.
+type mockGroupRepoForGemini struct {
+	groups           map[int64]*Group
+	getByIDCalls     int
+	getByIDLiteCalls int
+}
+
+func (m *mockGroupRepoForGemini) GetByID(ctx context.Context, id int64) (*Group, error) {
+	m.getByIDCalls++
+	if g, ok := m.groups[id]; ok {
+		return g, nil
+	}
+	return nil, errors.New("group not found")
+}
+
+func (m *mockGroupRepoForGemini) GetByIDLite(ctx context.Context, id int64) (*Group, error) {
+	m.getByIDLiteCalls++
+	if g, ok := m.groups[id]; ok {
+		return g, nil
+	}
+	return nil, errors.New("group not found")
+}
+
+// Stub methods to implement GroupRepository interface
+func (m *mockGroupRepoForGemini) Create(ctx context.Context, group *Group) error { return nil }
+func (m *mockGroupRepoForGemini) Update(ctx context.Context, group *Group) error { return nil }
+func (m *mockGroupRepoForGemini) Delete(ctx context.Context, id int64) error { return nil }
+func (m *mockGroupRepoForGemini) DeleteCascade(ctx context.Context, id int64) ([]int64, error) {
+	return nil, nil
+}
+func (m *mockGroupRepoForGemini) List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) {
+	return nil, nil, nil
+}
+func (m *mockGroupRepoForGemini) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) {
+	return nil, nil, nil
+}
+func (m *mockGroupRepoForGemini) ListActive(ctx context.Context) ([]Group, error) { return nil, nil }
+func (m *mockGroupRepoForGemini) ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error) {
+	return nil, nil
+}
+func (m *mockGroupRepoForGemini) ExistsByName(ctx context.Context, name string) (bool, error) {
+	return false, nil
+}
+func (m *mockGroupRepoForGemini) GetAccountCount(ctx context.Context, groupID int64) (int64, error) {
+	return 0, nil
+}
+func (m *mockGroupRepoForGemini) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) {
+	return 0, nil
+}
+
+var _ GroupRepository = (*mockGroupRepoForGemini)(nil)
+
+// mockGatewayCacheForGemini is a mock gateway cache for the Gemini tests.
+type mockGatewayCacheForGemini struct {
+	sessionBindings map[string]int64
+}
+
+func (m *mockGatewayCacheForGemini) GetSessionAccountID(ctx context.Context, groupID int64, sessionHash string) (int64, error) {
+	if id, ok := m.sessionBindings[sessionHash]; ok {
+		return id, nil
+	}
+	return 0, errors.New("not found")
+}
+
+func (m *mockGatewayCacheForGemini) SetSessionAccountID(ctx context.Context, groupID int64, sessionHash string, accountID int64, ttl time.Duration) error {
+	if m.sessionBindings == nil {
+		m.sessionBindings = make(map[string]int64)
+	}
+	m.sessionBindings[sessionHash] = accountID
+	return nil
+}
+
+func (m *mockGatewayCacheForGemini) RefreshSessionTTL(ctx context.Context, groupID int64, sessionHash string, ttl time.Duration) error {
+	return nil
+}
+
+// TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform verifies single-platform selection for Gemini.
+func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiPlatform(t *testing.T) {
+	ctx := context.Background()
+
+	repo := &mockAccountRepoForGemini{
+		accounts: []Account{
+			{ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+			{ID: 2, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true},
+			{ID: 3, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, // should be isolated
+		},
+		accountsByID: map[int64]*Account{},
+	}
+	for i := range repo.accounts {
+		repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	// With no group, the gemini platform is used.
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil)
+	require.NoError(t, err)
+	require.NotNil(t, acc)
+	require.Equal(t, int64(1), acc.ID, "should select the highest-priority gemini account")
+	require.Equal(t, PlatformGemini, acc.Platform, "with no group, only gemini platform accounts should be returned")
+}
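+
+// TestMockGatewayCacheForGemini_SetThenGet is a minimal sanity check of the
+// cache mock above; it exercises only the mock itself, not the production
+// scheduler, and is illustrative of how the session-binding map behaves.
+func TestMockGatewayCacheForGemini_SetThenGet(t *testing.T) {
+	ctx := context.Background()
+	cache := &mockGatewayCacheForGemini{}
+
+	require.NoError(t, cache.SetSessionAccountID(ctx, 1, "gemini:sess", 42, time.Minute))
+
+	id, err := cache.GetSessionAccountID(ctx, 1, "gemini:sess")
+	require.NoError(t, err)
+	require.Equal(t, int64(42), id)
+
+	_, err = cache.GetSessionAccountID(ctx, 1, "missing")
+	require.Error(t, err)
+}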
+
+func TestGeminiMessagesCompatService_GroupResolution_ReusesContextGroup(t *testing.T) {
+	ctx := context.Background()
+	groupID := int64(7)
+	group := &Group{
+		ID:       groupID,
+		Platform: PlatformGemini,
+		Status:   StatusActive,
+		Hydrated: true,
+	}
+	ctx = context.WithValue(ctx, ctxkey.Group, group)
+
+	repo := &mockAccountRepoForGemini{
+		accounts: []Account{
+			{ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+		},
+		accountsByID: map[int64]*Account{},
+	}
+	for i := range repo.accounts {
+		repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "gemini-2.5-flash", nil)
+	require.NoError(t, err)
+	require.NotNil(t, acc)
+	require.Equal(t, 0, groupRepo.getByIDCalls)
+	require.Equal(t, 0, groupRepo.getByIDLiteCalls)
+}
+
+func TestGeminiMessagesCompatService_GroupResolution_UsesLiteFetch(t *testing.T) {
+	ctx := context.Background()
+	groupID := int64(7)
+
+	repo := &mockAccountRepoForGemini{
+		accounts: []Account{
+			{ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+		},
+		accountsByID: map[int64]*Account{},
+	}
+	for i := range repo.accounts {
+		repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{
+		groups: map[int64]*Group{
+			groupID: {ID: groupID, Platform: PlatformGemini},
+		},
+	}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "gemini-2.5-flash", nil)
+	require.NoError(t, err)
+	require.NotNil(t, acc)
+	require.Equal(t, 0, groupRepo.getByIDCalls)
+	require.Equal(t, 1, groupRepo.getByIDLiteCalls)
+}
+
+// TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_AntigravityGroup verifies selection for an antigravity group.
+func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_AntigravityGroup(t *testing.T) {
+	ctx := context.Background()
+
+	repo := &mockAccountRepoForGemini{
+		accounts: []Account{
+			{ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},      // should be isolated
+			{ID: 2, Platform: PlatformAntigravity, Priority: 1, Status: StatusActive, Schedulable: true}, // should be selected
+		},
+		accountsByID: map[int64]*Account{},
+	}
+	for i := range repo.accounts {
+		repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{
+		groups: map[int64]*Group{
+			1: {ID: 1, Platform: PlatformAntigravity},
+		},
+	}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	groupID := int64(1)
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "gemini-2.5-flash", nil)
+	require.NoError(t, err)
+	require.NotNil(t, acc)
+	require.Equal(t, int64(2), acc.ID)
+	require.Equal(t, PlatformAntigravity, acc.Platform, "an antigravity group should only return antigravity accounts")
+}
+
+// TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_OAuthPreferred verifies that OAuth accounts are preferred.
+func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_OAuthPreferred(t *testing.T) {
+	ctx := context.Background()
+
+	repo := &mockAccountRepoForGemini{
+		accounts: []Account{
+			{ID: 1, Platform: PlatformGemini, Type: AccountTypeAPIKey, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: nil},
+			{ID: 2, Platform: PlatformGemini, Type: AccountTypeOAuth, Priority: 1, Status: StatusActive, Schedulable: true, LastUsedAt: nil},
+		},
+		accountsByID: map[int64]*Account{},
+	}
+	for i := range repo.accounts {
+		repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil)
+	require.NoError(t, err)
+	require.NotNil(t, acc)
+	require.Equal(t, int64(2), acc.ID, "with equal priority and both unused, the OAuth account should be preferred")
+	require.Equal(t, AccountTypeOAuth, acc.Type)
+}
+
+// TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_NoAvailableAccounts verifies behavior with no available accounts.
+func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_NoAvailableAccounts(t *testing.T) {
+	ctx := context.Background()
+
+	repo := &mockAccountRepoForGemini{
+		accounts:     []Account{},
+		accountsByID: map[int64]*Account{},
+	}
+
+	cache := &mockGatewayCacheForGemini{}
+	groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+	svc := &GeminiMessagesCompatService{
+		accountRepo: repo,
+		groupRepo:   groupRepo,
+		cache:       cache,
+	}
+
+	acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "", "gemini-2.5-flash", nil)
+	require.Error(t, err)
+	require.Nil(t, acc)
+	require.Contains(t, err.Error(), "no available")
+}
+
+// TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_StickySession verifies sticky-session behavior.
+func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_StickySession(t *testing.T) {
+	ctx := context.Background()
+
+	t.Run("sticky session hit on the same platform", func(t *testing.T) {
+		repo := &mockAccountRepoForGemini{
+			accounts: []Account{
+				{ID: 1, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true},
+				{ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+			},
+			accountsByID: map[int64]*Account{},
+		}
+		for i := range repo.accounts {
+			repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+		}
+
+		// Note: cache keys use the "gemini:" prefix.
+		cache := &mockGatewayCacheForGemini{
+			sessionBindings: map[string]int64{"gemini:session-123": 1},
+		}
+		groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+		svc := &GeminiMessagesCompatService{
+			accountRepo: repo,
+			groupRepo:   groupRepo,
+			cache:       cache,
+		}
+
+		acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "session-123", "gemini-2.5-flash", nil)
+		require.NoError(t, err)
+		require.NotNil(t, acc)
+		require.Equal(t, int64(1), acc.ID, "should return the account bound to the sticky session")
+	})
+
+	t.Run("sticky session platform mismatch falls back", func(t *testing.T) {
+		repo := &mockAccountRepoForGemini{
+			accounts: []Account{
+				{ID: 1, Platform: PlatformAntigravity, Priority: 2, Status: StatusActive, Schedulable: true}, // bound to the sticky session
+				{ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+			},
+			accountsByID: map[int64]*Account{},
+		}
+		for i := range repo.accounts {
+			repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+		}
+
+		cache := &mockGatewayCacheForGemini{
+			sessionBindings: map[string]int64{"gemini:session-123": 1}, // bound to an antigravity account
+		}
+		groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+		svc := &GeminiMessagesCompatService{
+			accountRepo: repo,
+			groupRepo:   groupRepo,
+			cache:       cache,
+		}
+
+		// With no group the gemini platform is used; the sticky session's antigravity account does not match.
+		acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "session-123", "gemini-2.5-flash", nil)
+		require.NoError(t, err)
+		require.NotNil(t, acc)
+		require.Equal(t, int64(2), acc.ID, "on sticky-session platform mismatch, a gemini account should be selected as fallback")
+		require.Equal(t, PlatformGemini, acc.Platform)
+	})
+
+	t.Run("sticky session misses an unprefixed cache key", func(t *testing.T) {
+		repo := &mockAccountRepoForGemini{
+			accounts: []Account{
+				{ID: 1, Platform: PlatformGemini, Priority: 2, Status: StatusActive, Schedulable: true},
+				{ID: 2, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true},
+			},
+			accountsByID: map[int64]*Account{},
+		}
+		for i := range repo.accounts {
+			repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i]
+		}
+
+		// The cache key lacks the "gemini:" prefix and must not hit.
+		cache := &mockGatewayCacheForGemini{
+			sessionBindings: map[string]int64{"session-123": 1},
+		}
+		groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}}
+
+		svc := &GeminiMessagesCompatService{
+			accountRepo: repo,
+			groupRepo:   groupRepo,
+			cache:       cache,
+		}
+
+		acc, err := svc.SelectAccountForModelWithExclusions(ctx, nil, "session-123", "gemini-2.5-flash", nil)
+		require.NoError(t, err)
+		require.NotNil(t, acc)
+		// Sticky session missed; selection falls back to priority order.
+		require.Equal(t, int64(2), acc.ID, "on a sticky-session miss, selection should follow priority order")
+	})
+}
+
+// TestGeminiPlatformRouting_DocumentRouteDecision documents the platform routing decision logic.
+func TestGeminiPlatformRouting_DocumentRouteDecision(t *testing.T) {
+	tests := []struct {
+		name            string
+		platform        string
+		expectedService string // "gemini" means ForwardNative, "antigravity" means ForwardGemini
+	}{
+		{
+			name:            "Gemini platform routes to ForwardNative",
+			platform:        PlatformGemini,
+			expectedService: "gemini",
+		},
+		{
+			name:            "Antigravity platform routes to ForwardGemini",
+			platform:        PlatformAntigravity,
+			expectedService: "antigravity",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			account := &Account{Platform: tt.platform}
+
+			// Simulate the handler-layer routing logic.
+			var serviceName string
+			if account.Platform == PlatformAntigravity {
+				serviceName = "antigravity"
+			} else {
+				serviceName = "gemini"
+			}
+
+			require.Equal(t, tt.expectedService, serviceName,
+				"platform %s should route to the %s service", tt.platform, tt.expectedService)
+		})
+	}
+}
+
+func TestGeminiMessagesCompatService_isModelSupportedByAccount(t *testing.T) {
+	svc := &GeminiMessagesCompatService{}
+
+	tests := []struct {
+		name     string
+		account  *Account
+		model    string
+		expected bool
+	}{
+		{
+			name:     "Antigravity platform supports gemini models",
+			account:  &Account{Platform: PlatformAntigravity},
+			model:    "gemini-2.5-flash",
+			expected: true,
+		},
+		{
+			name:     "Antigravity platform supports claude models",
+			account:  &Account{Platform: PlatformAntigravity},
+			model:    "claude-3-5-sonnet-20241022",
+			expected: true,
+		},
+		{
+			name:     "Antigravity platform does not support gpt models",
+			account:  &Account{Platform: PlatformAntigravity},
+			model:    "gpt-4",
+			expected: false,
+		},
+		{
+			name:     "Gemini platform without mapping config supports all models",
+			account:  &Account{Platform: PlatformGemini},
+			model:    "gemini-2.5-flash",
+			expected: true,
+		},
+		{
+			name: "Gemini platform with mapping config only supports configured models",
+			account: &Account{
+				Platform:    PlatformGemini,
+				Credentials: map[string]any{"model_mapping": map[string]any{"gemini-1.5-pro": "x"}},
+			},
+			model:    "gemini-2.5-flash",
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := svc.isModelSupportedByAccount(tt.account, tt.model)
+			require.Equal(t, tt.expected, got)
+		})
+	}
+}
diff --git a/backend/internal/service/gemini_oauth.go b/backend/internal/service/gemini_oauth.go
new file mode 100644
index 00000000..d129ae52 --- /dev/null +++ b/backend/internal/service/gemini_oauth.go @@ -0,0 +1,13 @@ +package service + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" +) + +// GeminiOAuthClient performs Google OAuth token exchange/refresh for Gemini integration. +type GeminiOAuthClient interface { + ExchangeCode(ctx context.Context, oauthType, code, codeVerifier, redirectURI, proxyURL string) (*geminicli.TokenResponse, error) + RefreshToken(ctx context.Context, oauthType, refreshToken, proxyURL string) (*geminicli.TokenResponse, error) +} diff --git a/backend/internal/service/gemini_oauth_service.go b/backend/internal/service/gemini_oauth_service.go new file mode 100644 index 00000000..bc84baeb --- /dev/null +++ b/backend/internal/service/gemini_oauth_service.go @@ -0,0 +1,1074 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" + "github.com/Wei-Shaw/sub2api/internal/pkg/httpclient" +) + +const ( + // Canonical tier IDs used by sub2api (2026-aligned). + GeminiTierGoogleOneFree = "google_one_free" + GeminiTierGoogleAIPro = "google_ai_pro" + GeminiTierGoogleAIUltra = "google_ai_ultra" + GeminiTierGCPStandard = "gcp_standard" + GeminiTierGCPEnterprise = "gcp_enterprise" + GeminiTierAIStudioFree = "aistudio_free" + GeminiTierAIStudioPaid = "aistudio_paid" + GeminiTierGoogleOneUnknown = "google_one_unknown" + + // Legacy/compat tier IDs that may exist in historical data or upstream responses. + legacyTierAIPremium = "AI_PREMIUM" + legacyTierGoogleOneStandard = "GOOGLE_ONE_STANDARD" + legacyTierGoogleOneBasic = "GOOGLE_ONE_BASIC" + legacyTierFree = "FREE" + legacyTierGoogleOneUnknown = "GOOGLE_ONE_UNKNOWN" + legacyTierGoogleOneUnlimited = "GOOGLE_ONE_UNLIMITED" +) + +const ( + GB = 1024 * 1024 * 1024 + TB = 1024 * GB + + StorageTierUnlimited = 100 * TB // 100TB + StorageTierAIPremium = 2 * TB // 2TB + StorageTierStandard = 200 * GB // 200GB + StorageTierBasic = 100 * GB // 100GB + StorageTierFree = 15 * GB // 15GB +) + +type GeminiOAuthService struct { + sessionStore *geminicli.SessionStore + proxyRepo ProxyRepository + oauthClient GeminiOAuthClient + codeAssist GeminiCliCodeAssistClient + cfg *config.Config +} + +type GeminiOAuthCapabilities struct { + AIStudioOAuthEnabled bool `json:"ai_studio_oauth_enabled"` + RequiredRedirectURIs []string `json:"required_redirect_uris"` +} + +func NewGeminiOAuthService( + proxyRepo ProxyRepository, + oauthClient GeminiOAuthClient, + codeAssist GeminiCliCodeAssistClient, + cfg *config.Config, +) *GeminiOAuthService { + return &GeminiOAuthService{ + sessionStore: geminicli.NewSessionStore(), + proxyRepo: proxyRepo, + oauthClient: oauthClient, + codeAssist: codeAssist, + cfg: cfg, + } +} + +func (s *GeminiOAuthService) GetOAuthConfig() *GeminiOAuthCapabilities { + // AI Studio OAuth is only enabled when the operator configures a custom OAuth client. 
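+// For example (illustrative values, not shipped configuration): setting
+// GEMINI_OAUTH_CLIENT_ID=my-app.apps.googleusercontent.com together with a
+// matching GEMINI_OAUTH_CLIENT_SECRET enables AI Studio OAuth, while leaving
+// both empty (or equal to the built-in Gemini CLI client) keeps it disabled.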
+ clientID := strings.TrimSpace(s.cfg.Gemini.OAuth.ClientID) + clientSecret := strings.TrimSpace(s.cfg.Gemini.OAuth.ClientSecret) + enabled := clientID != "" && clientSecret != "" && + (clientID != geminicli.GeminiCLIOAuthClientID || clientSecret != geminicli.GeminiCLIOAuthClientSecret) + + return &GeminiOAuthCapabilities{ + AIStudioOAuthEnabled: enabled, + RequiredRedirectURIs: []string{geminicli.AIStudioOAuthRedirectURI}, + } +} + +type GeminiAuthURLResult struct { + AuthURL string `json:"auth_url"` + SessionID string `json:"session_id"` + State string `json:"state"` +} + +func (s *GeminiOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64, redirectURI, projectID, oauthType, tierID string) (*GeminiAuthURLResult, error) { + state, err := geminicli.GenerateState() + if err != nil { + return nil, fmt.Errorf("failed to generate state: %w", err) + } + codeVerifier, err := geminicli.GenerateCodeVerifier() + if err != nil { + return nil, fmt.Errorf("failed to generate code verifier: %w", err) + } + codeChallenge := geminicli.GenerateCodeChallenge(codeVerifier) + sessionID, err := geminicli.GenerateSessionID() + if err != nil { + return nil, fmt.Errorf("failed to generate session ID: %w", err) + } + + var proxyURL string + if proxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *proxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // OAuth client selection: + // - code_assist: always use built-in Gemini CLI OAuth client (public) + // - google_one: always use built-in Gemini CLI OAuth client (public) + // - ai_studio: requires a user-provided OAuth client + oauthCfg := geminicli.OAuthConfig{ + ClientID: s.cfg.Gemini.OAuth.ClientID, + ClientSecret: s.cfg.Gemini.OAuth.ClientSecret, + Scopes: s.cfg.Gemini.OAuth.Scopes, + } + if oauthType == "code_assist" || oauthType == "google_one" { + // Force use of built-in Gemini CLI OAuth client + oauthCfg.ClientID = "" + oauthCfg.ClientSecret = "" + } + + session := &geminicli.OAuthSession{ + State: state, + CodeVerifier: codeVerifier, + ProxyURL: proxyURL, + RedirectURI: redirectURI, + ProjectID: strings.TrimSpace(projectID), + TierID: canonicalGeminiTierIDForOAuthType(oauthType, tierID), + OAuthType: oauthType, + CreatedAt: time.Now(), + } + s.sessionStore.Set(sessionID, session) + + effectiveCfg, err := geminicli.EffectiveOAuthConfig(oauthCfg, oauthType) + if err != nil { + return nil, err + } + + isBuiltinClient := effectiveCfg.ClientID == geminicli.GeminiCLIOAuthClientID && + effectiveCfg.ClientSecret == geminicli.GeminiCLIOAuthClientSecret + + // AI Studio OAuth requires a user-provided OAuth client (built-in Gemini CLI client is scope-restricted). + if oauthType == "ai_studio" && isBuiltinClient { + return nil, fmt.Errorf("AI Studio OAuth requires a custom OAuth Client (GEMINI_OAUTH_CLIENT_ID / GEMINI_OAUTH_CLIENT_SECRET). 
If you don't want to configure an OAuth client, please use an AI Studio API Key account instead") + } + + // Redirect URI strategy: + // - built-in Gemini CLI OAuth client: use upstream redirect URI (codeassist.google.com/authcode) + // - custom OAuth client: use localhost callback for manual copy/paste flow + if isBuiltinClient { + redirectURI = geminicli.GeminiCLIRedirectURI + } else { + redirectURI = geminicli.AIStudioOAuthRedirectURI + } + session.RedirectURI = redirectURI + s.sessionStore.Set(sessionID, session) + + authURL, err := geminicli.BuildAuthorizationURL(effectiveCfg, state, codeChallenge, redirectURI, session.ProjectID, oauthType) + if err != nil { + return nil, err + } + + return &GeminiAuthURLResult{ + AuthURL: authURL, + SessionID: sessionID, + State: state, + }, nil +} + +type GeminiExchangeCodeInput struct { + SessionID string + State string + Code string + ProxyID *int64 + OAuthType string // "code_assist" 或 "ai_studio" + // TierID is a user-selected tier to be used when auto detection is unavailable or fails. + // If empty, the service will fall back to the tier stored in the OAuth session (if any). + TierID string +} + +type GeminiTokenInfo struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int64 `json:"expires_in"` + ExpiresAt int64 `json:"expires_at"` + TokenType string `json:"token_type"` + Scope string `json:"scope,omitempty"` + ProjectID string `json:"project_id,omitempty"` + OAuthType string `json:"oauth_type,omitempty"` // "code_assist" 或 "ai_studio" + TierID string `json:"tier_id,omitempty"` // Canonical tier id (e.g. google_one_free, gcp_standard, aistudio_free) + Extra map[string]any `json:"extra,omitempty"` // Drive metadata +} + +// validateTierID validates tier_id format and length +func validateTierID(tierID string) error { + if tierID == "" { + return nil // Empty is allowed + } + if len(tierID) > 64 { + return fmt.Errorf("tier_id exceeds maximum length of 64 characters") + } + // Allow alphanumeric, underscore, hyphen, and slash (for tier paths) + if !regexp.MustCompile(`^[a-zA-Z0-9_/-]+$`).MatchString(tierID) { + return fmt.Errorf("tier_id contains invalid characters") + } + return nil +} + +func canonicalGeminiTierID(raw string) string { + raw = strings.TrimSpace(raw) + if raw == "" { + return "" + } + + lower := strings.ToLower(raw) + switch lower { + case GeminiTierGoogleOneFree, + GeminiTierGoogleAIPro, + GeminiTierGoogleAIUltra, + GeminiTierGCPStandard, + GeminiTierGCPEnterprise, + GeminiTierAIStudioFree, + GeminiTierAIStudioPaid, + GeminiTierGoogleOneUnknown: + return lower + } + + upper := strings.ToUpper(raw) + switch upper { + // Google One legacy tiers + case legacyTierAIPremium: + return GeminiTierGoogleAIPro + case legacyTierGoogleOneUnlimited: + return GeminiTierGoogleAIUltra + case legacyTierFree, legacyTierGoogleOneBasic, legacyTierGoogleOneStandard: + return GeminiTierGoogleOneFree + case legacyTierGoogleOneUnknown: + return GeminiTierGoogleOneUnknown + + // Code Assist legacy tiers + case "STANDARD", "PRO", "LEGACY": + return GeminiTierGCPStandard + case "ENTERPRISE", "ULTRA": + return GeminiTierGCPEnterprise + } + + // Some Code Assist responses use kebab-case tier identifiers. 
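+// For example (derived from the mappings in this function): "AI_PREMIUM"
+// canonicalizes to google_ai_pro, "ULTRA" to gcp_enterprise, and an
+// unrecognized value such as "gold-tier" (hypothetical) yields "", letting
+// callers fall back to a user-selected or default tier.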
+	switch lower {
+	case "standard-tier", "pro-tier":
+		return GeminiTierGCPStandard
+	case "ultra-tier":
+		return GeminiTierGCPEnterprise
+	}
+
+	return ""
+}
+
+func canonicalGeminiTierIDForOAuthType(oauthType, tierID string) string {
+	oauthType = strings.ToLower(strings.TrimSpace(oauthType))
+	canonical := canonicalGeminiTierID(tierID)
+	if canonical == "" {
+		return ""
+	}
+
+	switch oauthType {
+	case "google_one":
+		switch canonical {
+		case GeminiTierGoogleOneFree, GeminiTierGoogleAIPro, GeminiTierGoogleAIUltra:
+			return canonical
+		default:
+			return ""
+		}
+	case "code_assist":
+		switch canonical {
+		case GeminiTierGCPStandard, GeminiTierGCPEnterprise:
+			return canonical
+		default:
+			return ""
+		}
+	case "ai_studio":
+		switch canonical {
+		case GeminiTierAIStudioFree, GeminiTierAIStudioPaid:
+			return canonical
+		default:
+			return ""
+		}
+	default:
+		// Unknown oauth type: accept canonical tier.
+		return canonical
+	}
+}
+
+// extractTierIDFromAllowedTiers extracts tierID from LoadCodeAssist response.
+// It prioritizes the IsDefault tier and falls back to the first non-empty tier.
+func extractTierIDFromAllowedTiers(allowedTiers []geminicli.AllowedTier) string {
+	tierID := "LEGACY"
+	// First pass: look for default tier
+	for _, tier := range allowedTiers {
+		if tier.IsDefault && strings.TrimSpace(tier.ID) != "" {
+			tierID = strings.TrimSpace(tier.ID)
+			break
+		}
+	}
+	// Second pass: if still LEGACY, take first non-empty tier
+	if tierID == "LEGACY" {
+		for _, tier := range allowedTiers {
+			if strings.TrimSpace(tier.ID) != "" {
+				tierID = strings.TrimSpace(tier.ID)
+				break
+			}
+		}
+	}
+	return tierID
+}
+
+// inferGoogleOneTier infers the Google One tier from the Drive storage limit.
+func inferGoogleOneTier(storageBytes int64) string {
+	log.Printf("[GeminiOAuth] inferGoogleOneTier - input: %d bytes (%.2f TB)", storageBytes, float64(storageBytes)/float64(TB))
+
+	if storageBytes <= 0 {
+		log.Printf("[GeminiOAuth] inferGoogleOneTier - storageBytes <= 0, returning %s", GeminiTierGoogleOneUnknown)
+		return GeminiTierGoogleOneUnknown
+	}
+
+	if storageBytes > StorageTierUnlimited {
+		log.Printf("[GeminiOAuth] inferGoogleOneTier - > %d bytes (100TB), returning %s", StorageTierUnlimited, GeminiTierGoogleAIUltra)
+		return GeminiTierGoogleAIUltra
+	}
+	if storageBytes >= StorageTierAIPremium {
+		log.Printf("[GeminiOAuth] inferGoogleOneTier - >= %d bytes (2TB), returning %s", StorageTierAIPremium, GeminiTierGoogleAIPro)
+		return GeminiTierGoogleAIPro
+	}
+	if storageBytes >= StorageTierFree {
+		log.Printf("[GeminiOAuth] inferGoogleOneTier - >= %d bytes (15GB), returning %s", StorageTierFree, GeminiTierGoogleOneFree)
+		return GeminiTierGoogleOneFree
+	}
+
+	log.Printf("[GeminiOAuth] inferGoogleOneTier - < %d bytes (15GB), returning %s", StorageTierFree, GeminiTierGoogleOneUnknown)
+	return GeminiTierGoogleOneUnknown
+}
+
+// FetchGoogleOneTier fetches Google One tier from Drive API.
+// Note: LoadCodeAssist API is NOT called for Google One accounts because:
+// 1. It's designed for GCP IAM (enterprise), not personal Google accounts
+// 2. Personal accounts will get 403/404 from cloudaicompanion.googleapis.com
+// 3. Google consumer (Google One) and enterprise (GCP) systems are physically isolated
+func (s *GeminiOAuthService) FetchGoogleOneTier(ctx context.Context, accessToken, proxyURL string) (string, *geminicli.DriveStorageInfo, error) {
+	log.Printf("[GeminiOAuth] Starting FetchGoogleOneTier (Google One personal account)")
+
+	// Use Drive API to infer tier from storage quota (requires drive.readonly scope)
+	log.Printf("[GeminiOAuth] Calling Drive API for storage quota...")
+	driveClient := geminicli.NewDriveClient()
+
+	storageInfo, err := driveClient.GetStorageQuota(ctx, accessToken, proxyURL)
+	if err != nil {
+		// Check if it's a 403 (scope not granted)
+		if strings.Contains(err.Error(), "status 403") {
+			log.Printf("[GeminiOAuth] Drive API scope not available (403): %v", err)
+			return GeminiTierGoogleOneUnknown, nil, err
+		}
+		// Other errors
+		log.Printf("[GeminiOAuth] Failed to fetch Drive storage: %v", err)
+		return GeminiTierGoogleOneUnknown, nil, err
+	}
+
+	log.Printf("[GeminiOAuth] Drive API response - Limit: %d bytes (%.2f TB), Usage: %d bytes (%.2f GB)",
+		storageInfo.Limit, float64(storageInfo.Limit)/float64(TB),
+		storageInfo.Usage, float64(storageInfo.Usage)/float64(GB))
+
+	tierID := inferGoogleOneTier(storageInfo.Limit)
+	log.Printf("[GeminiOAuth] Inferred tier from storage: %s", tierID)
+
+	return tierID, storageInfo, nil
+}
+
+// RefreshAccountGoogleOneTier refreshes the Google One tier for a single account.
+func (s *GeminiOAuthService) RefreshAccountGoogleOneTier(
+	ctx context.Context,
+	account *Account,
+) (tierID string, extra map[string]any, credentials map[string]any, err error) {
+	if account == nil {
+		return "", nil, nil, fmt.Errorf("account is nil")
+	}
+
+	// Validate the account type.
+	oauthType, ok := account.Credentials["oauth_type"].(string)
+	if !ok || oauthType != "google_one" {
+		return "", nil, nil, fmt.Errorf("not a google_one OAuth account")
+	}
+
+	// Read the access_token.
+	accessToken, ok := account.Credentials["access_token"].(string)
+	if !ok || accessToken == "" {
+		return "", nil, nil, fmt.Errorf("missing access_token")
+	}
+
+	// Resolve the proxy URL.
+	var proxyURL string
+	if account.ProxyID != nil && account.Proxy != nil {
+		proxyURL = account.Proxy.URL()
+	}
+
+	// Call the Drive API.
+	tierID, storageInfo, err := s.FetchGoogleOneTier(ctx, accessToken, proxyURL)
+	if err != nil {
+		return "", nil, nil, err
+	}
+
+	// Build the extra data (preserving existing extra fields).
+	extra = make(map[string]any)
+	for k, v := range account.Extra {
+		extra[k] = v
+	}
+	if storageInfo != nil {
+		extra["drive_storage_limit"] = storageInfo.Limit
+		extra["drive_storage_usage"] = storageInfo.Usage
+		extra["drive_tier_updated_at"] = time.Now().Format(time.RFC3339)
+	}
+
+	// Build the credentials data.
+	credentials = make(map[string]any)
+	for k, v := range account.Credentials {
+		credentials[k] = v
+	}
+	credentials["tier_id"] = tierID
+
+	return tierID, extra, credentials, nil
+}
+
+func (s *GeminiOAuthService) ExchangeCode(ctx context.Context, input *GeminiExchangeCodeInput) (*GeminiTokenInfo, error) {
+	log.Printf("[GeminiOAuth] ========== ExchangeCode START ==========")
+	log.Printf("[GeminiOAuth] SessionID: %s", input.SessionID)
+
+	session, ok := s.sessionStore.Get(input.SessionID)
+	if !ok {
+		log.Printf("[GeminiOAuth] ERROR: Session not found or expired")
+		return nil, fmt.Errorf("session not found or expired")
+	}
+	if strings.TrimSpace(input.State) == "" || input.State != session.State {
+		log.Printf("[GeminiOAuth] ERROR: Invalid state")
+		return nil, fmt.Errorf("invalid state")
+	}
+
+	proxyURL := session.ProxyURL
+	if input.ProxyID != nil {
+		proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID)
+		if err == nil && proxy != nil {
+			proxyURL = proxy.URL()
+		}
+	}
+	log.Printf("[GeminiOAuth] ProxyURL: %s", proxyURL)
+
+	redirectURI := session.RedirectURI
+
+	// Resolve oauth_type early (defaults to code_assist for backward compatibility).
+	oauthType := session.OAuthType
+	if oauthType == "" {
+		oauthType = "code_assist"
+	}
+	log.Printf("[GeminiOAuth] OAuth Type: %s", oauthType)
+	log.Printf("[GeminiOAuth] Project ID from session: %s", session.ProjectID)
+
+	// If the session was created for AI Studio OAuth, ensure a custom OAuth client is configured.
+	if oauthType == "ai_studio" {
+		effectiveCfg, err := geminicli.EffectiveOAuthConfig(geminicli.OAuthConfig{
+			ClientID:     s.cfg.Gemini.OAuth.ClientID,
+			ClientSecret: s.cfg.Gemini.OAuth.ClientSecret,
+			Scopes:       s.cfg.Gemini.OAuth.Scopes,
+		}, "ai_studio")
+		if err != nil {
+			return nil, err
+		}
+		isBuiltinClient := effectiveCfg.ClientID == geminicli.GeminiCLIOAuthClientID &&
+			effectiveCfg.ClientSecret == geminicli.GeminiCLIOAuthClientSecret
+		if isBuiltinClient {
+			return nil, fmt.Errorf("AI Studio OAuth requires a custom OAuth Client. Please use an AI Studio API Key account, or configure GEMINI_OAUTH_CLIENT_ID / GEMINI_OAUTH_CLIENT_SECRET and re-authorize")
+		}
+	}
+
+	// code_assist always uses the built-in client and its fixed redirect URI.
+	if oauthType == "code_assist" {
+		redirectURI = geminicli.GeminiCLIRedirectURI
+	}
+
+	tokenResp, err := s.oauthClient.ExchangeCode(ctx, oauthType, input.Code, session.CodeVerifier, redirectURI, proxyURL)
+	if err != nil {
+		log.Printf("[GeminiOAuth] ERROR: Failed to exchange code: %v", err)
+		return nil, fmt.Errorf("failed to exchange code: %w", err)
+	}
+	log.Printf("[GeminiOAuth] Token exchange successful")
+	log.Printf("[GeminiOAuth] Token scope: %s", tokenResp.Scope)
+	log.Printf("[GeminiOAuth] Token expires_in: %d seconds", tokenResp.ExpiresIn)
+
+	sessionProjectID := strings.TrimSpace(session.ProjectID)
+	s.sessionStore.Delete(input.SessionID)
+
+	// Compute the expiry: subtract a 5-minute safety window (network latency and clock skew),
+	// and enforce a lower bound so a tiny expires_in cannot produce a time in the past
+	// (which would trigger a refresh storm).
+	const safetyWindow = 300 // 5 minutes
+	const minTTL = 30        // minimum 30 seconds
+	expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - safetyWindow
+	minExpiresAt := time.Now().Unix() + minTTL
+	if expiresAt < minExpiresAt {
+		expiresAt = minExpiresAt
+	}
+
+	projectID := sessionProjectID
+	var tierID string
+	fallbackTierID := canonicalGeminiTierIDForOAuthType(oauthType, input.TierID)
+	if fallbackTierID == "" {
+		fallbackTierID = canonicalGeminiTierIDForOAuthType(oauthType, session.TierID)
+	}
+
+	log.Printf("[GeminiOAuth] ========== Account Type Detection START ==========")
+	log.Printf("[GeminiOAuth] OAuth Type: %s", oauthType)
+
+	// For code_assist, project_id is required, so the Code Assist API must be called.
+	// For google_one, a personal Google account is used; Google auto-assigns a project_id.
+	// For ai_studio, project_id is optional (it does not affect AI Studio API usage).
+	switch oauthType {
+	case "code_assist":
+		log.Printf("[GeminiOAuth] Processing code_assist OAuth type")
+		if projectID == "" {
+			log.Printf("[GeminiOAuth] No project_id provided, attempting to fetch from LoadCodeAssist API...")
+			var err error
+			projectID, tierID, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL)
+			if err != nil {
+				// Log a warning without aborting; project_id can be supplied later.
+				fmt.Printf("[GeminiOAuth] Warning: Failed to fetch project_id during token exchange: %v\n", err)
+				log.Printf("[GeminiOAuth] WARNING: Failed to fetch project_id: %v", err)
+			} else {
+				log.Printf("[GeminiOAuth] Successfully fetched project_id: %s, tier_id: %s", projectID, tierID)
+			}
+		} else {
+			log.Printf("[GeminiOAuth] User provided project_id: %s, fetching tier_id...", projectID)
+			// The user supplied project_id manually; still call LoadCodeAssist to obtain the tierID.
+			_, fetchedTierID, err := s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL)
+			if err != nil {
+				fmt.Printf("[GeminiOAuth] Warning: Failed to fetch tierID: %v\n", err)
+				log.Printf("[GeminiOAuth] WARNING: Failed to fetch tier_id: %v", err)
+			} else {
+				tierID = fetchedTierID
+				log.Printf("[GeminiOAuth] Successfully fetched tier_id: %s", tierID)
+			}
+		}
+		if strings.TrimSpace(projectID) == "" {
+			log.Printf("[GeminiOAuth] ERROR: Missing project_id for Code Assist OAuth")
+			return nil, fmt.Errorf("missing project_id for Code Assist OAuth: please fill Project ID (optional field) and regenerate the auth URL, or ensure your Google account has an ACTIVE GCP project")
+		}
+		// Prefer auto-detected tier; fall back to user-selected tier.
+		tierID = canonicalGeminiTierIDForOAuthType(oauthType, tierID)
+		if tierID == "" {
+			if fallbackTierID != "" {
+				tierID = fallbackTierID
+				log.Printf("[GeminiOAuth] Using fallback tier_id from user/session: %s", tierID)
+			} else {
+				tierID = GeminiTierGCPStandard
+				log.Printf("[GeminiOAuth] Using default tier_id: %s", tierID)
+			}
+		}
+		log.Printf("[GeminiOAuth] Final code_assist result - project_id: %s, tier_id: %s", projectID, tierID)
+
+	case "google_one":
+		log.Printf("[GeminiOAuth] Processing google_one OAuth type")
+
+		// Google One accounts use cloudaicompanion API, which requires a project_id.
+		// For personal accounts, Google auto-assigns a project_id via the LoadCodeAssist API.
+		if projectID == "" {
+			log.Printf("[GeminiOAuth] No project_id provided, attempting to fetch from LoadCodeAssist API...")
+			var err error
+			projectID, _, err = s.fetchProjectID(ctx, tokenResp.AccessToken, proxyURL)
+			if err != nil {
+				log.Printf("[GeminiOAuth] ERROR: Failed to fetch project_id: %v", err)
+				return nil, fmt.Errorf("google_one accounts require a project_id; auto-detect failed: %w", err)
+			}
+			log.Printf("[GeminiOAuth] Successfully fetched project_id: %s", projectID)
+		}
+
+		log.Printf("[GeminiOAuth] Attempting to fetch Google One tier from Drive API...")
+		// Attempt to fetch Drive storage tier
+		var storageInfo *geminicli.DriveStorageInfo
+		var err error
+		tierID, storageInfo, err = s.FetchGoogleOneTier(ctx, tokenResp.AccessToken, proxyURL)
+		if err != nil {
+			// Log warning but don't block - use fallback
+			fmt.Printf("[GeminiOAuth] Warning: Failed to fetch Drive tier: %v\n", err)
+			log.Printf("[GeminiOAuth] WARNING: Failed to fetch Drive tier: %v", err)
+			tierID = ""
+		} else {
+			log.Printf("[GeminiOAuth] Successfully fetched Drive tier: %s", tierID)
+			if storageInfo != nil {
+				log.Printf("[GeminiOAuth] Drive storage - Limit: %d bytes (%.2f TB), Usage: %d bytes (%.2f GB)",
+					storageInfo.Limit, float64(storageInfo.Limit)/float64(TB),
+					storageInfo.Usage, float64(storageInfo.Usage)/float64(GB))
+			}
+		}
+		tierID = canonicalGeminiTierIDForOAuthType(oauthType, tierID)
+		if tierID == "" || tierID == GeminiTierGoogleOneUnknown {
+			if fallbackTierID != "" {
+				tierID = fallbackTierID
+				log.Printf("[GeminiOAuth] Using fallback tier_id from user/session: %s", tierID)
+			} else {
+				tierID = GeminiTierGoogleOneFree
+				log.Printf("[GeminiOAuth] Using default tier_id: %s", tierID)
+			}
+		}
+		fmt.Printf("[GeminiOAuth] Google One tierID after normalization: %s\n", tierID)
+
+		// Store Drive info in extra field for caching
+		if storageInfo != nil {
+			tokenInfo := &GeminiTokenInfo{
+				AccessToken:  tokenResp.AccessToken,
+				RefreshToken: tokenResp.RefreshToken,
+				TokenType:    tokenResp.TokenType,
+				ExpiresIn:    tokenResp.ExpiresIn,
+				ExpiresAt:    expiresAt,
+				Scope:        tokenResp.Scope,
+				ProjectID:    projectID,
+				TierID:       tierID,
+				OAuthType:    oauthType,
+				Extra: map[string]any{
+					"drive_storage_limit":   storageInfo.Limit,
+					"drive_storage_usage":   storageInfo.Usage,
+					"drive_tier_updated_at": time.Now().Format(time.RFC3339),
+				},
+			}
+			log.Printf("[GeminiOAuth] ========== ExchangeCode END (google_one with storage info) ==========")
+			return tokenInfo, nil
+		}
+
+	case "ai_studio":
+		// No automatic tier detection for AI Studio OAuth; rely on user selection.
+		if fallbackTierID != "" {
+			tierID = fallbackTierID
+		} else {
+			tierID = GeminiTierAIStudioFree
+		}
+
+	default:
+		log.Printf("[GeminiOAuth] Processing %s OAuth type (no tier detection)", oauthType)
+	}
+
+	log.Printf("[GeminiOAuth] ========== Account Type Detection END ==========")
+
+	result := &GeminiTokenInfo{
+		AccessToken:  tokenResp.AccessToken,
+		RefreshToken: tokenResp.RefreshToken,
+		TokenType:    tokenResp.TokenType,
+		ExpiresIn:    tokenResp.ExpiresIn,
+		ExpiresAt:    expiresAt,
+		Scope:        tokenResp.Scope,
+		ProjectID:    projectID,
+		TierID:       tierID,
+		OAuthType:    oauthType,
+	}
+	log.Printf("[GeminiOAuth] Final result - OAuth Type: %s, Project ID: %s, Tier ID: %s", result.OAuthType, result.ProjectID, result.TierID)
+	log.Printf("[GeminiOAuth] ========== ExchangeCode END ==========")
+	return result, nil
+}
+
+func (s *GeminiOAuthService) RefreshToken(ctx context.Context, oauthType, refreshToken, proxyURL string) (*GeminiTokenInfo, error) {
+	var lastErr error
+
+	for attempt := 0; attempt <= 3; attempt++ {
+		if attempt > 0 {
+			// Exponential backoff between retries, capped at 30 seconds.
+			backoff := time.Duration(1<<(attempt-1)) * time.Second
+			if backoff > 30*time.Second {
+				backoff = 30 * time.Second
+			}
+			time.Sleep(backoff)
+		}
+
+		tokenResp, err := s.oauthClient.RefreshToken(ctx, oauthType, refreshToken, proxyURL)
+		if err == nil {
+			// Compute the expiry: subtract a 5-minute safety window (network latency and
+			// clock skew), with a lower bound so a tiny expires_in cannot produce a time
+			// in the past (which would trigger a refresh storm).
+			const safetyWindow = 300 // 5 minutes
+			const minTTL = 30        // minimum 30 seconds
+			expiresAt := time.Now().Unix() + tokenResp.ExpiresIn - safetyWindow
+			minExpiresAt := time.Now().Unix() + minTTL
+			if expiresAt < minExpiresAt {
+				expiresAt = minExpiresAt
+			}
+			return &GeminiTokenInfo{
+				AccessToken:  tokenResp.AccessToken,
+				RefreshToken: tokenResp.RefreshToken,
+				TokenType:    tokenResp.TokenType,
+				ExpiresIn:    tokenResp.ExpiresIn,
+				ExpiresAt:    expiresAt,
+				Scope:        tokenResp.Scope,
+			}, nil
+		}
+
+		if isNonRetryableGeminiOAuthError(err) {
+			return nil, err
+		}
+		lastErr = err
+	}
+
+	return nil, fmt.Errorf("token refresh failed after retries: %w", lastErr)
+}
+
+func isNonRetryableGeminiOAuthError(err error) bool {
+	msg := err.Error()
+	nonRetryable := []string{
+		"invalid_grant",
+		"invalid_client",
+		"unauthorized_client",
+		"access_denied",
+	}
+	for _, needle := range nonRetryable {
+		if strings.Contains(msg, needle) {
+			return true
+		}
+	}
+	return false
+}
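+
+// RefreshToken's retry schedule at a glance (illustrative trace, assuming the
+// exponential backoff above):
+//
+//	attempt 0: immediate
+//	attempt 1: sleep 1s (1 << 0)
+//	attempt 2: sleep 2s (1 << 1)
+//	attempt 3: sleep 4s (1 << 2)
+//
+// Errors such as "invalid_grant" abort immediately via
+// isNonRetryableGeminiOAuthError, since retrying cannot fix them.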
+
+func (s *GeminiOAuthService) RefreshAccountToken(ctx context.Context, account *Account) (*GeminiTokenInfo, error) {
+	if account.Platform != PlatformGemini || account.Type != AccountTypeOAuth {
+		return nil, fmt.Errorf("account is not a Gemini OAuth account")
+	}
+
+	refreshToken := account.GetCredential("refresh_token")
+	if strings.TrimSpace(refreshToken) == "" {
+		return nil, fmt.Errorf("no refresh token available")
+	}
+
+	// Preserve oauth_type from the account (defaults to code_assist for backward compatibility).
+	oauthType := strings.TrimSpace(account.GetCredential("oauth_type"))
+	if oauthType == "" {
+		oauthType = "code_assist"
+	}
+
+	var proxyURL string
+	if account.ProxyID != nil {
+		proxy, err := s.proxyRepo.GetByID(ctx, *account.ProxyID)
+		if err == nil && proxy != nil {
+			proxyURL = proxy.URL()
+		}
+	}
+
+	tokenInfo, err := s.RefreshToken(ctx, oauthType, refreshToken, proxyURL)
+	// Backward compatibility:
+	// Older versions could refresh Code Assist tokens using a user-provided OAuth client when configured.
+	// If the refresh token was originally issued to that custom client, forcing the built-in client will
+	// fail with "unauthorized_client". In that case, retry with the custom client (ai_studio path) when available.
+	if err != nil && oauthType == "code_assist" && strings.Contains(err.Error(), "unauthorized_client") && s.GetOAuthConfig().AIStudioOAuthEnabled {
+		if alt, altErr := s.RefreshToken(ctx, "ai_studio", refreshToken, proxyURL); altErr == nil {
+			tokenInfo = alt
+			err = nil
+		}
+	}
+	// Backward compatibility for google_one:
+	// - New behavior: when a custom OAuth client is configured, google_one will use it.
+	// - Old behavior: google_one always used the built-in Gemini CLI OAuth client.
+	// If an existing account was authorized with the built-in client, refreshing with the custom client
+	// will fail with "unauthorized_client". Retry with the built-in client (code_assist path forces it).
+	if err != nil && oauthType == "google_one" && strings.Contains(err.Error(), "unauthorized_client") && s.GetOAuthConfig().AIStudioOAuthEnabled {
+		if alt, altErr := s.RefreshToken(ctx, "code_assist", refreshToken, proxyURL); altErr == nil {
+			tokenInfo = alt
+			err = nil
+		}
+	}
+	if err != nil {
+		// Provide a more actionable error for common OAuth client mismatch issues.
+		if strings.Contains(err.Error(), "unauthorized_client") {
+			return nil, fmt.Errorf("%w (OAuth client mismatch: the refresh_token is bound to the OAuth client used during authorization; please re-authorize this account or restore the original GEMINI_OAUTH_CLIENT_ID/SECRET)", err)
+		}
+		return nil, err
+	}
+
+	tokenInfo.OAuthType = oauthType
+
+	// Preserve account's project_id when present.
+	existingProjectID := strings.TrimSpace(account.GetCredential("project_id"))
+	if existingProjectID != "" {
+		tokenInfo.ProjectID = existingProjectID
+	}
+
+	// Try to read tierID from the account credentials (backward compatibility).
+	existingTierID := strings.TrimSpace(account.GetCredential("tier_id"))
+
+	// For Code Assist, project_id is required. Auto-detect if missing.
+	// For AI Studio OAuth, project_id is optional and should not block refresh.
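+	// Summary of the cases below (descriptive only):
+	//
+	//	code_assist: tier_id falls back to gcp_standard; a missing project_id is fatal
+	//	google_one:  the Drive-derived tier is re-fetched at most once per 24 hours
+	//	other types: no detection is performed here
+	//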
+	switch oauthType {
+	case "code_assist":
+		// Seed a default or keep the previous value first so tier_id is always set.
+		if existingTierID != "" {
+			tokenInfo.TierID = canonicalGeminiTierIDForOAuthType(oauthType, existingTierID)
+		}
+		if tokenInfo.TierID == "" {
+			tokenInfo.TierID = GeminiTierGCPStandard
+		}
+
+		// Try to auto-detect project_id and tier_id.
+		needDetect := strings.TrimSpace(tokenInfo.ProjectID) == "" || tokenInfo.TierID == ""
+		if needDetect {
+			projectID, tierID, err := s.fetchProjectID(ctx, tokenInfo.AccessToken, proxyURL)
+			if err != nil {
+				fmt.Printf("[GeminiOAuth] Warning: failed to auto-detect project/tier: %v\n", err)
+			} else {
+				if strings.TrimSpace(tokenInfo.ProjectID) == "" && projectID != "" {
+					tokenInfo.ProjectID = projectID
+				}
+				if tierID != "" {
+					if canonical := canonicalGeminiTierIDForOAuthType(oauthType, tierID); canonical != "" {
+						tokenInfo.TierID = canonical
+					}
+				}
+			}
+		}
+
+		if strings.TrimSpace(tokenInfo.ProjectID) == "" {
+			return nil, fmt.Errorf("failed to auto-detect project_id: empty result")
+		}
+	case "google_one":
+		canonicalExistingTier := canonicalGeminiTierIDForOAuthType(oauthType, existingTierID)
+		// Check if tier cache is stale (> 24 hours)
+		needsRefresh := true
+		if account.Extra != nil {
+			if updatedAtStr, ok := account.Extra["drive_tier_updated_at"].(string); ok {
+				if updatedAt, err := time.Parse(time.RFC3339, updatedAtStr); err == nil {
+					if time.Since(updatedAt) <= 24*time.Hour {
+						needsRefresh = false
+						// Use cached tier
+						tokenInfo.TierID = canonicalExistingTier
+					}
+				}
+			}
+		}
+
+		if tokenInfo.TierID == "" {
+			tokenInfo.TierID = canonicalExistingTier
+		}
+
+		if needsRefresh {
+			tierID, storageInfo, err := s.FetchGoogleOneTier(ctx, tokenInfo.AccessToken, proxyURL)
+			if err == nil {
+				if canonical := canonicalGeminiTierIDForOAuthType(oauthType, tierID); canonical != "" && canonical != GeminiTierGoogleOneUnknown {
+					tokenInfo.TierID = canonical
+				}
+				if storageInfo != nil {
+					tokenInfo.Extra = map[string]any{
+						"drive_storage_limit":   storageInfo.Limit,
+						"drive_storage_usage":   storageInfo.Usage,
+						"drive_tier_updated_at": time.Now().Format(time.RFC3339),
+					}
+				}
+			}
+		}
+
+		if tokenInfo.TierID == "" || tokenInfo.TierID == GeminiTierGoogleOneUnknown {
+			if canonicalExistingTier != "" {
+				tokenInfo.TierID = canonicalExistingTier
+			} else {
+				tokenInfo.TierID = GeminiTierGoogleOneFree
+			}
+		}
+	}
+
+	return tokenInfo, nil
+}
+
+func (s *GeminiOAuthService) BuildAccountCredentials(tokenInfo *GeminiTokenInfo) map[string]any {
+	creds := map[string]any{
+		"access_token": tokenInfo.AccessToken,
+		"expires_at":   strconv.FormatInt(tokenInfo.ExpiresAt, 10),
+	}
+	if tokenInfo.RefreshToken != "" {
+		creds["refresh_token"] = tokenInfo.RefreshToken
+	}
+	if tokenInfo.TokenType != "" {
+		creds["token_type"] = tokenInfo.TokenType
+	}
+	if tokenInfo.Scope != "" {
+		creds["scope"] = tokenInfo.Scope
+	}
+	if tokenInfo.ProjectID != "" {
+		creds["project_id"] = tokenInfo.ProjectID
+	}
+	if tokenInfo.TierID != "" {
+		// Validate tier_id before storing
+		if err := validateTierID(tokenInfo.TierID); err == nil {
+			creds["tier_id"] = tokenInfo.TierID
+			fmt.Printf("[GeminiOAuth] Storing tier_id: %s\n", tokenInfo.TierID)
+		} else {
+			fmt.Printf("[GeminiOAuth] Invalid tier_id %s: %v\n", tokenInfo.TierID, err)
+		}
+		// Silently skip invalid tier_id (don't block account creation)
+	}
+	if tokenInfo.OAuthType != "" {
+		creds["oauth_type"] = tokenInfo.OAuthType
+	}
+	// Store extra metadata (Drive info) if present
+	if len(tokenInfo.Extra) > 0 {
+		for k, v := range tokenInfo.Extra {
+			creds[k] = v
+		}
+	}
+	return 
creds +} + +func (s *GeminiOAuthService) Stop() { + s.sessionStore.Stop() +} + +func (s *GeminiOAuthService) fetchProjectID(ctx context.Context, accessToken, proxyURL string) (string, string, error) { + if s.codeAssist == nil { + return "", "", errors.New("code assist client not configured") + } + + loadResp, loadErr := s.codeAssist.LoadCodeAssist(ctx, accessToken, proxyURL, nil) + + // Extract tierID from response (works whether CloudAICompanionProject is set or not) + tierID := "LEGACY" + if loadResp != nil { + // First try to get tier from currentTier/paidTier fields + if tier := loadResp.GetTier(); tier != "" { + tierID = tier + } else { + // Fallback to extracting from allowedTiers + tierID = extractTierIDFromAllowedTiers(loadResp.AllowedTiers) + } + } + + // If LoadCodeAssist returned a project, use it + if loadErr == nil && loadResp != nil && strings.TrimSpace(loadResp.CloudAICompanionProject) != "" { + return strings.TrimSpace(loadResp.CloudAICompanionProject), tierID, nil + } + + req := &geminicli.OnboardUserRequest{ + TierID: tierID, + Metadata: geminicli.LoadCodeAssistMetadata{ + IDEType: "ANTIGRAVITY", + Platform: "PLATFORM_UNSPECIFIED", + PluginType: "GEMINI", + }, + } + + maxAttempts := 5 + for attempt := 1; attempt <= maxAttempts; attempt++ { + resp, err := s.codeAssist.OnboardUser(ctx, accessToken, proxyURL, req) + if err != nil { + // If Code Assist onboarding fails (e.g. INVALID_ARGUMENT), fallback to Cloud Resource Manager projects. + fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) + if fbErr == nil && strings.TrimSpace(fallback) != "" { + return strings.TrimSpace(fallback), tierID, nil + } + return "", tierID, err + } + if resp.Done { + if resp.Response != nil && resp.Response.CloudAICompanionProject != nil { + switch v := resp.Response.CloudAICompanionProject.(type) { + case string: + return strings.TrimSpace(v), tierID, nil + case map[string]any: + if id, ok := v["id"].(string); ok { + return strings.TrimSpace(id), tierID, nil + } + } + } + + fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) + if fbErr == nil && strings.TrimSpace(fallback) != "" { + return strings.TrimSpace(fallback), tierID, nil + } + return "", tierID, errors.New("onboardUser completed but no project_id returned") + } + time.Sleep(2 * time.Second) + } + + fallback, fbErr := fetchProjectIDFromResourceManager(ctx, accessToken, proxyURL) + if fbErr == nil && strings.TrimSpace(fallback) != "" { + return strings.TrimSpace(fallback), tierID, nil + } + if loadErr != nil { + return "", tierID, fmt.Errorf("loadCodeAssist failed (%v) and onboardUser timeout after %d attempts", loadErr, maxAttempts) + } + return "", tierID, fmt.Errorf("onboardUser timeout after %d attempts", maxAttempts) +} + +type googleCloudProject struct { + ProjectID string `json:"projectId"` + DisplayName string `json:"name"` + LifecycleState string `json:"lifecycleState"` +} + +type googleCloudProjectsResponse struct { + Projects []googleCloudProject `json:"projects"` +} + +func fetchProjectIDFromResourceManager(ctx context.Context, accessToken, proxyURL string) (string, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://cloudresourcemanager.googleapis.com/v1/projects", nil) + if err != nil { + return "", fmt.Errorf("failed to create resource manager request: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+accessToken) + req.Header.Set("User-Agent", geminicli.GeminiCLIUserAgent) + + client, err := 
httpclient.GetClient(httpclient.Options{ + ProxyURL: strings.TrimSpace(proxyURL), + Timeout: 30 * time.Second, + ValidateResolvedIP: true, + }) + if err != nil { + client = &http.Client{Timeout: 30 * time.Second} + } + + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("resource manager request failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read resource manager response: %w", err) + } + + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("resource manager HTTP %d: %s", resp.StatusCode, string(bodyBytes)) + } + + var projectsResp googleCloudProjectsResponse + if err := json.Unmarshal(bodyBytes, &projectsResp); err != nil { + return "", fmt.Errorf("failed to parse resource manager response: %w", err) + } + + active := make([]googleCloudProject, 0, len(projectsResp.Projects)) + for _, p := range projectsResp.Projects { + if p.LifecycleState == "ACTIVE" && strings.TrimSpace(p.ProjectID) != "" { + active = append(active, p) + } + } + if len(active) == 0 { + return "", errors.New("no ACTIVE projects found from resource manager") + } + + // Prefer likely companion projects first. + for _, p := range active { + id := strings.ToLower(strings.TrimSpace(p.ProjectID)) + name := strings.ToLower(strings.TrimSpace(p.DisplayName)) + if strings.Contains(id, "cloud-ai-companion") || strings.Contains(name, "cloud ai companion") || strings.Contains(name, "code assist") { + return strings.TrimSpace(p.ProjectID), nil + } + } + // Then prefer "default". + for _, p := range active { + id := strings.ToLower(strings.TrimSpace(p.ProjectID)) + name := strings.ToLower(strings.TrimSpace(p.DisplayName)) + if strings.Contains(id, "default") || strings.Contains(name, "default") { + return strings.TrimSpace(p.ProjectID), nil + } + } + + return strings.TrimSpace(active[0].ProjectID), nil +} diff --git a/backend/internal/service/gemini_oauth_service_test.go b/backend/internal/service/gemini_oauth_service_test.go new file mode 100644 index 00000000..5591eb39 --- /dev/null +++ b/backend/internal/service/gemini_oauth_service_test.go @@ -0,0 +1,130 @@ +package service + +import ( + "context" + "net/url" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" +) + +func TestGeminiOAuthService_GenerateAuthURL_RedirectURIStrategy(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + cfg *config.Config + oauthType string + projectID string + wantClientID string + wantRedirect string + wantScope string + wantProjectID string + wantErrSubstr string + } + + tests := []testCase{ + { + name: "google_one uses built-in client when not configured and redirects to upstream", + cfg: &config.Config{ + Gemini: config.GeminiConfig{ + OAuth: config.GeminiOAuthConfig{}, + }, + }, + oauthType: "google_one", + wantClientID: geminicli.GeminiCLIOAuthClientID, + wantRedirect: geminicli.GeminiCLIRedirectURI, + wantScope: geminicli.DefaultCodeAssistScopes, + wantProjectID: "", + }, + { + name: "google_one always forces built-in client even when custom client configured", + cfg: &config.Config{ + Gemini: config.GeminiConfig{ + OAuth: config.GeminiOAuthConfig{ + ClientID: "custom-client-id", + ClientSecret: "custom-client-secret", + }, + }, + }, + oauthType: "google_one", + wantClientID: geminicli.GeminiCLIOAuthClientID, + wantRedirect: geminicli.GeminiCLIRedirectURI, + wantScope: geminicli.DefaultCodeAssistScopes, + 
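+ // No project_id is supplied in this case, so none should appear in the URL.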
wantProjectID: "", + }, + { + name: "code_assist always forces built-in client even when custom client configured", + cfg: &config.Config{ + Gemini: config.GeminiConfig{ + OAuth: config.GeminiOAuthConfig{ + ClientID: "custom-client-id", + ClientSecret: "custom-client-secret", + }, + }, + }, + oauthType: "code_assist", + projectID: "my-gcp-project", + wantClientID: geminicli.GeminiCLIOAuthClientID, + wantRedirect: geminicli.GeminiCLIRedirectURI, + wantScope: geminicli.DefaultCodeAssistScopes, + wantProjectID: "my-gcp-project", + }, + { + name: "ai_studio requires custom client", + cfg: &config.Config{ + Gemini: config.GeminiConfig{ + OAuth: config.GeminiOAuthConfig{}, + }, + }, + oauthType: "ai_studio", + wantErrSubstr: "AI Studio OAuth requires a custom OAuth Client", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + svc := NewGeminiOAuthService(nil, nil, nil, tt.cfg) + got, err := svc.GenerateAuthURL(context.Background(), nil, "https://example.com/auth/callback", tt.projectID, tt.oauthType, "") + if tt.wantErrSubstr != "" { + if err == nil { + t.Fatalf("expected error containing %q, got nil", tt.wantErrSubstr) + } + if !strings.Contains(err.Error(), tt.wantErrSubstr) { + t.Fatalf("expected error containing %q, got: %v", tt.wantErrSubstr, err) + } + return + } + if err != nil { + t.Fatalf("GenerateAuthURL returned error: %v", err) + } + + parsed, err := url.Parse(got.AuthURL) + if err != nil { + t.Fatalf("failed to parse auth_url: %v", err) + } + q := parsed.Query() + + if gotState := q.Get("state"); gotState != got.State { + t.Fatalf("state mismatch: query=%q result=%q", gotState, got.State) + } + if gotClientID := q.Get("client_id"); gotClientID != tt.wantClientID { + t.Fatalf("client_id mismatch: got=%q want=%q", gotClientID, tt.wantClientID) + } + if gotRedirect := q.Get("redirect_uri"); gotRedirect != tt.wantRedirect { + t.Fatalf("redirect_uri mismatch: got=%q want=%q", gotRedirect, tt.wantRedirect) + } + if gotScope := q.Get("scope"); gotScope != tt.wantScope { + t.Fatalf("scope mismatch: got=%q want=%q", gotScope, tt.wantScope) + } + if gotProjectID := q.Get("project_id"); gotProjectID != tt.wantProjectID { + t.Fatalf("project_id mismatch: got=%q want=%q", gotProjectID, tt.wantProjectID) + } + }) + } +} diff --git a/backend/internal/service/gemini_quota.go b/backend/internal/service/gemini_quota.go new file mode 100644 index 00000000..3a70232c --- /dev/null +++ b/backend/internal/service/gemini_quota.go @@ -0,0 +1,448 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "log" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" +) + +type geminiModelClass string + +const ( + geminiModelPro geminiModelClass = "pro" + geminiModelFlash geminiModelClass = "flash" +) + +type GeminiQuota struct { + // SharedRPD is a shared requests-per-day pool across models. + // When SharedRPD > 0, callers should treat ProRPD/FlashRPD as not applicable for daily quota checks. + SharedRPD int64 `json:"shared_rpd,omitempty"` + // SharedRPM is a shared requests-per-minute pool across models. + // When SharedRPM > 0, callers should treat ProRPM/FlashRPM as not applicable for minute quota checks. + SharedRPM int64 `json:"shared_rpm,omitempty"` + + // Per-model quotas (AI Studio / API key). + // A value of -1 means "unlimited" (pay-as-you-go). 
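+ // e.g. the aistudio_paid defaults below use ProRPD=-1 with ProRPM=1000: no daily cap, but still a per-minute cap.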
+ ProRPD int64 `json:"pro_rpd,omitempty"` + ProRPM int64 `json:"pro_rpm,omitempty"` + FlashRPD int64 `json:"flash_rpd,omitempty"` + FlashRPM int64 `json:"flash_rpm,omitempty"` +} + +type GeminiTierPolicy struct { + Quota GeminiQuota + Cooldown time.Duration +} + +type GeminiQuotaPolicy struct { + tiers map[string]GeminiTierPolicy +} + +type GeminiUsageTotals struct { + ProRequests int64 + FlashRequests int64 + ProTokens int64 + FlashTokens int64 + ProCost float64 + FlashCost float64 +} + +const geminiQuotaCacheTTL = time.Minute + +type geminiQuotaOverridesV1 struct { + Tiers map[string]config.GeminiTierQuotaConfig `json:"tiers"` +} + +type geminiQuotaOverridesV2 struct { + QuotaRules map[string]geminiQuotaRuleOverride `json:"quota_rules"` +} + +type geminiQuotaRuleOverride struct { + SharedRPD *int64 `json:"shared_rpd,omitempty"` + SharedRPM *int64 `json:"rpm,omitempty"` + GeminiPro *geminiModelQuotaOverride `json:"gemini_pro,omitempty"` + GeminiFlash *geminiModelQuotaOverride `json:"gemini_flash,omitempty"` + Desc *string `json:"desc,omitempty"` +} + +type geminiModelQuotaOverride struct { + RPD *int64 `json:"rpd,omitempty"` + RPM *int64 `json:"rpm,omitempty"` +} + +type GeminiQuotaService struct { + cfg *config.Config + settingRepo SettingRepository + mu sync.Mutex + cachedAt time.Time + policy *GeminiQuotaPolicy +} + +func NewGeminiQuotaService(cfg *config.Config, settingRepo SettingRepository) *GeminiQuotaService { + return &GeminiQuotaService{ + cfg: cfg, + settingRepo: settingRepo, + } +} + +func (s *GeminiQuotaService) Policy(ctx context.Context) *GeminiQuotaPolicy { + if s == nil { + return newGeminiQuotaPolicy() + } + + now := time.Now() + s.mu.Lock() + if s.policy != nil && now.Sub(s.cachedAt) < geminiQuotaCacheTTL { + policy := s.policy + s.mu.Unlock() + return policy + } + s.mu.Unlock() + + policy := newGeminiQuotaPolicy() + if s.cfg != nil { + policy.ApplyOverrides(s.cfg.Gemini.Quota.Tiers) + if strings.TrimSpace(s.cfg.Gemini.Quota.Policy) != "" { + raw := []byte(s.cfg.Gemini.Quota.Policy) + var overridesV2 geminiQuotaOverridesV2 + if err := json.Unmarshal(raw, &overridesV2); err == nil && len(overridesV2.QuotaRules) > 0 { + policy.ApplyQuotaRulesOverrides(overridesV2.QuotaRules) + } else { + var overridesV1 geminiQuotaOverridesV1 + if err := json.Unmarshal(raw, &overridesV1); err != nil { + log.Printf("gemini quota: parse config policy failed: %v", err) + } else { + policy.ApplyOverrides(overridesV1.Tiers) + } + } + } + } + + if s.settingRepo != nil { + value, err := s.settingRepo.GetValue(ctx, SettingKeyGeminiQuotaPolicy) + if err != nil && !errors.Is(err, ErrSettingNotFound) { + log.Printf("gemini quota: load setting failed: %v", err) + } else if strings.TrimSpace(value) != "" { + raw := []byte(value) + var overridesV2 geminiQuotaOverridesV2 + if err := json.Unmarshal(raw, &overridesV2); err == nil && len(overridesV2.QuotaRules) > 0 { + policy.ApplyQuotaRulesOverrides(overridesV2.QuotaRules) + } else { + var overridesV1 geminiQuotaOverridesV1 + if err := json.Unmarshal(raw, &overridesV1); err != nil { + log.Printf("gemini quota: parse setting failed: %v", err) + } else { + policy.ApplyOverrides(overridesV1.Tiers) + } + } + } + } + + s.mu.Lock() + s.policy = policy + s.cachedAt = now + s.mu.Unlock() + + return policy +} + +func (s *GeminiQuotaService) QuotaForAccount(ctx context.Context, account *Account) (GeminiQuota, bool) { + if account == nil || account.Platform != PlatformGemini { + return GeminiQuota{}, false + } + + // Map (oauth_type + tier_id) to a canonical policy 
tier key. + // This keeps the policy table stable even if upstream tier_id strings vary. + tierKey := geminiQuotaTierKeyForAccount(account) + if tierKey == "" { + return GeminiQuota{}, false + } + + policy := s.Policy(ctx) + return policy.QuotaForTier(tierKey) +} + +func (s *GeminiQuotaService) CooldownForTier(ctx context.Context, tierID string) time.Duration { + policy := s.Policy(ctx) + return policy.CooldownForTier(tierID) +} + +func (s *GeminiQuotaService) CooldownForAccount(ctx context.Context, account *Account) time.Duration { + if s == nil || account == nil || account.Platform != PlatformGemini { + return 5 * time.Minute + } + tierKey := geminiQuotaTierKeyForAccount(account) + if strings.TrimSpace(tierKey) == "" { + return 5 * time.Minute + } + return s.CooldownForTier(ctx, tierKey) +} + +func newGeminiQuotaPolicy() *GeminiQuotaPolicy { + return &GeminiQuotaPolicy{ + tiers: map[string]GeminiTierPolicy{ + // --- AI Studio / API Key (per-model) --- + // aistudio_free: + // - gemini_pro: 50 RPD / 2 RPM + // - gemini_flash: 1500 RPD / 15 RPM + GeminiTierAIStudioFree: {Quota: GeminiQuota{ProRPD: 50, ProRPM: 2, FlashRPD: 1500, FlashRPM: 15}, Cooldown: 30 * time.Minute}, + // aistudio_paid: -1 means "unlimited/pay-as-you-go" for RPD. + GeminiTierAIStudioPaid: {Quota: GeminiQuota{ProRPD: -1, ProRPM: 1000, FlashRPD: -1, FlashRPM: 2000}, Cooldown: 5 * time.Minute}, + + // --- Google One (shared pool) --- + GeminiTierGoogleOneFree: {Quota: GeminiQuota{SharedRPD: 1000, SharedRPM: 60}, Cooldown: 30 * time.Minute}, + GeminiTierGoogleAIPro: {Quota: GeminiQuota{SharedRPD: 1500, SharedRPM: 120}, Cooldown: 5 * time.Minute}, + GeminiTierGoogleAIUltra: {Quota: GeminiQuota{SharedRPD: 2000, SharedRPM: 120}, Cooldown: 5 * time.Minute}, + + // --- GCP Code Assist (shared pool) --- + GeminiTierGCPStandard: {Quota: GeminiQuota{SharedRPD: 1500, SharedRPM: 120}, Cooldown: 5 * time.Minute}, + GeminiTierGCPEnterprise: {Quota: GeminiQuota{SharedRPD: 2000, SharedRPM: 120}, Cooldown: 5 * time.Minute}, + }, + } +} + +func (p *GeminiQuotaPolicy) ApplyOverrides(tiers map[string]config.GeminiTierQuotaConfig) { + if p == nil || len(tiers) == 0 { + return + } + for rawID, override := range tiers { + tierID := normalizeGeminiTierID(rawID) + if tierID == "" { + continue + } + policy, ok := p.tiers[tierID] + if !ok { + policy = GeminiTierPolicy{Cooldown: 5 * time.Minute} + } + // Backward-compatible overrides: + // - If the tier uses shared quota, interpret pro_rpd as shared_rpd. + // - Otherwise apply per-model overrides. + if override.ProRPD != nil { + if policy.Quota.SharedRPD > 0 { + policy.Quota.SharedRPD = clampGeminiQuotaInt64WithUnlimited(*override.ProRPD) + } else { + policy.Quota.ProRPD = clampGeminiQuotaInt64WithUnlimited(*override.ProRPD) + } + } + if override.FlashRPD != nil { + if policy.Quota.SharedRPD > 0 { + // No separate flash RPD for shared tiers. 
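+ // The override value is intentionally dropped here rather than folded into SharedRPD.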
+ } else { + policy.Quota.FlashRPD = clampGeminiQuotaInt64WithUnlimited(*override.FlashRPD) + } + } + if override.CooldownMinutes != nil { + minutes := clampGeminiQuotaInt(*override.CooldownMinutes) + policy.Cooldown = time.Duration(minutes) * time.Minute + } + p.tiers[tierID] = policy + } +} + +func (p *GeminiQuotaPolicy) ApplyQuotaRulesOverrides(rules map[string]geminiQuotaRuleOverride) { + if p == nil || len(rules) == 0 { + return + } + for rawID, override := range rules { + tierID := normalizeGeminiTierID(rawID) + if tierID == "" { + continue + } + policy, ok := p.tiers[tierID] + if !ok { + policy = GeminiTierPolicy{Cooldown: 5 * time.Minute} + } + + if override.SharedRPD != nil { + policy.Quota.SharedRPD = clampGeminiQuotaInt64WithUnlimited(*override.SharedRPD) + } + if override.SharedRPM != nil { + policy.Quota.SharedRPM = clampGeminiQuotaRPM(*override.SharedRPM) + } + if override.GeminiPro != nil { + if override.GeminiPro.RPD != nil { + policy.Quota.ProRPD = clampGeminiQuotaInt64WithUnlimited(*override.GeminiPro.RPD) + } + if override.GeminiPro.RPM != nil { + policy.Quota.ProRPM = clampGeminiQuotaRPM(*override.GeminiPro.RPM) + } + } + if override.GeminiFlash != nil { + if override.GeminiFlash.RPD != nil { + policy.Quota.FlashRPD = clampGeminiQuotaInt64WithUnlimited(*override.GeminiFlash.RPD) + } + if override.GeminiFlash.RPM != nil { + policy.Quota.FlashRPM = clampGeminiQuotaRPM(*override.GeminiFlash.RPM) + } + } + + p.tiers[tierID] = policy + } +} + +func (p *GeminiQuotaPolicy) QuotaForTier(tierID string) (GeminiQuota, bool) { + policy, ok := p.policyForTier(tierID) + if !ok { + return GeminiQuota{}, false + } + return policy.Quota, true +} + +func (p *GeminiQuotaPolicy) CooldownForTier(tierID string) time.Duration { + policy, ok := p.policyForTier(tierID) + if ok && policy.Cooldown > 0 { + return policy.Cooldown + } + return 5 * time.Minute +} + +func (p *GeminiQuotaPolicy) policyForTier(tierID string) (GeminiTierPolicy, bool) { + if p == nil { + return GeminiTierPolicy{}, false + } + normalized := normalizeGeminiTierID(tierID) + if policy, ok := p.tiers[normalized]; ok { + return policy, true + } + return GeminiTierPolicy{}, false +} + +func normalizeGeminiTierID(tierID string) string { + tierID = strings.TrimSpace(tierID) + if tierID == "" { + return "" + } + // Prefer canonical mapping (handles legacy tier strings). + if canonical := canonicalGeminiTierID(tierID); canonical != "" { + return canonical + } + // Accept older policy keys that used uppercase names. 
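+ // e.g. the legacy key "GOOGLE_AI_PRO" normalizes to the canonical lowercase constant below.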
+ switch strings.ToUpper(tierID) { + case "AISTUDIO_FREE": + return GeminiTierAIStudioFree + case "AISTUDIO_PAID": + return GeminiTierAIStudioPaid + case "GOOGLE_ONE_FREE": + return GeminiTierGoogleOneFree + case "GOOGLE_AI_PRO": + return GeminiTierGoogleAIPro + case "GOOGLE_AI_ULTRA": + return GeminiTierGoogleAIUltra + case "GCP_STANDARD": + return GeminiTierGCPStandard + case "GCP_ENTERPRISE": + return GeminiTierGCPEnterprise + } + return strings.ToLower(tierID) +} + +func clampGeminiQuotaInt64WithUnlimited(value int64) int64 { + if value < -1 { + return 0 + } + return value +} + +func clampGeminiQuotaInt(value int) int { + if value < 0 { + return 0 + } + return value +} + +func clampGeminiQuotaRPM(value int64) int64 { + if value < 0 { + return 0 + } + return value +} + +func geminiCooldownForTier(tierID string) time.Duration { + policy := newGeminiQuotaPolicy() + return policy.CooldownForTier(tierID) +} + +func geminiQuotaTierKeyForAccount(account *Account) string { + if account == nil || account.Platform != PlatformGemini { + return "" + } + + // Note: GeminiOAuthType() already defaults legacy (project_id present) to code_assist. + oauthType := strings.ToLower(strings.TrimSpace(account.GeminiOAuthType())) + rawTier := strings.TrimSpace(account.GeminiTierID()) + + // Prefer the canonical tier stored in credentials. + if tierID := canonicalGeminiTierIDForOAuthType(oauthType, rawTier); tierID != "" && tierID != GeminiTierGoogleOneUnknown { + return tierID + } + + // Fallback defaults when tier_id is missing or unknown. + switch oauthType { + case "google_one": + return GeminiTierGoogleOneFree + case "code_assist": + return GeminiTierGCPStandard + case "ai_studio": + return GeminiTierAIStudioFree + default: + // API Key accounts (type=apikey) have empty oauth_type and are treated as AI Studio. 
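+ // Unrecognized oauth_type values also fall through here, defaulting to the most restrictive tier.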
+ return GeminiTierAIStudioFree + } +} + +func geminiModelClassFromName(model string) geminiModelClass { + name := strings.ToLower(strings.TrimSpace(model)) + if strings.Contains(name, "flash") || strings.Contains(name, "lite") { + return geminiModelFlash + } + return geminiModelPro +} + +func geminiAggregateUsage(stats []usagestats.ModelStat) GeminiUsageTotals { + var totals GeminiUsageTotals + for _, stat := range stats { + switch geminiModelClassFromName(stat.Model) { + case geminiModelFlash: + totals.FlashRequests += stat.Requests + totals.FlashTokens += stat.TotalTokens + totals.FlashCost += stat.ActualCost + default: + totals.ProRequests += stat.Requests + totals.ProTokens += stat.TotalTokens + totals.ProCost += stat.ActualCost + } + } + return totals +} + +func geminiQuotaLocation() *time.Location { + loc, err := time.LoadLocation("America/Los_Angeles") + if err != nil { + return time.FixedZone("PST", -8*3600) + } + return loc +} + +func geminiDailyWindowStart(now time.Time) time.Time { + loc := geminiQuotaLocation() + localNow := now.In(loc) + return time.Date(localNow.Year(), localNow.Month(), localNow.Day(), 0, 0, 0, 0, loc) +} + +func geminiDailyResetTime(now time.Time) time.Time { + loc := geminiQuotaLocation() + localNow := now.In(loc) + start := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), 0, 0, 0, 0, loc) + reset := start.Add(24 * time.Hour) + if !reset.After(localNow) { + reset = reset.Add(24 * time.Hour) + } + return reset +} diff --git a/backend/internal/service/gemini_token_cache.go b/backend/internal/service/gemini_token_cache.go new file mode 100644 index 00000000..70f246da --- /dev/null +++ b/backend/internal/service/gemini_token_cache.go @@ -0,0 +1,17 @@ +package service + +import ( + "context" + "time" +) + +// GeminiTokenCache stores short-lived access tokens and coordinates refresh to avoid stampedes. +type GeminiTokenCache interface { + // cacheKey should be stable for the token scope; for GeminiCli OAuth we primarily use project_id. 
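+ // Implementations should be safe for concurrent use; GeminiTokenCacheKey shows how keys are derived.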
+ GetAccessToken(ctx context.Context, cacheKey string) (string, error) + SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error + DeleteAccessToken(ctx context.Context, cacheKey string) error + + AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) + ReleaseRefreshLock(ctx context.Context, cacheKey string) error +} diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go new file mode 100644 index 00000000..a5cacc9a --- /dev/null +++ b/backend/internal/service/gemini_token_provider.go @@ -0,0 +1,160 @@ +package service + +import ( + "context" + "errors" + "log" + "strconv" + "strings" + "time" +) + +const ( + geminiTokenRefreshSkew = 3 * time.Minute + geminiTokenCacheSkew = 5 * time.Minute +) + +type GeminiTokenProvider struct { + accountRepo AccountRepository + tokenCache GeminiTokenCache + geminiOAuthService *GeminiOAuthService +} + +func NewGeminiTokenProvider( + accountRepo AccountRepository, + tokenCache GeminiTokenCache, + geminiOAuthService *GeminiOAuthService, +) *GeminiTokenProvider { + return &GeminiTokenProvider{ + accountRepo: accountRepo, + tokenCache: tokenCache, + geminiOAuthService: geminiOAuthService, + } +} + +func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != PlatformGemini || account.Type != AccountTypeOAuth { + return "", errors.New("not a gemini oauth account") + } + + cacheKey := GeminiTokenCacheKey(account) + + // 1) Try cache first. + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + } + + // 2) Refresh if needed (pre-expiry skew). + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= geminiTokenRefreshSkew + if needsRefresh && p.tokenCache != nil { + locked, err := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if err == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // Re-check after lock (another worker may have refreshed). 
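+ // Double-checked locking: only one worker performs the refresh while the rest reuse its result.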
+ if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= geminiTokenRefreshSkew { + if p.geminiOAuthService == nil { + return "", errors.New("gemini oauth service not configured") + } + tokenInfo, err := p.geminiOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + return "", err + } + newCredentials := p.geminiOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + _ = p.accountRepo.Update(ctx, account) + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + + accessToken := account.GetCredential("access_token") + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found in credentials") + } + + // project_id is optional now: + // - If present: will use Code Assist API (requires project_id) + // - If absent: will use AI Studio API with OAuth token (like regular API key mode) + // Auto-detect project_id only if explicitly enabled via a credential flag + projectID := strings.TrimSpace(account.GetCredential("project_id")) + autoDetectProjectID := account.GetCredential("auto_detect_project_id") == "true" + + if projectID == "" && autoDetectProjectID { + if p.geminiOAuthService == nil { + return accessToken, nil // Fallback to AI Studio API mode + } + + var proxyURL string + if account.ProxyID != nil && p.geminiOAuthService.proxyRepo != nil { + if proxy, err := p.geminiOAuthService.proxyRepo.GetByID(ctx, *account.ProxyID); err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + detected, tierID, err := p.geminiOAuthService.fetchProjectID(ctx, accessToken, proxyURL) + if err != nil { + log.Printf("[GeminiTokenProvider] Auto-detect project_id failed: %v, fallback to AI Studio API mode", err) + return accessToken, nil + } + detected = strings.TrimSpace(detected) + tierID = strings.TrimSpace(tierID) + if detected != "" { + if account.Credentials == nil { + account.Credentials = make(map[string]any) + } + account.Credentials["project_id"] = detected + if tierID != "" { + account.Credentials["tier_id"] = tierID + } + _ = p.accountRepo.Update(ctx, account) + } + } + + // 3) Populate cache with TTL. 
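+ // The TTL is derived from expires_at minus a skew, so a cached token normally expires before the upstream token does.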
+ if p.tokenCache != nil { + ttl := 30 * time.Minute + if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > geminiTokenCacheSkew: + ttl = until - geminiTokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) + } + + return accessToken, nil +} + +func GeminiTokenCacheKey(account *Account) string { + projectID := strings.TrimSpace(account.GetCredential("project_id")) + if projectID != "" { + return projectID + } + return "account:" + strconv.FormatInt(account.ID, 10) +} diff --git a/backend/internal/service/gemini_token_refresher.go b/backend/internal/service/gemini_token_refresher.go new file mode 100644 index 00000000..7dfc5521 --- /dev/null +++ b/backend/internal/service/gemini_token_refresher.go @@ -0,0 +1,45 @@ +package service + +import ( + "context" + "time" +) + +type GeminiTokenRefresher struct { + geminiOAuthService *GeminiOAuthService +} + +func NewGeminiTokenRefresher(geminiOAuthService *GeminiOAuthService) *GeminiTokenRefresher { + return &GeminiTokenRefresher{geminiOAuthService: geminiOAuthService} +} + +func (r *GeminiTokenRefresher) CanRefresh(account *Account) bool { + return account.Platform == PlatformGemini && account.Type == AccountTypeOAuth +} + +func (r *GeminiTokenRefresher) NeedsRefresh(account *Account, refreshWindow time.Duration) bool { + if !r.CanRefresh(account) { + return false + } + expiresAt := account.GetCredentialAsTime("expires_at") + if expiresAt == nil { + return false + } + return time.Until(*expiresAt) < refreshWindow +} + +func (r *GeminiTokenRefresher) Refresh(ctx context.Context, account *Account) (map[string]any, error) { + tokenInfo, err := r.geminiOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + return nil, err + } + + newCredentials := r.geminiOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + + return newCredentials, nil +} diff --git a/backend/internal/service/geminicli_codeassist.go b/backend/internal/service/geminicli_codeassist.go new file mode 100644 index 00000000..0fe7f1cf --- /dev/null +++ b/backend/internal/service/geminicli_codeassist.go @@ -0,0 +1,13 @@ +package service + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/internal/pkg/geminicli" +) + +// GeminiCliCodeAssistClient calls GeminiCli internal Code Assist endpoints. 
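+ // Defining this as a small interface also allows callers such as fetchProjectID to be exercised with a stub client.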
+type GeminiCliCodeAssistClient interface { + LoadCodeAssist(ctx context.Context, accessToken, proxyURL string, req *geminicli.LoadCodeAssistRequest) (*geminicli.LoadCodeAssistResponse, error) + OnboardUser(ctx context.Context, accessToken, proxyURL string, req *geminicli.OnboardUserRequest) (*geminicli.OnboardUserResponse, error) +} diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go new file mode 100644 index 00000000..8e8d47d6 --- /dev/null +++ b/backend/internal/service/group.go @@ -0,0 +1,92 @@ +package service + +import "time" + +type Group struct { + ID int64 + Name string + Description string + Platform string + RateMultiplier float64 + IsExclusive bool + Status string + Hydrated bool // indicates the group was loaded from a trusted repository source + + SubscriptionType string + DailyLimitUSD *float64 + WeeklyLimitUSD *float64 + MonthlyLimitUSD *float64 + DefaultValidityDays int + + // Image-generation pricing (used by the antigravity and gemini platforms) + ImagePrice1K *float64 + ImagePrice2K *float64 + ImagePrice4K *float64 + + // Claude Code client restriction + ClaudeCodeOnly bool + FallbackGroupID *int64 + + CreatedAt time.Time + UpdatedAt time.Time + + AccountGroups []AccountGroup + AccountCount int64 +} + +func (g *Group) IsActive() bool { + return g.Status == StatusActive +} + +func (g *Group) IsSubscriptionType() bool { + return g.SubscriptionType == SubscriptionTypeSubscription +} + +func (g *Group) IsFreeSubscription() bool { + return g.IsSubscriptionType() && g.RateMultiplier == 0 +} + +func (g *Group) HasDailyLimit() bool { + return g.DailyLimitUSD != nil && *g.DailyLimitUSD > 0 +} + +func (g *Group) HasWeeklyLimit() bool { + return g.WeeklyLimitUSD != nil && *g.WeeklyLimitUSD > 0 +} + +func (g *Group) HasMonthlyLimit() bool { + return g.MonthlyLimitUSD != nil && *g.MonthlyLimitUSD > 0 +} + +// GetImagePrice returns the image-generation price for the given image_size. +// It returns nil when the group has no configured price (the caller should fall back to a default). +func (g *Group) GetImagePrice(imageSize string) *float64 { + switch imageSize { + case "1K": + return g.ImagePrice1K + case "2K": + return g.ImagePrice2K + case "4K": + return g.ImagePrice4K + default: + // Unknown sizes are billed at the 2K rate by default + return g.ImagePrice2K + } +} + +// IsGroupContextValid reports whether a group from context has the fields required for routing decisions.
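+// In particular, a group that is not Hydrated was not loaded from a trusted repository source and should not drive routing.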
+func IsGroupContextValid(group *Group) bool { + if group == nil { + return false + } + if group.ID <= 0 { + return false + } + if !group.Hydrated { + return false + } + if group.Platform == "" || group.Status == "" { + return false + } + return true +} diff --git a/backend/internal/service/group_service.go b/backend/internal/service/group_service.go new file mode 100644 index 00000000..324f347b --- /dev/null +++ b/backend/internal/service/group_service.go @@ -0,0 +1,208 @@ +package service + +import ( + "context" + "fmt" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +var ( + ErrGroupNotFound = infraerrors.NotFound("GROUP_NOT_FOUND", "group not found") + ErrGroupExists = infraerrors.Conflict("GROUP_EXISTS", "group name already exists") +) + +type GroupRepository interface { + Create(ctx context.Context, group *Group) error + GetByID(ctx context.Context, id int64) (*Group, error) + GetByIDLite(ctx context.Context, id int64) (*Group, error) + Update(ctx context.Context, group *Group) error + Delete(ctx context.Context, id int64) error + DeleteCascade(ctx context.Context, id int64) ([]int64, error) + + List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) + ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) + ListActive(ctx context.Context) ([]Group, error) + ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error) + + ExistsByName(ctx context.Context, name string) (bool, error) + GetAccountCount(ctx context.Context, groupID int64) (int64, error) + DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) +} + +// CreateGroupRequest is the request payload for creating a group +type CreateGroupRequest struct { + Name string `json:"name"` + Description string `json:"description"` + RateMultiplier float64 `json:"rate_multiplier"` + IsExclusive bool `json:"is_exclusive"` +} + +// UpdateGroupRequest is the request payload for updating a group +type UpdateGroupRequest struct { + Name *string `json:"name"` + Description *string `json:"description"` + RateMultiplier *float64 `json:"rate_multiplier"` + IsExclusive *bool `json:"is_exclusive"` + Status *string `json:"status"` +} + +// GroupService manages groups +type GroupService struct { + groupRepo GroupRepository + authCacheInvalidator APIKeyAuthCacheInvalidator +} + +// NewGroupService creates a group service instance +func NewGroupService(groupRepo GroupRepository, authCacheInvalidator APIKeyAuthCacheInvalidator) *GroupService { + return &GroupService{ + groupRepo: groupRepo, + authCacheInvalidator: authCacheInvalidator, + } +} + +// Create creates a group +func (s *GroupService) Create(ctx context.Context, req CreateGroupRequest) (*Group, error) { + // Check whether the name is already taken + exists, err := s.groupRepo.ExistsByName(ctx, req.Name) + if err != nil { + return nil, fmt.Errorf("check group exists: %w", err) + } + if exists { + return nil, ErrGroupExists + } + + // Create the group + group := &Group{ + Name: req.Name, + Description: req.Description, + Platform: PlatformAnthropic, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + Status: StatusActive, + SubscriptionType: SubscriptionTypeStandard, + } + + if err := s.groupRepo.Create(ctx, group); err != nil { + return nil, fmt.Errorf("create group: %w", err) + } + + return group, nil +} + +// GetByID fetches a group by ID +func (s *GroupService) GetByID(ctx context.Context, id int64) (*Group, error) { + group, err := s.groupRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + return group, nil +} + +// List returns a page of groups +func (s *GroupService) List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + groups, page, err := s.groupRepo.List(ctx, params) + if err != nil { + return nil, nil, fmt.Errorf("list groups: %w", err) + } + return groups, page, nil +} + +// ListActive returns all active groups +func (s *GroupService) ListActive(ctx context.Context) ([]Group, error) { + groups, err := s.groupRepo.ListActive(ctx) + if err != nil { + return nil, fmt.Errorf("list active groups: %w", err) + } + return groups, nil +} + +// Update updates a group +func (s *GroupService) Update(ctx context.Context, id int64, req UpdateGroupRequest) (*Group, error) { + group, err := s.groupRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + + // Update fields + if req.Name != nil && *req.Name != group.Name { + // Check whether the new name is already taken + exists, err := s.groupRepo.ExistsByName(ctx, *req.Name) + if err != nil { + return nil, fmt.Errorf("check group exists: %w", err) + } + if exists { + return nil, ErrGroupExists + } + group.Name = *req.Name + } + + if req.Description != nil { + group.Description = *req.Description + } + + if req.RateMultiplier != nil { + group.RateMultiplier = *req.RateMultiplier + } + + if req.IsExclusive != nil { + group.IsExclusive = *req.IsExclusive + } + + if req.Status != nil { + group.Status = *req.Status + } + + if err := s.groupRepo.Update(ctx, group); err != nil { + return nil, fmt.Errorf("update group: %w", err) + } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } + + return group, nil +} + +// Delete deletes a group +func (s *GroupService) Delete(ctx context.Context, id int64) error { + // Ensure the group exists + _, err := s.groupRepo.GetByID(ctx, id) + if err != nil { + return fmt.Errorf("get group: %w", err) + } + + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } + if err := s.groupRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete group: %w", err) + } + + return nil +} + +// GetStats returns statistics for a group +func (s *GroupService) GetStats(ctx context.Context, id int64) (map[string]any, error) { + group, err := s.groupRepo.GetByID(ctx, id) + if err != nil { + return nil, fmt.Errorf("get group: %w", err) + } + + // Fetch the account count + accountCount, err := s.groupRepo.GetAccountCount(ctx, id) + if err != nil { + return nil, fmt.Errorf("get account count: %w", err) + } + + stats := map[string]any{ + "id": group.ID, + "name": group.Name, + "rate_multiplier": group.RateMultiplier, + "is_exclusive": group.IsExclusive, + "status": group.Status, + "account_count": accountCount, + } + + return stats, nil +} diff --git a/backend/internal/service/group_test.go b/backend/internal/service/group_test.go new file mode 100644 index 00000000..a0f9672c --- /dev/null +++ b/backend/internal/service/group_test.go @@ -0,0 +1,92 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestGroup_GetImagePrice_1K verifies the 1K size returns the configured price +func TestGroup_GetImagePrice_1K(t *testing.T) { + price := 0.10 + group := &Group{ + ImagePrice1K: &price, + } + + result := group.GetImagePrice("1K") + require.NotNil(t, result) + require.InDelta(t, 0.10, *result, 0.0001) +} + +// TestGroup_GetImagePrice_2K verifies the 2K size returns the configured price +func TestGroup_GetImagePrice_2K(t *testing.T) { + price := 0.15 + group := &Group{ + ImagePrice2K: &price, + } + + result :=
group.GetImagePrice("2K") + require.NotNil(t, result) + require.InDelta(t, 0.15, *result, 0.0001) +} + +// TestGroup_GetImagePrice_4K 测试 4K 尺寸返回正确价格 +func TestGroup_GetImagePrice_4K(t *testing.T) { + price := 0.30 + group := &Group{ + ImagePrice4K: &price, + } + + result := group.GetImagePrice("4K") + require.NotNil(t, result) + require.InDelta(t, 0.30, *result, 0.0001) +} + +// TestGroup_GetImagePrice_UnknownSize 测试未知尺寸回退 2K +func TestGroup_GetImagePrice_UnknownSize(t *testing.T) { + price2K := 0.15 + group := &Group{ + ImagePrice2K: &price2K, + } + + // 未知尺寸 "3K" 应该回退到 2K + result := group.GetImagePrice("3K") + require.NotNil(t, result) + require.InDelta(t, 0.15, *result, 0.0001) + + // 空字符串也回退到 2K + result = group.GetImagePrice("") + require.NotNil(t, result) + require.InDelta(t, 0.15, *result, 0.0001) +} + +// TestGroup_GetImagePrice_NilValues 测试未配置时返回 nil +func TestGroup_GetImagePrice_NilValues(t *testing.T) { + group := &Group{ + // 所有 ImagePrice 字段都是 nil + } + + require.Nil(t, group.GetImagePrice("1K")) + require.Nil(t, group.GetImagePrice("2K")) + require.Nil(t, group.GetImagePrice("4K")) + require.Nil(t, group.GetImagePrice("unknown")) +} + +// TestGroup_GetImagePrice_PartialConfig 测试部分配置 +func TestGroup_GetImagePrice_PartialConfig(t *testing.T) { + price1K := 0.10 + group := &Group{ + ImagePrice1K: &price1K, + // ImagePrice2K 和 ImagePrice4K 未配置 + } + + result := group.GetImagePrice("1K") + require.NotNil(t, result) + require.InDelta(t, 0.10, *result, 0.0001) + + // 2K 和 4K 返回 nil + require.Nil(t, group.GetImagePrice("2K")) + require.Nil(t, group.GetImagePrice("4K")) +} diff --git a/backend/internal/service/http_upstream_port.go b/backend/internal/service/http_upstream_port.go new file mode 100644 index 00000000..9357f763 --- /dev/null +++ b/backend/internal/service/http_upstream_port.go @@ -0,0 +1,30 @@ +package service + +import "net/http" + +// HTTPUpstream 上游 HTTP 请求接口 +// 用于向上游 API(Claude、OpenAI、Gemini 等)发送请求 +// 这是一个通用接口,可用于任何基于 HTTP 的上游服务 +// +// 设计说明: +// - 支持可选代理配置 +// - 支持账户级连接池隔离 +// - 实现类负责连接池管理和复用 +type HTTPUpstream interface { + // Do 执行 HTTP 请求 + // + // 参数: + // - req: HTTP 请求对象,由调用方构建 + // - proxyURL: 代理服务器地址,空字符串表示直连 + // - accountID: 账户 ID,用于连接池隔离(隔离策略为 account 或 account_proxy 时生效) + // - accountConcurrency: 账户并发限制,用于动态调整连接池大小 + // + // 返回: + // - *http.Response: HTTP 响应,调用方必须关闭 Body + // - error: 请求错误(网络错误、超时等) + // + // 注意: + // - 调用方必须关闭 resp.Body,否则会导致连接泄漏 + // - 响应体可能已被包装以跟踪请求生命周期 + Do(req *http.Request, proxyURL string, accountID int64, accountConcurrency int) (*http.Response, error) +} diff --git a/backend/internal/service/identity_service.go b/backend/internal/service/identity_service.go new file mode 100644 index 00000000..1ffa8057 --- /dev/null +++ b/backend/internal/service/identity_service.go @@ -0,0 +1,271 @@ +package service + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "net/http" + "regexp" + "strconv" + "time" +) + +// 预编译正则表达式(避免每次调用重新编译) +var ( + // 匹配 user_id 格式: user_{64位hex}_account__session_{uuid} + userIDRegex = regexp.MustCompile(`^user_[a-f0-9]{64}_account__session_([a-f0-9-]{36})$`) + // 匹配 User-Agent 版本号: xxx/x.y.z + userAgentVersionRegex = regexp.MustCompile(`/(\d+)\.(\d+)\.(\d+)`) +) + +// 默认指纹值(当客户端未提供时使用) +var defaultFingerprint = Fingerprint{ + UserAgent: "claude-cli/2.0.62 (external, cli)", + StainlessLang: "js", + StainlessPackageVersion: "0.52.0", + StainlessOS: "Linux", + StainlessArch: "x64", + StainlessRuntime: "node", + StainlessRuntimeVersion: "v22.14.0", +} 
+ +// Fingerprint represents account fingerprint data +type Fingerprint struct { + ClientID string + UserAgent string + StainlessLang string + StainlessPackageVersion string + StainlessOS string + StainlessArch string + StainlessRuntime string + StainlessRuntimeVersion string +} + +// IdentityCache defines cache operations for identity service +type IdentityCache interface { + GetFingerprint(ctx context.Context, accountID int64) (*Fingerprint, error) + SetFingerprint(ctx context.Context, accountID int64, fp *Fingerprint) error +} + +// IdentityService manages the request identity fingerprints of OAuth accounts +type IdentityService struct { + cache IdentityCache +} + +// NewIdentityService creates a new IdentityService +func NewIdentityService(cache IdentityCache) *IdentityService { + return &IdentityService{cache: cache} } + +// GetOrCreateFingerprint fetches or creates the fingerprint for an account. +// On a cache hit, the user-agent version is checked and updated when the client reports a newer one. +// On a cache miss, a random ClientID is generated, a fingerprint is built from the request headers, and the result is cached. +func (s *IdentityService) GetOrCreateFingerprint(ctx context.Context, accountID int64, headers http.Header) (*Fingerprint, error) { + // Try the cache first + cached, err := s.cache.GetFingerprint(ctx, accountID) + if err == nil && cached != nil { + // Check whether the client's user-agent is a newer version + clientUA := headers.Get("User-Agent") + if clientUA != "" && isNewerVersion(clientUA, cached.UserAgent) { + // Update the user-agent + cached.UserAgent = clientUA + // Persist the updated fingerprint + _ = s.cache.SetFingerprint(ctx, accountID, cached) + log.Printf("Updated fingerprint user-agent for account %d: %s", accountID, clientUA) + } + return cached, nil + } + + // Cache miss (or parse failure): create a new fingerprint + fp := s.createFingerprintFromHeaders(headers) + + // Generate a random ClientID + fp.ClientID = generateClientID() + + // Save to cache (never expires) + if err := s.cache.SetFingerprint(ctx, accountID, fp); err != nil { + log.Printf("Warning: failed to cache fingerprint for account %d: %v", accountID, err) + } + + log.Printf("Created new fingerprint for account %d with client_id: %s", accountID, fp.ClientID) + return fp, nil +} + +// createFingerprintFromHeaders builds a fingerprint from the request headers +func (s *IdentityService) createFingerprintFromHeaders(headers http.Header) *Fingerprint { + fp := &Fingerprint{} + + // Read the User-Agent + if ua := headers.Get("User-Agent"); ua != "" { + fp.UserAgent = ua + } else { + fp.UserAgent = defaultFingerprint.UserAgent + } + + // Read the x-stainless-* headers, falling back to defaults when absent + fp.StainlessLang = getHeaderOrDefault(headers, "X-Stainless-Lang", defaultFingerprint.StainlessLang) + fp.StainlessPackageVersion = getHeaderOrDefault(headers, "X-Stainless-Package-Version", defaultFingerprint.StainlessPackageVersion) + fp.StainlessOS = getHeaderOrDefault(headers, "X-Stainless-OS", defaultFingerprint.StainlessOS) + fp.StainlessArch = getHeaderOrDefault(headers, "X-Stainless-Arch", defaultFingerprint.StainlessArch) + fp.StainlessRuntime = getHeaderOrDefault(headers, "X-Stainless-Runtime", defaultFingerprint.StainlessRuntime) + fp.StainlessRuntimeVersion = getHeaderOrDefault(headers, "X-Stainless-Runtime-Version", defaultFingerprint.StainlessRuntimeVersion) + + return fp +} + +// getHeaderOrDefault returns the header value, or the default when the header is absent +func getHeaderOrDefault(headers http.Header, key, defaultValue string) string { + if v := headers.Get(key); v != "" { + return v + } + return defaultValue +} + +// ApplyFingerprint applies the fingerprint to the request headers (overriding any existing x-stainless-* headers) +func (s *IdentityService) ApplyFingerprint(req *http.Request, fp *Fingerprint) { + if fp == nil { + return + } + + // Set the user-agent + if fp.UserAgent != "" { + req.Header.Set("user-agent", fp.UserAgent) + } + + // Set the x-stainless-* headers + if fp.StainlessLang != "" { + req.Header.Set("X-Stainless-Lang", fp.StainlessLang) + } + if fp.StainlessPackageVersion != "" { + req.Header.Set("X-Stainless-Package-Version", fp.StainlessPackageVersion) + } + if fp.StainlessOS != "" { + req.Header.Set("X-Stainless-OS", fp.StainlessOS) + } + if fp.StainlessArch != "" { + req.Header.Set("X-Stainless-Arch", fp.StainlessArch) + } + if fp.StainlessRuntime != "" { + req.Header.Set("X-Stainless-Runtime", fp.StainlessRuntime) + } + if fp.StainlessRuntimeVersion != "" { + req.Header.Set("X-Stainless-Runtime-Version", fp.StainlessRuntimeVersion) + } +} + +// RewriteUserID rewrites metadata.user_id in the body. +// Input format: user_{clientId}_account__session_{sessionUUID} +// Output format: user_{cachedClientID}_account_{accountUUID}_session_{newHash} +func (s *IdentityService) RewriteUserID(body []byte, accountID int64, accountUUID, cachedClientID string) ([]byte, error) { + if len(body) == 0 || accountUUID == "" || cachedClientID == "" { + return body, nil + } + + // Parse the JSON + var reqMap map[string]any + if err := json.Unmarshal(body, &reqMap); err != nil { + return body, nil + } + + metadata, ok := reqMap["metadata"].(map[string]any) + if !ok { + return body, nil + } + + userID, ok := metadata["user_id"].(string) + if !ok || userID == "" { + return body, nil + } + + // Match the format: user_{64-char hex}_account__session_{uuid} + matches := userIDRegex.FindStringSubmatch(userID) + if matches == nil { + return body, nil + } + + sessionTail := matches[1] // the original session UUID + + // Derive the new session hash: SHA256(accountID::sessionTail) -> UUID format + seed := fmt.Sprintf("%d::%s", accountID, sessionTail) + newSessionHash := generateUUIDFromSeed(seed) + + // Build the new user_id + // Format: user_{cachedClientID}_account_{account_uuid}_session_{newSessionHash} + newUserID := fmt.Sprintf("user_%s_account_%s_session_%s", cachedClientID, accountUUID, newSessionHash) + + metadata["user_id"] = newUserID + reqMap["metadata"] = metadata + + return json.Marshal(reqMap) +} + +// generateClientID generates a 64-character hex client ID (32 random bytes) +func generateClientID() string { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + // Extremely rare; fall back to hashing the current timestamp + log.Printf("Warning: crypto/rand.Read failed: %v, using fallback", err) + // Use SHA256(current nanosecond time) as the fallback + h := sha256.Sum256([]byte(fmt.Sprintf("%d", time.Now().UnixNano()))) + return hex.EncodeToString(h[:]) + } + return hex.EncodeToString(b) +} + +// generateUUIDFromSeed derives a deterministic UUID v4-format string from a seed +func generateUUIDFromSeed(seed string) string { + hash := sha256.Sum256([]byte(seed)) + bytes := hash[:16] + + // Set the UUID v4 version and variant bits + bytes[6] = (bytes[6] & 0x0f) | 0x40 + bytes[8] = (bytes[8] & 0x3f) | 0x80 + + return fmt.Sprintf("%x-%x-%x-%x-%x", + bytes[0:4], bytes[4:6], bytes[6:8], bytes[8:10], bytes[10:16]) +} + +// parseUserAgentVersion parses the user-agent version. +// For example: claude-cli/2.0.62 -> (2, 0, 62) +func parseUserAgentVersion(ua string) (major, minor, patch int, ok bool) { + // Match the xxx/x.y.z format + matches := userAgentVersionRegex.FindStringSubmatch(ua) + if len(matches) != 4 { + return 0, 0, 0, false + } + major, _ = strconv.Atoi(matches[1]) + minor, _ = strconv.Atoi(matches[2]) + patch, _ = strconv.Atoi(matches[3]) + return major, minor, patch, true +} + +// isNewerVersion reports whether newUA carries a newer version than cachedUA +func isNewerVersion(newUA, cachedUA string) bool { + newMajor, newMinor, newPatch, newOk := parseUserAgentVersion(newUA) + cachedMajor, cachedMinor, cachedPatch, cachedOk := parseUserAgentVersion(cachedUA) + + if !newOk || !cachedOk { + return false + } + + // Compare version numbers + if newMajor > cachedMajor { + return true + } + if newMajor < cachedMajor { + return false + } + + if newMinor >
cachedMinor { + return true + } + if newMinor < cachedMinor { + return false + } + + return newPatch > cachedPatch +} diff --git a/backend/internal/service/oauth_service.go b/backend/internal/service/oauth_service.go new file mode 100644 index 00000000..0039cb44 --- /dev/null +++ b/backend/internal/service/oauth_service.go @@ -0,0 +1,301 @@ +package service + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/oauth" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" +) + +// OpenAIOAuthClient interface for OpenAI OAuth operations +type OpenAIOAuthClient interface { + ExchangeCode(ctx context.Context, code, codeVerifier, redirectURI, proxyURL string) (*openai.TokenResponse, error) + RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*openai.TokenResponse, error) +} + +// ClaudeOAuthClient handles HTTP requests for Claude OAuth flows +type ClaudeOAuthClient interface { + GetOrganizationUUID(ctx context.Context, sessionKey, proxyURL string) (string, error) + GetAuthorizationCode(ctx context.Context, sessionKey, orgUUID, scope, codeChallenge, state, proxyURL string) (string, error) + ExchangeCodeForToken(ctx context.Context, code, codeVerifier, state, proxyURL string, isSetupToken bool) (*oauth.TokenResponse, error) + RefreshToken(ctx context.Context, refreshToken, proxyURL string) (*oauth.TokenResponse, error) +} + +// OAuthService handles OAuth authentication flows +type OAuthService struct { + sessionStore *oauth.SessionStore + proxyRepo ProxyRepository + oauthClient ClaudeOAuthClient +} + +// NewOAuthService creates a new OAuth service +func NewOAuthService(proxyRepo ProxyRepository, oauthClient ClaudeOAuthClient) *OAuthService { + return &OAuthService{ + sessionStore: oauth.NewSessionStore(), + proxyRepo: proxyRepo, + oauthClient: oauthClient, + } +} + +// GenerateAuthURLResult contains the authorization URL and session info +type GenerateAuthURLResult struct { + AuthURL string `json:"auth_url"` + SessionID string `json:"session_id"` +} + +// GenerateAuthURL generates an OAuth authorization URL with full scope +func (s *OAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64) (*GenerateAuthURLResult, error) { + scope := fmt.Sprintf("%s %s", oauth.ScopeProfile, oauth.ScopeInference) + return s.generateAuthURLWithScope(ctx, scope, proxyID) +} + +// GenerateSetupTokenURL generates an OAuth authorization URL for setup token (inference only) +func (s *OAuthService) GenerateSetupTokenURL(ctx context.Context, proxyID *int64) (*GenerateAuthURLResult, error) { + scope := oauth.ScopeInference + return s.generateAuthURLWithScope(ctx, scope, proxyID) +} + +func (s *OAuthService) generateAuthURLWithScope(ctx context.Context, scope string, proxyID *int64) (*GenerateAuthURLResult, error) { + // Generate PKCE values + state, err := oauth.GenerateState() + if err != nil { + return nil, fmt.Errorf("failed to generate state: %w", err) + } + + codeVerifier, err := oauth.GenerateCodeVerifier() + if err != nil { + return nil, fmt.Errorf("failed to generate code verifier: %w", err) + } + + codeChallenge := oauth.GenerateCodeChallenge(codeVerifier) + + // Generate session ID + sessionID, err := oauth.GenerateSessionID() + if err != nil { + return nil, fmt.Errorf("failed to generate session ID: %w", err) + } + + // Get proxy URL if specified + var proxyURL string + if proxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *proxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // Store session + session := 
&oauth.OAuthSession{ + State: state, + CodeVerifier: codeVerifier, + Scope: scope, + ProxyURL: proxyURL, + CreatedAt: time.Now(), + } + s.sessionStore.Set(sessionID, session) + + // Build authorization URL + authURL := oauth.BuildAuthorizationURL(state, codeChallenge, scope) + + return &GenerateAuthURLResult{ + AuthURL: authURL, + SessionID: sessionID, + }, nil +} + +// ExchangeCodeInput represents the input for code exchange +type ExchangeCodeInput struct { + SessionID string + Code string + ProxyID *int64 +} + +// TokenInfo represents the token information stored in credentials +type TokenInfo struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + ExpiresAt int64 `json:"expires_at"` + RefreshToken string `json:"refresh_token,omitempty"` + Scope string `json:"scope,omitempty"` + OrgUUID string `json:"org_uuid,omitempty"` + AccountUUID string `json:"account_uuid,omitempty"` +} + +// ExchangeCode exchanges authorization code for tokens +func (s *OAuthService) ExchangeCode(ctx context.Context, input *ExchangeCodeInput) (*TokenInfo, error) { + // Get session + session, ok := s.sessionStore.Get(input.SessionID) + if !ok { + return nil, fmt.Errorf("session not found or expired") + } + + // Get proxy URL + proxyURL := session.ProxyURL + if input.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // Determine if this is a setup token (scope is inference only) + isSetupToken := session.Scope == oauth.ScopeInference + + // Exchange code for token + tokenInfo, err := s.exchangeCodeForToken(ctx, input.Code, session.CodeVerifier, session.State, proxyURL, isSetupToken) + if err != nil { + return nil, err + } + + // Delete session after successful exchange + s.sessionStore.Delete(input.SessionID) + + return tokenInfo, nil +} + +// CookieAuthInput represents the input for cookie-based authentication +type CookieAuthInput struct { + SessionKey string + ProxyID *int64 + Scope string // "full" or "inference" +} + +// CookieAuth performs OAuth using sessionKey (cookie-based auto-auth) +func (s *OAuthService) CookieAuth(ctx context.Context, input *CookieAuthInput) (*TokenInfo, error) { + // Get proxy URL if specified + var proxyURL string + if input.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // Determine scope and if this is a setup token + scope := fmt.Sprintf("%s %s", oauth.ScopeProfile, oauth.ScopeInference) + isSetupToken := false + if input.Scope == "inference" { + scope = oauth.ScopeInference + isSetupToken = true + } + + // Step 1: Get organization info using sessionKey + orgUUID, err := s.getOrganizationUUID(ctx, input.SessionKey, proxyURL) + if err != nil { + return nil, fmt.Errorf("failed to get organization info: %w", err) + } + + // Step 2: Generate PKCE values + codeVerifier, err := oauth.GenerateCodeVerifier() + if err != nil { + return nil, fmt.Errorf("failed to generate code verifier: %w", err) + } + codeChallenge := oauth.GenerateCodeChallenge(codeVerifier) + + state, err := oauth.GenerateState() + if err != nil { + return nil, fmt.Errorf("failed to generate state: %w", err) + } + + // Step 3: Get authorization code using cookie + authCode, err := s.getAuthorizationCode(ctx, input.SessionKey, orgUUID, scope, codeChallenge, state, proxyURL) + if err != nil { + return nil, fmt.Errorf("failed to get authorization code: %w", err) 
+ } + + // Step 4: Exchange code for token + tokenInfo, err := s.exchangeCodeForToken(ctx, authCode, codeVerifier, state, proxyURL, isSetupToken) + if err != nil { + return nil, fmt.Errorf("failed to exchange code: %w", err) + } + + // Ensure org_uuid is set (from step 1 if not from token response) + if tokenInfo.OrgUUID == "" && orgUUID != "" { + tokenInfo.OrgUUID = orgUUID + log.Printf("[OAuth] Set org_uuid from cookie auth: %s", orgUUID) + } + + return tokenInfo, nil +} + +// getOrganizationUUID gets the organization UUID from claude.ai using sessionKey +func (s *OAuthService) getOrganizationUUID(ctx context.Context, sessionKey, proxyURL string) (string, error) { + return s.oauthClient.GetOrganizationUUID(ctx, sessionKey, proxyURL) +} + +// getAuthorizationCode gets the authorization code using sessionKey +func (s *OAuthService) getAuthorizationCode(ctx context.Context, sessionKey, orgUUID, scope, codeChallenge, state, proxyURL string) (string, error) { + return s.oauthClient.GetAuthorizationCode(ctx, sessionKey, orgUUID, scope, codeChallenge, state, proxyURL) +} + +// exchangeCodeForToken exchanges authorization code for tokens +func (s *OAuthService) exchangeCodeForToken(ctx context.Context, code, codeVerifier, state, proxyURL string, isSetupToken bool) (*TokenInfo, error) { + tokenResp, err := s.oauthClient.ExchangeCodeForToken(ctx, code, codeVerifier, state, proxyURL, isSetupToken) + if err != nil { + return nil, err + } + + tokenInfo := &TokenInfo{ + AccessToken: tokenResp.AccessToken, + TokenType: tokenResp.TokenType, + ExpiresIn: tokenResp.ExpiresIn, + ExpiresAt: time.Now().Unix() + tokenResp.ExpiresIn, + RefreshToken: tokenResp.RefreshToken, + Scope: tokenResp.Scope, + } + + if tokenResp.Organization != nil && tokenResp.Organization.UUID != "" { + tokenInfo.OrgUUID = tokenResp.Organization.UUID + log.Printf("[OAuth] Got org_uuid: %s", tokenInfo.OrgUUID) + } + if tokenResp.Account != nil && tokenResp.Account.UUID != "" { + tokenInfo.AccountUUID = tokenResp.Account.UUID + log.Printf("[OAuth] Got account_uuid: %s", tokenInfo.AccountUUID) + } + + return tokenInfo, nil +} + +// RefreshToken refreshes an OAuth token +func (s *OAuthService) RefreshToken(ctx context.Context, refreshToken string, proxyURL string) (*TokenInfo, error) { + tokenResp, err := s.oauthClient.RefreshToken(ctx, refreshToken, proxyURL) + if err != nil { + return nil, err + } + + return &TokenInfo{ + AccessToken: tokenResp.AccessToken, + TokenType: tokenResp.TokenType, + ExpiresIn: tokenResp.ExpiresIn, + ExpiresAt: time.Now().Unix() + tokenResp.ExpiresIn, + RefreshToken: tokenResp.RefreshToken, + Scope: tokenResp.Scope, + }, nil +} + +// RefreshAccountToken refreshes token for an account +func (s *OAuthService) RefreshAccountToken(ctx context.Context, account *Account) (*TokenInfo, error) { + refreshToken := account.GetCredential("refresh_token") + if refreshToken == "" { + return nil, fmt.Errorf("no refresh token available") + } + + var proxyURL string + if account.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *account.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + return s.RefreshToken(ctx, refreshToken, proxyURL) +} + +// Stop stops the session store cleanup goroutine +func (s *OAuthService) Stop() { + s.sessionStore.Stop() +} diff --git a/backend/internal/service/openai_codex_transform.go b/backend/internal/service/openai_codex_transform.go new file mode 100644 index 00000000..264bdf95 --- /dev/null +++ b/backend/internal/service/openai_codex_transform.go @@ -0,0 
+package service
+
+import (
+    _ "embed"
+    "encoding/json"
+    "fmt"
+    "io"
+    "net/http"
+    "os"
+    "path/filepath"
+    "strings"
+    "time"
+)
+
+const (
+    opencodeCodexHeaderURL = "https://raw.githubusercontent.com/anomalyco/opencode/dev/packages/opencode/src/session/prompt/codex_header.txt"
+    codexCacheTTL          = 15 * time.Minute
+)
+
+//go:embed prompts/codex_cli_instructions.md
+var codexCLIInstructions string
+
+var codexModelMap = map[string]string{
+    "gpt-5.1-codex":             "gpt-5.1-codex",
+    "gpt-5.1-codex-low":         "gpt-5.1-codex",
+    "gpt-5.1-codex-medium":      "gpt-5.1-codex",
+    "gpt-5.1-codex-high":        "gpt-5.1-codex",
+    "gpt-5.1-codex-max":         "gpt-5.1-codex-max",
+    "gpt-5.1-codex-max-low":     "gpt-5.1-codex-max",
+    "gpt-5.1-codex-max-medium":  "gpt-5.1-codex-max",
+    "gpt-5.1-codex-max-high":    "gpt-5.1-codex-max",
+    "gpt-5.1-codex-max-xhigh":   "gpt-5.1-codex-max",
+    "gpt-5.2":                   "gpt-5.2",
+    "gpt-5.2-none":              "gpt-5.2",
+    "gpt-5.2-low":               "gpt-5.2",
+    "gpt-5.2-medium":            "gpt-5.2",
+    "gpt-5.2-high":              "gpt-5.2",
+    "gpt-5.2-xhigh":             "gpt-5.2",
+    "gpt-5.2-codex":             "gpt-5.2-codex",
+    "gpt-5.2-codex-low":         "gpt-5.2-codex",
+    "gpt-5.2-codex-medium":      "gpt-5.2-codex",
+    "gpt-5.2-codex-high":        "gpt-5.2-codex",
+    "gpt-5.2-codex-xhigh":       "gpt-5.2-codex",
+    "gpt-5.1-codex-mini":        "gpt-5.1-codex-mini",
+    "gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini",
+    "gpt-5.1-codex-mini-high":   "gpt-5.1-codex-mini",
+    "gpt-5.1":                   "gpt-5.1",
+    "gpt-5.1-none":              "gpt-5.1",
+    "gpt-5.1-low":               "gpt-5.1",
+    "gpt-5.1-medium":            "gpt-5.1",
+    "gpt-5.1-high":              "gpt-5.1",
+    "gpt-5.1-chat-latest":       "gpt-5.1",
+    "gpt-5-codex":               "gpt-5.1-codex",
+    "codex-mini-latest":         "gpt-5.1-codex-mini",
+    "gpt-5-codex-mini":          "gpt-5.1-codex-mini",
+    "gpt-5-codex-mini-medium":   "gpt-5.1-codex-mini",
+    "gpt-5-codex-mini-high":     "gpt-5.1-codex-mini",
+    "gpt-5":                     "gpt-5.1",
+    "gpt-5-mini":                "gpt-5.1",
+    "gpt-5-nano":                "gpt-5.1",
+}
+
+type codexTransformResult struct {
+    Modified        bool
+    NormalizedModel string
+    PromptCacheKey  string
+}
+
+type opencodeCacheMetadata struct {
+    ETag        string `json:"etag"`
+    LastFetch   string `json:"lastFetch,omitempty"`
+    LastChecked int64  `json:"lastChecked"`
+}
+
+func applyCodexOAuthTransform(reqBody map[string]any) codexTransformResult {
+    result := codexTransformResult{}
+    // Tool-call continuation affects the store strategy and the input filtering below.
+    needsToolContinuation := NeedsToolContinuation(reqBody)
+
+    model := ""
+    if v, ok := reqBody["model"].(string); ok {
+        model = v
+    }
+    normalizedModel := normalizeCodexModel(model)
+    if normalizedModel != "" {
+        if model != normalizedModel {
+            reqBody["model"] = normalizedModel
+            result.Modified = true
+        }
+        result.NormalizedModel = normalizedModel
+    }
+
+    // When OAuth traffic goes through the ChatGPT internal API, store must be false;
+    // an explicit true is forcibly overridden as well, otherwise the upstream
+    // rejects the request with "Store must be set to false".
+    if v, ok := reqBody["store"].(bool); !ok || v {
+        reqBody["store"] = false
+        result.Modified = true
+    }
+    if v, ok := reqBody["stream"].(bool); !ok || !v {
+        reqBody["stream"] = true
+        result.Modified = true
+    }
+
+    if _, ok := reqBody["max_output_tokens"]; ok {
+        delete(reqBody, "max_output_tokens")
+        result.Modified = true
+    }
+    if _, ok := reqBody["max_completion_tokens"]; ok {
+        delete(reqBody, "max_completion_tokens")
+        result.Modified = true
+    }
+
+    if normalizeCodexTools(reqBody) {
+        result.Modified = true
+    }
+
+    if v, ok := reqBody["prompt_cache_key"].(string); ok {
+        result.PromptCacheKey = strings.TrimSpace(v)
+    }
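+
+    // Illustrative example (a sketch, not an upstream contract): a request like
+    // {"model":"gpt-5","store":true,"max_output_tokens":100} leaves this
+    // transform as {"model":"gpt-5.1","store":false,"stream":true,...}.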
+    instructions := strings.TrimSpace(getOpenCodeCodexHeader())
+    existingInstructions, _ := reqBody["instructions"].(string)
+    existingInstructions = strings.TrimSpace(existingInstructions)
+
+    if instructions != "" {
+        if existingInstructions != instructions {
+            reqBody["instructions"] = instructions
+            result.Modified = true
+        }
+    } else if existingInstructions == "" {
+        // Fall back to the Codex CLI instructions when the opencode instructions could not be fetched.
+        codexInstructions := strings.TrimSpace(getCodexCLIInstructions())
+        if codexInstructions != "" {
+            reqBody["instructions"] = codexInstructions
+            result.Modified = true
+        }
+    }
+
+    // In continuation scenarios keep item_reference entries and ids so call_id context is not lost.
+    if input, ok := reqBody["input"].([]any); ok {
+        input = filterCodexInput(input, needsToolContinuation)
+        reqBody["input"] = input
+        result.Modified = true
+    }
+
+    return result
+}
+
+func normalizeCodexModel(model string) string {
+    if model == "" {
+        return "gpt-5.1"
+    }
+
+    modelID := model
+    if strings.Contains(modelID, "/") {
+        parts := strings.Split(modelID, "/")
+        modelID = parts[len(parts)-1]
+    }
+
+    if mapped := getNormalizedCodexModel(modelID); mapped != "" {
+        return mapped
+    }
+
+    normalized := strings.ToLower(modelID)
+
+    if strings.Contains(normalized, "gpt-5.2-codex") || strings.Contains(normalized, "gpt 5.2 codex") {
+        return "gpt-5.2-codex"
+    }
+    if strings.Contains(normalized, "gpt-5.2") || strings.Contains(normalized, "gpt 5.2") {
+        return "gpt-5.2"
+    }
+    if strings.Contains(normalized, "gpt-5.1-codex-max") || strings.Contains(normalized, "gpt 5.1 codex max") {
+        return "gpt-5.1-codex-max"
+    }
+    if strings.Contains(normalized, "gpt-5.1-codex-mini") || strings.Contains(normalized, "gpt 5.1 codex mini") {
+        return "gpt-5.1-codex-mini"
+    }
+    if strings.Contains(normalized, "codex-mini-latest") ||
+        strings.Contains(normalized, "gpt-5-codex-mini") ||
+        strings.Contains(normalized, "gpt 5 codex mini") {
+        return "codex-mini-latest"
+    }
+    if strings.Contains(normalized, "gpt-5.1-codex") || strings.Contains(normalized, "gpt 5.1 codex") {
+        return "gpt-5.1-codex"
+    }
+    if strings.Contains(normalized, "gpt-5.1") || strings.Contains(normalized, "gpt 5.1") {
+        return "gpt-5.1"
+    }
+    if strings.Contains(normalized, "codex") {
+        return "gpt-5.1-codex"
+    }
+    if strings.Contains(normalized, "gpt-5") || strings.Contains(normalized, "gpt 5") {
+        return "gpt-5.1"
+    }
+
+    return "gpt-5.1"
+}
+
+func getNormalizedCodexModel(modelID string) string {
+    if modelID == "" {
+        return ""
+    }
+    if mapped, ok := codexModelMap[modelID]; ok {
+        return mapped
+    }
+    lower := strings.ToLower(modelID)
+    for key, value := range codexModelMap {
+        if strings.ToLower(key) == lower {
+            return value
+        }
+    }
+    return ""
+}
+
+func getOpenCodeCachedPrompt(url, cacheFileName, metaFileName string) string {
+    cacheDir := codexCachePath("")
+    if cacheDir == "" {
+        return ""
+    }
+    cacheFile := filepath.Join(cacheDir, cacheFileName)
+    metaFile := filepath.Join(cacheDir, metaFileName)
+
+    var cachedContent string
+    if content, ok := readFile(cacheFile); ok {
+        cachedContent = content
+    }
+
+    var meta opencodeCacheMetadata
+    if loadJSON(metaFile, &meta) && meta.LastChecked > 0 && cachedContent != "" {
+        if time.Since(time.UnixMilli(meta.LastChecked)) < codexCacheTTL {
+            return cachedContent
+        }
+    }
+
+    content, etag, status, err := fetchWithETag(url, meta.ETag)
+    if err == nil && status == http.StatusNotModified && cachedContent != "" {
+        return cachedContent
+    }
+    if err == nil && status >= 200 && status < 300 && content != "" {
+        _ = writeFile(cacheFile, content)
+        meta = opencodeCacheMetadata{
+            ETag:        etag,
+            LastFetch:   time.Now().UTC().Format(time.RFC3339),
+            LastChecked: time.Now().UnixMilli(),
+        }
+        _ = writeJSON(metaFile, meta)
+        return content
+    }
+
+    return cachedContent
+}
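+
+// Cache layout sketch (illustrative): under ~/.opencode/cache,
+// opencode-codex-header.txt holds the prompt body and
+// opencode-codex-header-meta.json holds {"etag":"...","lastFetch":"...","lastChecked":...};
+// within codexCacheTTL (15 minutes) the cached copy is served without
+// revalidation, after which a conditional If-None-Match request refreshes it.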
+func getOpenCodeCodexHeader() string {
+    // Prefer the instructions cached from the opencode repository.
+    opencodeInstructions := getOpenCodeCachedPrompt(opencodeCodexHeaderURL, "opencode-codex-header.txt", "opencode-codex-header-meta.json")
+
+    // Return the opencode instructions directly when they are available.
+    if opencodeInstructions != "" {
+        return opencodeInstructions
+    }
+
+    // Otherwise fall back to the bundled Codex CLI instructions.
+    return getCodexCLIInstructions()
+}
+
+func getCodexCLIInstructions() string {
+    return codexCLIInstructions
+}
+
+func GetOpenCodeInstructions() string {
+    return getOpenCodeCodexHeader()
+}
+
+// GetCodexCLIInstructions returns the embedded Codex CLI instruction content.
+func GetCodexCLIInstructions() string {
+    return getCodexCLIInstructions()
+}
+
+// ReplaceWithCodexInstructions replaces the request instructions with the
+// embedded Codex instructions when necessary.
+func ReplaceWithCodexInstructions(reqBody map[string]any) bool {
+    codexInstructions := strings.TrimSpace(getCodexCLIInstructions())
+    if codexInstructions == "" {
+        return false
+    }
+
+    existingInstructions, _ := reqBody["instructions"].(string)
+    if strings.TrimSpace(existingInstructions) != codexInstructions {
+        reqBody["instructions"] = codexInstructions
+        return true
+    }
+
+    return false
+}
+
+// IsInstructionError reports whether an error message relates to instruction
+// format or system prompts.
+func IsInstructionError(errorMessage string) bool {
+    if errorMessage == "" {
+        return false
+    }
+
+    lowerMsg := strings.ToLower(errorMessage)
+    instructionKeywords := []string{
+        "instruction",
+        "instructions",
+        "system prompt",
+        "system message",
+        "invalid prompt",
+        "prompt format",
+    }
+
+    for _, keyword := range instructionKeywords {
+        if strings.Contains(lowerMsg, keyword) {
+            return true
+        }
+    }
+
+    return false
+}
+
+// filterCodexInput filters item_reference entries and id fields as needed.
+// When preserveReferences is true, references and ids are kept so continuation
+// requests retain the context they depend on.
+func filterCodexInput(input []any, preserveReferences bool) []any {
+    filtered := make([]any, 0, len(input))
+    for _, item := range input {
+        m, ok := item.(map[string]any)
+        if !ok {
+            filtered = append(filtered, item)
+            continue
+        }
+        typ, _ := m["type"].(string)
+        if typ == "item_reference" {
+            if !preserveReferences {
+                continue
+            }
+            newItem := make(map[string]any, len(m))
+            for key, value := range m {
+                newItem[key] = value
+            }
+            filtered = append(filtered, newItem)
+            continue
+        }
+
+        newItem := m
+        copied := false
+        // Only create a copy when a field actually needs to change, so the original input is never mutated.
+        ensureCopy := func() {
+            if copied {
+                return
+            }
+            newItem = make(map[string]any, len(m))
+            for key, value := range m {
+                newItem[key] = value
+            }
+            copied = true
+        }
+
+        if isCodexToolCallItemType(typ) {
+            if callID, ok := m["call_id"].(string); !ok || strings.TrimSpace(callID) == "" {
+                if id, ok := m["id"].(string); ok && strings.TrimSpace(id) != "" {
+                    ensureCopy()
+                    newItem["call_id"] = id
+                }
+            }
+        }
+
+        if !preserveReferences {
+            ensureCopy()
+            delete(newItem, "id")
+            if !isCodexToolCallItemType(typ) {
+                delete(newItem, "call_id")
+            }
+        }
+
+        filtered = append(filtered, newItem)
+    }
+    return filtered
+}
+
+func isCodexToolCallItemType(typ string) bool {
+    if typ == "" {
+        return false
+    }
+    return strings.HasSuffix(typ, "_call") || strings.HasSuffix(typ, "_call_output")
+}
+
+func normalizeCodexTools(reqBody map[string]any) bool {
+    rawTools, ok := reqBody["tools"]
+    if !ok || rawTools == nil {
+        return false
+    }
+    tools, ok := rawTools.([]any)
+    if !ok {
+        return false
+    }
+
+    modified := false
+    for idx, tool := range tools {
+        toolMap, ok := tool.(map[string]any)
+        if !ok {
+            continue
+        }
+
+        toolType, _ := toolMap["type"].(string)
+        if strings.TrimSpace(toolType) != "function" {
+            continue
+        }
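+
+        // Illustrative example ("get_weather" is a hypothetical tool name):
+        // a Chat Completions style tool {"type":"function","function":{"name":"get_weather",...}}
+        // is flattened to {"type":"function","name":"get_weather",...}, the
+        // top-level shape the upstream Responses endpoint expects here.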
+        function, ok := toolMap["function"].(map[string]any)
+        if !ok {
+            continue
+        }
+
+        if _, ok := toolMap["name"]; !ok {
+            if name, ok := function["name"].(string); ok && strings.TrimSpace(name) != "" {
+                toolMap["name"] = name
+                modified = true
+            }
+        }
+        if _, ok := toolMap["description"]; !ok {
+            if desc, ok := function["description"].(string); ok && strings.TrimSpace(desc) != "" {
+                toolMap["description"] = desc
+                modified = true
+            }
+        }
+        if _, ok := toolMap["parameters"]; !ok {
+            if params, ok := function["parameters"]; ok {
+                toolMap["parameters"] = params
+                modified = true
+            }
+        }
+        if _, ok := toolMap["strict"]; !ok {
+            if strict, ok := function["strict"]; ok {
+                toolMap["strict"] = strict
+                modified = true
+            }
+        }
+
+        tools[idx] = toolMap
+    }
+
+    if modified {
+        reqBody["tools"] = tools
+    }
+
+    return modified
+}
+
+func codexCachePath(filename string) string {
+    home, err := os.UserHomeDir()
+    if err != nil {
+        return ""
+    }
+    cacheDir := filepath.Join(home, ".opencode", "cache")
+    if filename == "" {
+        return cacheDir
+    }
+    return filepath.Join(cacheDir, filename)
+}
+
+func readFile(path string) (string, bool) {
+    if path == "" {
+        return "", false
+    }
+    data, err := os.ReadFile(path)
+    if err != nil {
+        return "", false
+    }
+    return string(data), true
+}
+
+func writeFile(path, content string) error {
+    if path == "" {
+        return fmt.Errorf("empty cache path")
+    }
+    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
+        return err
+    }
+    return os.WriteFile(path, []byte(content), 0o644)
+}
+
+func loadJSON(path string, target any) bool {
+    data, err := os.ReadFile(path)
+    if err != nil {
+        return false
+    }
+    if err := json.Unmarshal(data, target); err != nil {
+        return false
+    }
+    return true
+}
+
+func writeJSON(path string, value any) error {
+    if path == "" {
+        return fmt.Errorf("empty json path")
+    }
+    if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil {
+        return err
+    }
+    data, err := json.Marshal(value)
+    if err != nil {
+        return err
+    }
+    return os.WriteFile(path, data, 0o644)
+}
+
+func fetchWithETag(url, etag string) (string, string, int, error) {
+    req, err := http.NewRequest(http.MethodGet, url, nil)
+    if err != nil {
+        return "", "", 0, err
+    }
+    req.Header.Set("User-Agent", "sub2api-codex")
+    if etag != "" {
+        req.Header.Set("If-None-Match", etag)
+    }
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        return "", "", 0, err
+    }
+    defer func() {
+        _ = resp.Body.Close()
+    }()
+
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return "", "", resp.StatusCode, err
+    }
+    return string(body), resp.Header.Get("etag"), resp.StatusCode, nil
+}
diff --git a/backend/internal/service/openai_codex_transform_test.go b/backend/internal/service/openai_codex_transform_test.go
new file mode 100644
index 00000000..0ff9485a
--- /dev/null
+++ b/backend/internal/service/openai_codex_transform_test.go
@@ -0,0 +1,167 @@
+package service
+
+import (
+    "encoding/json"
+    "os"
+    "path/filepath"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/require"
+)
+
+func TestApplyCodexOAuthTransform_ToolContinuationPreservesInput(t *testing.T) {
+    // Continuation scenario: item_reference and id are preserved, but store=true is no longer forced.
+    setupCodexCache(t)
+
+    reqBody := map[string]any{
+        "model": "gpt-5.2",
+        "input": []any{
+            map[string]any{"type": "item_reference", "id": "ref1", "text": "x"},
+            map[string]any{"type": "function_call_output", "call_id": "call_1", "output": "ok", "id": "o1"},
+        },
+        "tool_choice": "auto",
+    }
+
+    applyCodexOAuthTransform(reqBody)
+
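+    // With the cache stubbed by setupCodexCache, reqBody should also carry
+    // stream=true and the cached "header" instructions at this point.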
+    // store defaults to false when it was not explicitly set.
+    store, ok := reqBody["store"].(bool)
+    require.True(t, ok)
+    require.False(t, store)
+
+    input, ok := reqBody["input"].([]any)
+    require.True(t, ok)
+    require.Len(t, input, 2)
+
+    // Assert input[0] is a map so the following field assertions are safe.
+    first, ok := input[0].(map[string]any)
+    require.True(t, ok)
+    require.Equal(t, "item_reference", first["type"])
+    require.Equal(t, "ref1", first["id"])
+
+    // Assert input[1] is a map so the following field assertions are safe.
+    second, ok := input[1].(map[string]any)
+    require.True(t, ok)
+    require.Equal(t, "o1", second["id"])
+}
+
+func TestApplyCodexOAuthTransform_ExplicitStoreFalsePreserved(t *testing.T) {
+    // Continuation scenario: an explicit store=false is no longer forced to true and stays false.
+    setupCodexCache(t)
+
+    reqBody := map[string]any{
+        "model": "gpt-5.1",
+        "store": false,
+        "input": []any{
+            map[string]any{"type": "function_call_output", "call_id": "call_1"},
+        },
+        "tool_choice": "auto",
+    }
+
+    applyCodexOAuthTransform(reqBody)
+
+    store, ok := reqBody["store"].(bool)
+    require.True(t, ok)
+    require.False(t, store)
+}
+
+func TestApplyCodexOAuthTransform_ExplicitStoreTrueForcedFalse(t *testing.T) {
+    // An explicit store=true is forced to false as well.
+    setupCodexCache(t)
+
+    reqBody := map[string]any{
+        "model": "gpt-5.1",
+        "store": true,
+        "input": []any{
+            map[string]any{"type": "function_call_output", "call_id": "call_1"},
+        },
+        "tool_choice": "auto",
+    }
+
+    applyCodexOAuthTransform(reqBody)
+
+    store, ok := reqBody["store"].(bool)
+    require.True(t, ok)
+    require.False(t, store)
+}
+
+func TestApplyCodexOAuthTransform_NonContinuationDefaultsStoreFalseAndStripsIDs(t *testing.T) {
+    // Non-continuation scenario: store defaults to false when unset, and ids are stripped from input.
+    setupCodexCache(t)
+
+    reqBody := map[string]any{
+        "model": "gpt-5.1",
+        "input": []any{
+            map[string]any{"type": "text", "id": "t1", "text": "hi"},
+        },
+    }
+
+    applyCodexOAuthTransform(reqBody)
+
+    store, ok := reqBody["store"].(bool)
+    require.True(t, ok)
+    require.False(t, store)
+
+    input, ok := reqBody["input"].([]any)
+    require.True(t, ok)
+    require.Len(t, input, 1)
+    // Assert input[0] is a map so the id check below is reliable.
+    item, ok := input[0].(map[string]any)
+    require.True(t, ok)
+    _, hasID := item["id"]
+    require.False(t, hasID)
+}
+
+func TestFilterCodexInput_RemovesItemReferenceWhenNotPreserved(t *testing.T) {
+    input := []any{
+        map[string]any{"type": "item_reference", "id": "ref1"},
+        map[string]any{"type": "text", "id": "t1", "text": "hi"},
+    }
+
+    filtered := filterCodexInput(input, false)
+    require.Len(t, filtered, 1)
+    // Assert filtered[0] is a map so the field checks are reliable.
+    item, ok := filtered[0].(map[string]any)
+    require.True(t, ok)
+    require.Equal(t, "text", item["type"])
+    _, hasID := item["id"]
+    require.False(t, hasID)
+}
+
+func TestApplyCodexOAuthTransform_EmptyInput(t *testing.T) {
+    // An empty input should stay empty and must not cause errors.
+    setupCodexCache(t)
+
+    reqBody := map[string]any{
+        "model": "gpt-5.1",
+        "input": []any{},
+    }
+
+    applyCodexOAuthTransform(reqBody)
+
+    input, ok := reqBody["input"].([]any)
+    require.True(t, ok)
+    require.Len(t, input, 0)
+}
+
+func setupCodexCache(t *testing.T) {
+    t.Helper()
+
+    // Use a temporary HOME so the header is never fetched over the network.
+    tempDir := t.TempDir()
+    t.Setenv("HOME", tempDir)
+
+    cacheDir := filepath.Join(tempDir, ".opencode", "cache")
+    require.NoError(t, os.MkdirAll(cacheDir, 0o755))
+    require.NoError(t, os.WriteFile(filepath.Join(cacheDir, "opencode-codex-header.txt"), []byte("header"), 0o644))
+
+    meta := map[string]any{
+        "etag":        "",
+        "lastFetch":   time.Now().UTC().Format(time.RFC3339),
+        "lastChecked": time.Now().UnixMilli(),
+    }
+    data, err := json.Marshal(meta)
+    require.NoError(t, err)
+    require.NoError(t, os.WriteFile(filepath.Join(cacheDir, "opencode-codex-header-meta.json"), data, 0o644))
+}
diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go
new file mode 100644
index 00000000..cfba6460
--- /dev/null
+++ b/backend/internal/service/openai_gateway_service.go
@@ -0,0 +1,1736 @@
+package service
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "crypto/sha256"
+    "encoding/hex"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "log"
+    "net/http"
+    "regexp"
+    "sort"
+    "strconv"
+    "strings"
+    "sync/atomic"
+    "time"
+
+    "github.com/Wei-Shaw/sub2api/internal/config"
+    "github.com/Wei-Shaw/sub2api/internal/pkg/openai"
+    "github.com/Wei-Shaw/sub2api/internal/util/responseheaders"
+    "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator"
+    "github.com/gin-gonic/gin"
+)
+
+const (
+    // ChatGPT internal API for OAuth accounts
+    chatgptCodexURL = "https://chatgpt.com/backend-api/codex/responses"
+    // OpenAI Platform API for API Key accounts (fallback)
+    openaiPlatformAPIURL   = "https://api.openai.com/v1/responses"
+    openaiStickySessionTTL = time.Hour // sticky-session TTL
+)
+
+// openaiSSEDataRe matches SSE data lines with optional whitespace after the colon.
+// Some upstream APIs return a non-standard "data:" without the space (it should be "data: ").
+var openaiSSEDataRe = regexp.MustCompile(`^data:\s*`)
+
+// OpenAI allowed headers whitelist (for non-OAuth accounts)
+var openaiAllowedHeaders = map[string]bool{
+    "accept-language": true,
+    "content-type":    true,
+    "conversation_id": true,
+    "user-agent":      true,
+    "originator":      true,
+    "session_id":      true,
+}
+
+// OpenAICodexUsageSnapshot represents Codex API usage limits from response headers
+type OpenAICodexUsageSnapshot struct {
+    PrimaryUsedPercent          *float64 `json:"primary_used_percent,omitempty"`
+    PrimaryResetAfterSeconds    *int     `json:"primary_reset_after_seconds,omitempty"`
+    PrimaryWindowMinutes        *int     `json:"primary_window_minutes,omitempty"`
+    SecondaryUsedPercent        *float64 `json:"secondary_used_percent,omitempty"`
+    SecondaryResetAfterSeconds  *int     `json:"secondary_reset_after_seconds,omitempty"`
+    SecondaryWindowMinutes      *int     `json:"secondary_window_minutes,omitempty"`
+    PrimaryOverSecondaryPercent *float64 `json:"primary_over_secondary_percent,omitempty"`
+    UpdatedAt                   string   `json:"updated_at,omitempty"`
+}
+
+// OpenAIUsage represents OpenAI API response usage
+type OpenAIUsage struct {
+    InputTokens              int `json:"input_tokens"`
+    OutputTokens             int `json:"output_tokens"`
+    CacheCreationInputTokens int `json:"cache_creation_input_tokens,omitempty"`
+    CacheReadInputTokens     int `json:"cache_read_input_tokens,omitempty"`
+}
+
+// OpenAIForwardResult represents the result of forwarding
+type OpenAIForwardResult struct {
+    RequestID    string
+    Usage        OpenAIUsage
+    Model        string
+    Stream       bool
+    Duration     time.Duration
+    FirstTokenMs *int
+}
+
+// OpenAIGatewayService handles OpenAI API gateway operations
+type OpenAIGatewayService struct {
+    accountRepo         AccountRepository
+    usageLogRepo        UsageLogRepository
+    userRepo            UserRepository
+    userSubRepo         UserSubscriptionRepository
+    cache               GatewayCache
+    cfg                 *config.Config
+    schedulerSnapshot   *SchedulerSnapshotService
+    concurrencyService  *ConcurrencyService
+    billingService      *BillingService
+    rateLimitService    *RateLimitService
+    billingCacheService *BillingCacheService
+    httpUpstream        HTTPUpstream
+    deferredService     *DeferredService
+}
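+
+// Scheduling sketch (a summary of the selection code below, not extra behavior):
+// sticky session first ("openai:"+sha256(session_id) -> account ID), then
+// load-aware selection by priority/load/LRU, then a bounded wait plan as a
+// last resort.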
+
+// NewOpenAIGatewayService creates a new OpenAIGatewayService
+func NewOpenAIGatewayService(
+    accountRepo AccountRepository,
+    usageLogRepo UsageLogRepository,
+    userRepo UserRepository,
+    userSubRepo UserSubscriptionRepository,
+    cache GatewayCache,
+    cfg *config.Config,
+    schedulerSnapshot *SchedulerSnapshotService,
+    concurrencyService *ConcurrencyService,
+    billingService *BillingService,
+    rateLimitService *RateLimitService,
+    billingCacheService *BillingCacheService,
+    httpUpstream HTTPUpstream,
+    deferredService *DeferredService,
+) *OpenAIGatewayService {
+    return &OpenAIGatewayService{
+        accountRepo:         accountRepo,
+        usageLogRepo:        usageLogRepo,
+        userRepo:            userRepo,
+        userSubRepo:         userSubRepo,
+        cache:               cache,
+        cfg:                 cfg,
+        schedulerSnapshot:   schedulerSnapshot,
+        concurrencyService:  concurrencyService,
+        billingService:      billingService,
+        rateLimitService:    rateLimitService,
+        billingCacheService: billingCacheService,
+        httpUpstream:        httpUpstream,
+        deferredService:     deferredService,
+    }
+}
+
+// GenerateSessionHash generates session hash from header (OpenAI uses session_id header)
+func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context) string {
+    sessionID := c.GetHeader("session_id")
+    if sessionID == "" {
+        return ""
+    }
+    hash := sha256.Sum256([]byte(sessionID))
+    return hex.EncodeToString(hash[:])
+}
+
+// BindStickySession sets session -> account binding with standard TTL.
+func (s *OpenAIGatewayService) BindStickySession(ctx context.Context, groupID *int64, sessionHash string, accountID int64) error {
+    if sessionHash == "" || accountID <= 0 {
+        return nil
+    }
+    return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, accountID, openaiStickySessionTTL)
+}
+
+// SelectAccount selects an OpenAI account with sticky session support
+func (s *OpenAIGatewayService) SelectAccount(ctx context.Context, groupID *int64, sessionHash string) (*Account, error) {
+    return s.SelectAccountForModel(ctx, groupID, sessionHash, "")
+}
+
+// SelectAccountForModel selects an account supporting the requested model
+func (s *OpenAIGatewayService) SelectAccountForModel(ctx context.Context, groupID *int64, sessionHash string, requestedModel string) (*Account, error) {
+    return s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, nil)
+}
+
+// SelectAccountForModelWithExclusions selects an account supporting the requested model while excluding specified accounts.
+func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) {
+    // 1. Check sticky session
+    if sessionHash != "" {
+        accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash)
+        if err == nil && accountID > 0 {
+            if _, excluded := excludedIDs[accountID]; !excluded {
+                account, err := s.getSchedulableAccount(ctx, accountID)
+                if err == nil && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) {
+                    // Refresh sticky session TTL
+                    _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL)
+                    return account, nil
+                }
+            }
+        }
+    }
+
+    // 2. Get schedulable OpenAI accounts
+    accounts, err := s.listSchedulableAccounts(ctx, groupID)
+    if err != nil {
+        return nil, fmt.Errorf("query accounts failed: %w", err)
+    }
+
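+    // Selection rule sketch: the lower Priority value wins; on ties a
+    // never-used account is preferred, otherwise the least recently used one
+    // (see the loop below).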
+    // 3. Select by priority + LRU
+    var selected *Account
+    for i := range accounts {
+        acc := &accounts[i]
+        if _, excluded := excludedIDs[acc.ID]; excluded {
+            continue
+        }
+        // Scheduler snapshots can be temporarily stale; re-check schedulability here to
+        // avoid selecting accounts that were recently rate-limited/overloaded.
+        if !acc.IsSchedulable() {
+            continue
+        }
+        // Check model support
+        if requestedModel != "" && !acc.IsModelSupported(requestedModel) {
+            continue
+        }
+        if selected == nil {
+            selected = acc
+            continue
+        }
+        // Lower priority value means higher priority
+        if acc.Priority < selected.Priority {
+            selected = acc
+        } else if acc.Priority == selected.Priority {
+            switch {
+            case acc.LastUsedAt == nil && selected.LastUsedAt != nil:
+                selected = acc
+            case acc.LastUsedAt != nil && selected.LastUsedAt == nil:
+                // keep selected (never used is preferred)
+            case acc.LastUsedAt == nil && selected.LastUsedAt == nil:
+                // keep selected (both never used)
+            default:
+                // Same priority, select least recently used
+                if acc.LastUsedAt.Before(*selected.LastUsedAt) {
+                    selected = acc
+                }
+            }
+        }
+    }
+
+    if selected == nil {
+        if requestedModel != "" {
+            return nil, fmt.Errorf("no available OpenAI accounts supporting model: %s", requestedModel)
+        }
+        return nil, errors.New("no available OpenAI accounts")
+    }
+
+    // 4. Set sticky session
+    if sessionHash != "" {
+        _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, selected.ID, openaiStickySessionTTL)
+    }
+
+    return selected, nil
+}
+
+// SelectAccountWithLoadAwareness selects an account with load-awareness and wait plan.
+func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) {
+    cfg := s.schedulingConfig()
+    var stickyAccountID int64
+    if sessionHash != "" && s.cache != nil {
+        if accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash); err == nil {
+            stickyAccountID = accountID
+        }
+    }
+    if s.concurrencyService == nil || !cfg.LoadBatchEnabled {
+        account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs)
+        if err != nil {
+            return nil, err
+        }
+        result, err := s.tryAcquireAccountSlot(ctx, account.ID, account.Concurrency)
+        if err == nil && result.Acquired {
+            return &AccountSelectionResult{
+                Account:     account,
+                Acquired:    true,
+                ReleaseFunc: result.ReleaseFunc,
+            }, nil
+        }
+        if stickyAccountID > 0 && stickyAccountID == account.ID && s.concurrencyService != nil {
+            waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, account.ID)
+            if waitingCount < cfg.StickySessionMaxWaiting {
+                return &AccountSelectionResult{
+                    Account: account,
+                    WaitPlan: &AccountWaitPlan{
+                        AccountID:      account.ID,
+                        MaxConcurrency: account.Concurrency,
+                        Timeout:        cfg.StickySessionWaitTimeout,
+                        MaxWaiting:     cfg.StickySessionMaxWaiting,
+                    },
+                }, nil
+            }
+        }
+        return &AccountSelectionResult{
+            Account: account,
+            WaitPlan: &AccountWaitPlan{
+                AccountID:      account.ID,
+                MaxConcurrency: account.Concurrency,
+                Timeout:        cfg.FallbackWaitTimeout,
+                MaxWaiting:     cfg.FallbackMaxWaiting,
+            },
+        }, nil
+    }
+
+    accounts, err := s.listSchedulableAccounts(ctx, groupID)
+    if err != nil {
+        return nil, err
+    }
+    if len(accounts) == 0 {
+        return nil, errors.New("no available accounts")
+    }
+
+    isExcluded := func(accountID int64) bool {
+        if excludedIDs == nil {
+            return false
+        }
+        _, excluded := excludedIDs[accountID]
+        return excluded
+    }
+
+    // ============ Layer 1: Sticky session ============
+    if sessionHash != "" {
+        accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash)
+        if err == nil && accountID > 0 && !isExcluded(accountID) {
+            account, err := s.getSchedulableAccount(ctx, accountID)
+            if err == nil && account.IsSchedulable() && account.IsOpenAI() &&
+                (requestedModel == "" || account.IsModelSupported(requestedModel)) {
+                result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency)
+                if err == nil && result.Acquired {
+                    _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL)
+                    return &AccountSelectionResult{
+                        Account:     account,
+                        Acquired:    true,
+                        ReleaseFunc: result.ReleaseFunc,
+                    }, nil
+                }
+
+                waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID)
+                if waitingCount < cfg.StickySessionMaxWaiting {
+                    return &AccountSelectionResult{
+                        Account: account,
+                        WaitPlan: &AccountWaitPlan{
+                            AccountID:      accountID,
+                            MaxConcurrency: account.Concurrency,
+                            Timeout:        cfg.StickySessionWaitTimeout,
+                            MaxWaiting:     cfg.StickySessionMaxWaiting,
+                        },
+                    }, nil
+                }
+            }
+        }
+    }
+
+    // ============ Layer 2: Load-aware selection ============
+    candidates := make([]*Account, 0, len(accounts))
+    for i := range accounts {
+        acc := &accounts[i]
+        if isExcluded(acc.ID) {
+            continue
+        }
+        // Scheduler snapshots can be temporarily stale (bucket rebuild is throttled);
+        // re-check schedulability here so recently rate-limited/overloaded accounts
+        // are not selected again before the bucket is rebuilt.
+        if !acc.IsSchedulable() {
+            continue
+        }
+        if requestedModel != "" && !acc.IsModelSupported(requestedModel) {
+            continue
+        }
+        candidates = append(candidates, acc)
+    }
+
+    if len(candidates) == 0 {
+        return nil, errors.New("no available accounts")
+    }
+
+    accountLoads := make([]AccountWithConcurrency, 0, len(candidates))
+    for _, acc := range candidates {
+        accountLoads = append(accountLoads, AccountWithConcurrency{
+            ID:             acc.ID,
+            MaxConcurrency: acc.Concurrency,
+        })
+    }
+
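+    // Load sketch: LoadRate is read as a percentage here, so only accounts
+    // under 100% are considered before falling back to the wait plan below.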
+    loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads)
+    if err != nil {
+        ordered := append([]*Account(nil), candidates...)
+        sortAccountsByPriorityAndLastUsed(ordered, false)
+        for _, acc := range ordered {
+            result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency)
+            if err == nil && result.Acquired {
+                if sessionHash != "" {
+                    _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, acc.ID, openaiStickySessionTTL)
+                }
+                return &AccountSelectionResult{
+                    Account:     acc,
+                    Acquired:    true,
+                    ReleaseFunc: result.ReleaseFunc,
+                }, nil
+            }
+        }
+    } else {
+        type accountWithLoad struct {
+            account  *Account
+            loadInfo *AccountLoadInfo
+        }
+        var available []accountWithLoad
+        for _, acc := range candidates {
+            loadInfo := loadMap[acc.ID]
+            if loadInfo == nil {
+                loadInfo = &AccountLoadInfo{AccountID: acc.ID}
+            }
+            if loadInfo.LoadRate < 100 {
+                available = append(available, accountWithLoad{
+                    account:  acc,
+                    loadInfo: loadInfo,
+                })
+            }
+        }
+
+        if len(available) > 0 {
+            sort.SliceStable(available, func(i, j int) bool {
+                a, b := available[i], available[j]
+                if a.account.Priority != b.account.Priority {
+                    return a.account.Priority < b.account.Priority
+                }
+                if a.loadInfo.LoadRate != b.loadInfo.LoadRate {
+                    return a.loadInfo.LoadRate < b.loadInfo.LoadRate
+                }
+                switch {
+                case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil:
+                    return true
+                case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil:
+                    return false
+                case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil:
+                    return false
+                default:
+                    return a.account.LastUsedAt.Before(*b.account.LastUsedAt)
+                }
+            })
+
+            for _, item := range available {
+                result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency)
+                if err == nil && result.Acquired {
+                    if sessionHash != "" {
+                        _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, item.account.ID, openaiStickySessionTTL)
+                    }
+                    return &AccountSelectionResult{
+                        Account:     item.account,
+                        Acquired:    true,
+                        ReleaseFunc: result.ReleaseFunc,
+                    }, nil
+                }
+            }
+        }
+    }
+
+    // ============ Layer 3: Fallback wait ============
+    sortAccountsByPriorityAndLastUsed(candidates, false)
+    for _, acc := range candidates {
+        return &AccountSelectionResult{
+            Account: acc,
+            WaitPlan: &AccountWaitPlan{
+                AccountID:      acc.ID,
+                MaxConcurrency: acc.Concurrency,
+                Timeout:        cfg.FallbackWaitTimeout,
+                MaxWaiting:     cfg.FallbackMaxWaiting,
+            },
+        }, nil
+    }
+
+    return nil, errors.New("no available accounts")
+}
+
+func (s *OpenAIGatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64) ([]Account, error) {
+    if s.schedulerSnapshot != nil {
+        accounts, _, err := s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, PlatformOpenAI, false)
+        return accounts, err
+    }
+    var accounts []Account
+    var err error
+    if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple {
+        accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI)
+    } else if groupID != nil {
+        accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformOpenAI)
+    } else {
+        accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI)
+    }
+    if err != nil {
+        return nil, fmt.Errorf("query accounts failed: %w", err)
+    }
+    return accounts, nil
+}
+
+func (s *OpenAIGatewayService) tryAcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int) (*AcquireResult, error) {
+    if s.concurrencyService == nil {
+        return &AcquireResult{Acquired: true, ReleaseFunc: func() {}}, nil
+    }
+    return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency)
+}
+
+func (s *OpenAIGatewayService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) {
+    if s.schedulerSnapshot != nil {
+        return s.schedulerSnapshot.GetAccount(ctx, accountID)
+    }
+    return s.accountRepo.GetByID(ctx, accountID)
+}
+
+func (s *OpenAIGatewayService) schedulingConfig() config.GatewaySchedulingConfig {
+    if s.cfg != nil {
+        return s.cfg.Gateway.Scheduling
+    }
+    return config.GatewaySchedulingConfig{
+        StickySessionMaxWaiting:  3,
+        StickySessionWaitTimeout: 45 * time.Second,
+        FallbackWaitTimeout:      30 * time.Second,
+        FallbackMaxWaiting:       100,
+        LoadBatchEnabled:         true,
+        SlotCleanupInterval:      30 * time.Second,
+    }
+}
+
+// GetAccessToken gets the access token for an OpenAI account
+func (s *OpenAIGatewayService) GetAccessToken(ctx context.Context, account *Account) (string, string, error) {
+    switch account.Type {
+    case AccountTypeOAuth:
+        accessToken := account.GetOpenAIAccessToken()
+        if accessToken == "" {
+            return "", "", errors.New("access_token not found in credentials")
+        }
+        return accessToken, "oauth", nil
+    case AccountTypeAPIKey:
+        apiKey := account.GetOpenAIApiKey()
+        if apiKey == "" {
+            return "", "", errors.New("api_key not found in credentials")
+        }
+        return apiKey, "apikey", nil
+    default:
+        return "", "", fmt.Errorf("unsupported account type: %s", account.Type)
+    }
+}
+
+func (s *OpenAIGatewayService) shouldFailoverUpstreamError(statusCode int) bool {
+    switch statusCode {
+    case 401, 402, 403, 429, 529:
+        return true
+    default:
+        return statusCode >= 500
+    }
+}
+
+func (s *OpenAIGatewayService) handleFailoverSideEffects(ctx context.Context, resp *http.Response, account *Account) {
+    body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+    s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body)
+}
+
+// Forward forwards request to OpenAI API
+func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, body []byte) (*OpenAIForwardResult, error) {
+    startTime := time.Now()
+
+    // Parse request body once (avoid multiple parse/serialize cycles)
+    var reqBody map[string]any
+    if err := json.Unmarshal(body, &reqBody); err != nil {
+        return nil, fmt.Errorf("parse request: %w", err)
+    }
+
+    // Extract model and stream from parsed body
+    reqModel, _ := reqBody["model"].(string)
+    reqStream, _ := reqBody["stream"].(bool)
+    promptCacheKey := ""
+    if v, ok := reqBody["prompt_cache_key"].(string); ok {
+        promptCacheKey = strings.TrimSpace(v)
+    }
+
+    // Track if body needs re-serialization
+    bodyModified := false
+    originalModel := reqModel
+
+    isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent"))
+
+    // Apply model mapping to every request, including Codex CLI requests.
+    mappedModel := account.GetMappedModel(reqModel)
+    if mappedModel != reqModel {
+        log.Printf("[OpenAI] Model mapping applied: %s -> %s (account: %s, isCodexCLI: %v)", reqModel, mappedModel, account.Name, isCodexCLI)
+        reqBody["model"] = mappedModel
+        bodyModified = true
+    }
+
+    // Normalize Codex model names for all OpenAI accounts so the upstream sees a consistent identifier.
+    if model, ok := reqBody["model"].(string); ok {
+        normalizedModel := normalizeCodexModel(model)
+        if normalizedModel != "" && normalizedModel != model {
+            log.Printf("[OpenAI] Codex model normalization: %s -> %s (account: %s, type: %s, isCodexCLI: %v)",
+                model, normalizedModel, account.Name, account.Type, isCodexCLI)
+            reqBody["model"] = normalizedModel
+            mappedModel = normalizedModel
+            bodyModified = true
+        }
+    }
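+
+    // Mapping sketch (values taken from codexModelMap; illustrative):
+    // "gpt-5" -> "gpt-5.1", "gpt-5-codex" -> "gpt-5.1-codex", and reasoning-effort
+    // suffixes such as "gpt-5.1-codex-high" collapse onto their base model.
+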
+    // Normalize the reasoning.effort parameter (minimal -> none) to match the values the upstream accepts.
+    if reasoning, ok := reqBody["reasoning"].(map[string]any); ok {
+        if effort, ok := reasoning["effort"].(string); ok && effort == "minimal" {
+            reasoning["effort"] = "none"
+            bodyModified = true
+            log.Printf("[OpenAI] Normalized reasoning.effort: minimal -> none (account: %s)", account.Name)
+        }
+    }
+
+    if account.Type == AccountTypeOAuth && !isCodexCLI {
+        codexResult := applyCodexOAuthTransform(reqBody)
+        if codexResult.Modified {
+            bodyModified = true
+        }
+        if codexResult.NormalizedModel != "" {
+            mappedModel = codexResult.NormalizedModel
+        }
+        if codexResult.PromptCacheKey != "" {
+            promptCacheKey = codexResult.PromptCacheKey
+        }
+    }
+
+    // Handle max_output_tokens based on platform and account type
+    if !isCodexCLI {
+        if maxOutputTokens, hasMaxOutputTokens := reqBody["max_output_tokens"]; hasMaxOutputTokens {
+            switch account.Platform {
+            case PlatformOpenAI:
+                // For OpenAI API Key, remove max_output_tokens (not supported)
+                // For OpenAI OAuth (Responses API), keep it (supported)
+                if account.Type == AccountTypeAPIKey {
+                    delete(reqBody, "max_output_tokens")
+                    bodyModified = true
+                }
+            case PlatformAnthropic:
+                // For Anthropic (Claude), convert to max_tokens
+                delete(reqBody, "max_output_tokens")
+                if _, hasMaxTokens := reqBody["max_tokens"]; !hasMaxTokens {
+                    reqBody["max_tokens"] = maxOutputTokens
+                }
+                bodyModified = true
+            case PlatformGemini:
+                // For Gemini, remove (will be handled by Gemini-specific transform)
+                delete(reqBody, "max_output_tokens")
+                bodyModified = true
+            default:
+                // For unknown platforms, remove to be safe
+                delete(reqBody, "max_output_tokens")
+                bodyModified = true
+            }
+        }
+
+        // Also handle max_completion_tokens (similar logic)
+        if _, hasMaxCompletionTokens := reqBody["max_completion_tokens"]; hasMaxCompletionTokens {
+            if account.Type == AccountTypeAPIKey || account.Platform != PlatformOpenAI {
+                delete(reqBody, "max_completion_tokens")
+                bodyModified = true
+            }
+        }
+    }
+
+    // Re-serialize body only if modified
+    if bodyModified {
+        var err error
+        body, err = json.Marshal(reqBody)
+        if err != nil {
+            return nil, fmt.Errorf("serialize request body: %w", err)
+        }
+    }
+
+    // Get access token
+    token, _, err := s.GetAccessToken(ctx, account)
+    if err != nil {
+        return nil, err
+    }
+
+    // Build upstream request
+    upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, reqStream, promptCacheKey, isCodexCLI)
+    if err != nil {
+        return nil, err
+    }
+
+    // Get proxy URL
+    proxyURL := ""
+    if account.ProxyID != nil && account.Proxy != nil {
+        proxyURL = account.Proxy.URL()
+    }
+
+    // Capture upstream request body for ops retry of this attempt.
+    if c != nil {
+        c.Set(OpsUpstreamRequestBodyKey, string(body))
+    }
+
+    // Send request
+    resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency)
+    if err != nil {
+        // Ensure the client receives an error response (handlers assume Forward writes on non-failover errors).
+        safeErr := sanitizeUpstreamErrorMessage(err.Error())
+        setOpsUpstreamError(c, 0, safeErr, "")
+        appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+            Platform:           account.Platform,
+            AccountID:          account.ID,
+            AccountName:        account.Name,
+            UpstreamStatusCode: 0,
+            Kind:               "request_error",
+            Message:            safeErr,
+        })
+        c.JSON(http.StatusBadGateway, gin.H{
+            "error": gin.H{
+                "type":    "upstream_error",
+                "message": "Upstream request failed",
+            },
+        })
+        return nil, fmt.Errorf("upstream request failed: %s", safeErr)
+    }
+    defer func() { _ = resp.Body.Close() }()
+
+    // Handle error response
+    if resp.StatusCode >= 400 {
+        if s.shouldFailoverUpstreamError(resp.StatusCode) {
+            respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+            _ = resp.Body.Close()
+            resp.Body = io.NopCloser(bytes.NewReader(respBody))
+
+            upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody))
+            upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
+            upstreamDetail := ""
+            if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+                maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes
+                if maxBytes <= 0 {
+                    maxBytes = 2048
+                }
+                upstreamDetail = truncateString(string(respBody), maxBytes)
+            }
+            appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+                Platform:           account.Platform,
+                AccountID:          account.ID,
+                AccountName:        account.Name,
+                UpstreamStatusCode: resp.StatusCode,
+                UpstreamRequestID:  resp.Header.Get("x-request-id"),
+                Kind:               "failover",
+                Message:            upstreamMsg,
+                Detail:             upstreamDetail,
+            })
+
+            s.handleFailoverSideEffects(ctx, resp, account)
+            return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode}
+        }
+        return s.handleErrorResponse(ctx, resp, c, account)
+    }
+
+    // Handle normal response
+    var usage *OpenAIUsage
+    var firstTokenMs *int
+    if reqStream {
+        streamResult, err := s.handleStreamingResponse(ctx, resp, c, account, startTime, originalModel, mappedModel)
+        if err != nil {
+            return nil, err
+        }
+        usage = streamResult.usage
+        firstTokenMs = streamResult.firstTokenMs
+    } else {
+        usage, err = s.handleNonStreamingResponse(ctx, resp, c, account, originalModel, mappedModel)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    // Extract and save Codex usage snapshot from response headers (for OAuth accounts)
+    if account.Type == AccountTypeOAuth {
+        if snapshot := extractCodexUsageHeaders(resp.Header); snapshot != nil {
+            s.updateCodexUsageSnapshot(ctx, account.ID, snapshot)
+        }
+    }
+
+    return &OpenAIForwardResult{
+        RequestID:    resp.Header.Get("x-request-id"),
+        Usage:        *usage,
+        Model:        originalModel,
+        Stream:       reqStream,
+        Duration:     time.Since(startTime),
+        FirstTokenMs: firstTokenMs,
+    }, nil
+}
+
+func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token string, isStream bool, promptCacheKey string, isCodexCLI bool) (*http.Request, error) {
+    // Determine target URL based on account type
+    var targetURL string
+    switch account.Type {
+    case AccountTypeOAuth:
+        // OAuth accounts use ChatGPT internal API
+        targetURL = chatgptCodexURL
+    case AccountTypeAPIKey:
+        // API Key accounts use Platform API or custom base URL
+        baseURL := account.GetOpenAIBaseURL()
+        if baseURL == "" {
+            targetURL = openaiPlatformAPIURL
+        } else {
+            validatedURL, err := s.validateUpstreamBaseURL(baseURL)
+            if err != nil {
+                return nil, err
+            }
+            targetURL = validatedURL + "/responses"
+        }
+    default:
+        targetURL = openaiPlatformAPIURL
+    }
+
+    req, err := http.NewRequestWithContext(ctx, "POST", targetURL, bytes.NewReader(body))
+    if err != nil {
+        return nil, err
+    }
+
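+    // Header sketch for OAuth accounts (summary of the code below):
+    // authorization: Bearer <token>, chatgpt-account-id, OpenAI-Beta:
+    // responses=experimental, originator, and conversation_id/session_id
+    // mirroring prompt_cache_key when present.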
+    // Set authentication header
+    req.Header.Set("authorization", "Bearer "+token)
+
+    // Set headers specific to OAuth accounts (ChatGPT internal API)
+    if account.Type == AccountTypeOAuth {
+        // Required: set Host for ChatGPT API (must use req.Host, not Header.Set)
+        req.Host = "chatgpt.com"
+        // Required: set chatgpt-account-id header
+        chatgptAccountID := account.GetChatGPTAccountID()
+        if chatgptAccountID != "" {
+            req.Header.Set("chatgpt-account-id", chatgptAccountID)
+        }
+    }
+
+    // Whitelist passthrough headers
+    for key, values := range c.Request.Header {
+        lowerKey := strings.ToLower(key)
+        if openaiAllowedHeaders[lowerKey] {
+            for _, v := range values {
+                req.Header.Add(key, v)
+            }
+        }
+    }
+    if account.Type == AccountTypeOAuth {
+        req.Header.Set("OpenAI-Beta", "responses=experimental")
+        if isCodexCLI {
+            req.Header.Set("originator", "codex_cli_rs")
+        } else {
+            req.Header.Set("originator", "opencode")
+        }
+        req.Header.Set("accept", "text/event-stream")
+        if promptCacheKey != "" {
+            req.Header.Set("conversation_id", promptCacheKey)
+            req.Header.Set("session_id", promptCacheKey)
+        }
+    }
+
+    // Apply custom User-Agent if configured
+    customUA := account.GetOpenAIUserAgent()
+    if customUA != "" {
+        req.Header.Set("user-agent", customUA)
+    }
+
+    // Ensure required headers exist
+    if req.Header.Get("content-type") == "" {
+        req.Header.Set("content-type", "application/json")
+    }
+
+    return req, nil
+}
+
+func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*OpenAIForwardResult, error) {
+    body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
+
+    upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body))
+    upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
+    upstreamDetail := ""
+    if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+        maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes
+        if maxBytes <= 0 {
+            maxBytes = 2048
+        }
+        upstreamDetail = truncateString(string(body), maxBytes)
+    }
+    setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail)
+
+    if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody {
+        log.Printf(
+            "OpenAI upstream error %d (account=%d platform=%s type=%s): %s",
+            resp.StatusCode,
+            account.ID,
+            account.Platform,
+            account.Type,
+            truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes),
+        )
+    }
+
+    // Check custom error codes
+    if !account.ShouldHandleErrorCode(resp.StatusCode) {
+        appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+            Platform:           account.Platform,
+            AccountID:          account.ID,
+            AccountName:        account.Name,
+            UpstreamStatusCode: resp.StatusCode,
+            UpstreamRequestID:  resp.Header.Get("x-request-id"),
+            Kind:               "http_error",
+            Message:            upstreamMsg,
+            Detail:             upstreamDetail,
+        })
+        c.JSON(http.StatusInternalServerError, gin.H{
+            "error": gin.H{
+                "type":    "upstream_error",
+                "message": "Upstream gateway error",
+            },
+        })
+        if upstreamMsg == "" {
+            return nil, fmt.Errorf("upstream error: %d (not in custom error codes)", resp.StatusCode)
+        }
+        return nil, fmt.Errorf("upstream error: %d (not in custom error codes) message=%s", resp.StatusCode, upstreamMsg)
+    }
+
+    // Handle upstream error (mark account status)
+    shouldDisable := false
+    if s.rateLimitService != nil {
+        shouldDisable = s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body)
+    }
+    kind := "http_error"
+    if shouldDisable {
+        kind = "failover"
+    }
+    appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
+        Platform:           account.Platform,
+        AccountID:          account.ID,
+        AccountName:        account.Name,
+        UpstreamStatusCode: resp.StatusCode,
+        UpstreamRequestID:  resp.Header.Get("x-request-id"),
+        Kind:               kind,
+        Message:            upstreamMsg,
+        Detail:             upstreamDetail,
+    })
+    if shouldDisable {
+        return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode}
+    }
+
+    // Return appropriate error response
+    var errType, errMsg string
+    var statusCode int
+
+    switch resp.StatusCode {
+    case 401:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream authentication failed, please contact administrator"
+    case 402:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream payment required: insufficient balance or billing issue"
+    case 403:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream access forbidden, please contact administrator"
+    case 429:
+        statusCode = http.StatusTooManyRequests
+        errType = "rate_limit_error"
+        errMsg = "Upstream rate limit exceeded, please retry later"
+    default:
+        statusCode = http.StatusBadGateway
+        errType = "upstream_error"
+        errMsg = "Upstream request failed"
+    }
+
+    c.JSON(statusCode, gin.H{
+        "error": gin.H{
+            "type":    errType,
+            "message": errMsg,
+        },
+    })
+
+    if upstreamMsg == "" {
+        return nil, fmt.Errorf("upstream error: %d", resp.StatusCode)
+    }
+    return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg)
+}
+
+// openaiStreamingResult streaming response result
+type openaiStreamingResult struct {
+    usage        *OpenAIUsage
+    firstTokenMs *int
+}
+
+func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string) (*openaiStreamingResult, error) {
+    if s.cfg != nil {
+        responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders)
+    }
+
+    // Set SSE response headers
+    c.Header("Content-Type", "text/event-stream")
+    c.Header("Cache-Control", "no-cache")
+    c.Header("Connection", "keep-alive")
+    c.Header("X-Accel-Buffering", "no")
+
+    // Pass through other headers
+    if v := resp.Header.Get("x-request-id"); v != "" {
+        c.Header("x-request-id", v)
+    }
+
+    w := c.Writer
+    flusher, ok := w.(http.Flusher)
+    if !ok {
+        return nil, errors.New("streaming not supported")
+    }
+
+    usage := &OpenAIUsage{}
+    var firstTokenMs *int
+    scanner := bufio.NewScanner(resp.Body)
+    maxLineSize := defaultMaxLineSize
+    if s.cfg != nil && s.cfg.Gateway.MaxLineSize > 0 {
+        maxLineSize = s.cfg.Gateway.MaxLineSize
+    }
+    scanner.Buffer(make([]byte, 64*1024), maxLineSize)
+
+    type scanEvent struct {
+        line string
+        err  error
+    }
+    // Read the upstream in a dedicated goroutine so a blocking read cannot stall keepalive/timeout handling.
+    events := make(chan scanEvent, 16)
+    done := make(chan struct{})
+    sendEvent := func(ev scanEvent) bool {
+        select {
+        case events <- ev:
+            return true
+        case <-done:
+            return false
+        }
+    }
+    var lastReadAt int64
+    atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
+    go func() {
+        defer close(events)
+        for scanner.Scan() {
+            atomic.StoreInt64(&lastReadAt, time.Now().UnixNano())
+            if !sendEvent(scanEvent{line: scanner.Text()}) {
+                return
+            }
+        }
+        if err := scanner.Err(); err != nil {
+            _ = sendEvent(scanEvent{err: err})
+        }
+    }()
+    defer close(done)
+
+    streamInterval := time.Duration(0)
+    if s.cfg != nil && s.cfg.Gateway.StreamDataIntervalTimeout > 0 {
+        streamInterval = time.Duration(s.cfg.Gateway.StreamDataIntervalTimeout) * time.Second
+    }
+    // Only watch for upstream data-interval timeouts; downstream write blocking must not affect this.
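+    // Timing sketch: intervalTicker aborts the stream once the upstream has
+    // been silent for streamInterval, while keepaliveTicker below emits ":"
+    // SSE comment frames downstream so idle proxies do not drop the connection.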
+    var intervalTicker *time.Ticker
+    if streamInterval > 0 {
+        intervalTicker = time.NewTicker(streamInterval)
+        defer intervalTicker.Stop()
+    }
+    var intervalCh <-chan time.Time
+    if intervalTicker != nil {
+        intervalCh = intervalTicker.C
+    }
+
+    keepaliveInterval := time.Duration(0)
+    if s.cfg != nil && s.cfg.Gateway.StreamKeepaliveInterval > 0 {
+        keepaliveInterval = time.Duration(s.cfg.Gateway.StreamKeepaliveInterval) * time.Second
+    }
+    // The downstream keepalive only prevents proxies from closing an idle connection.
+    var keepaliveTicker *time.Ticker
+    if keepaliveInterval > 0 {
+        keepaliveTicker = time.NewTicker(keepaliveInterval)
+        defer keepaliveTicker.Stop()
+    }
+    var keepaliveCh <-chan time.Time
+    if keepaliveTicker != nil {
+        keepaliveCh = keepaliveTicker.C
+    }
+    // Track when upstream data was last received, to throttle keepalive sends.
+    lastDataAt := time.Now()
+
+    // Send the error event at most once so repeated writes cannot corrupt the
+    // protocol (best-effort client notification when a write fails).
+    errorEventSent := false
+    sendErrorEvent := func(reason string) {
+        if errorEventSent {
+            return
+        }
+        errorEventSent = true
+        _, _ = fmt.Fprintf(w, "event: error\ndata: {\"error\":\"%s\"}\n\n", reason)
+        flusher.Flush()
+    }
+
+    needModelReplace := originalModel != mappedModel
+
+    for {
+        select {
+        case ev, ok := <-events:
+            if !ok {
+                return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil
+            }
+            if ev.err != nil {
+                if errors.Is(ev.err, bufio.ErrTooLong) {
+                    log.Printf("SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, ev.err)
+                    sendErrorEvent("response_too_large")
+                    return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, ev.err
+                }
+                sendErrorEvent("stream_read_error")
+                return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", ev.err)
+            }
+
+            line := ev.line
+            lastDataAt = time.Now()
+
+            // Extract data from SSE line (supports both "data: " and "data:" formats)
+            if openaiSSEDataRe.MatchString(line) {
+                data := openaiSSEDataRe.ReplaceAllString(line, "")
+
+                // Replace model in response if needed
+                if needModelReplace {
+                    line = s.replaceModelInSSELine(line, mappedModel, originalModel)
+                }
+
+                // Forward line
+                if _, err := fmt.Fprintf(w, "%s\n", line); err != nil {
+                    sendErrorEvent("write_failed")
+                    return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err
+                }
+                flusher.Flush()
+
+                // Record first token time
+                if firstTokenMs == nil && data != "" && data != "[DONE]" {
+                    ms := int(time.Since(startTime).Milliseconds())
+                    firstTokenMs = &ms
+                }
+                s.parseSSEUsage(data, usage)
+            } else {
+                // Forward non-data lines as-is
+                if _, err := fmt.Fprintf(w, "%s\n", line); err != nil {
+                    sendErrorEvent("write_failed")
+                    return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err
+                }
+                flusher.Flush()
+            }
+
+        case <-intervalCh:
+            lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt))
+            if time.Since(lastRead) < streamInterval {
+                continue
+            }
+            log.Printf("Stream data interval timeout: account=%d model=%s interval=%s", account.ID, originalModel, streamInterval)
+            // Handle the stream timeout; this may mark the account as temporarily unschedulable or errored.
+            if s.rateLimitService != nil {
+                s.rateLimitService.HandleStreamTimeout(ctx, account, originalModel)
+            }
+            sendErrorEvent("stream_timeout")
+            return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout")
+
+        case <-keepaliveCh:
+            if time.Since(lastDataAt) < keepaliveInterval {
+                continue
+            }
+            if _, err := fmt.Fprint(w, ":\n\n"); err != nil {
+                return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, err
+            }
+            flusher.Flush()
+        }
+    }
+}
+
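+// SSE rewrite sketch (illustrative values): when a mapping was applied,
+// data: {"model":"gpt-5.1",...} is rewritten to data: {"model":"gpt-5",...}
+// so clients see the model name they originally requested.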
+func (s *OpenAIGatewayService) replaceModelInSSELine(line, fromModel, toModel string) string {
+    if !openaiSSEDataRe.MatchString(line) {
+        return line
+    }
+    data := openaiSSEDataRe.ReplaceAllString(line, "")
+    if data == "" || data == "[DONE]" {
+        return line
+    }
+
+    var event map[string]any
+    if err := json.Unmarshal([]byte(data), &event); err != nil {
+        return line
+    }
+
+    // Replace model in response
+    if m, ok := event["model"].(string); ok && m == fromModel {
+        event["model"] = toModel
+        newData, err := json.Marshal(event)
+        if err != nil {
+            return line
+        }
+        return "data: " + string(newData)
+    }
+
+    // Check nested response
+    if response, ok := event["response"].(map[string]any); ok {
+        if m, ok := response["model"].(string); ok && m == fromModel {
+            response["model"] = toModel
+            newData, err := json.Marshal(event)
+            if err != nil {
+                return line
+            }
+            return "data: " + string(newData)
+        }
+    }
+
+    return line
+}
+
+func (s *OpenAIGatewayService) parseSSEUsage(data string, usage *OpenAIUsage) {
+    // Parse response.completed event for usage (OpenAI Responses format)
+    var event struct {
+        Type     string `json:"type"`
+        Response struct {
+            Usage struct {
+                InputTokens       int `json:"input_tokens"`
+                OutputTokens      int `json:"output_tokens"`
+                InputTokenDetails struct {
+                    CachedTokens int `json:"cached_tokens"`
+                } `json:"input_tokens_details"`
+            } `json:"usage"`
+        } `json:"response"`
+    }
+
+    if json.Unmarshal([]byte(data), &event) == nil && event.Type == "response.completed" {
+        usage.InputTokens = event.Response.Usage.InputTokens
+        usage.OutputTokens = event.Response.Usage.OutputTokens
+        usage.CacheReadInputTokens = event.Response.Usage.InputTokenDetails.CachedTokens
+    }
+}
+
+func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*OpenAIUsage, error) {
+    body, err := io.ReadAll(resp.Body)
+    if err != nil {
+        return nil, err
+    }
+
+    if account.Type == AccountTypeOAuth {
+        bodyLooksLikeSSE := bytes.Contains(body, []byte("data:")) || bytes.Contains(body, []byte("event:"))
+        if isEventStreamResponse(resp.Header) || bodyLooksLikeSSE {
+            return s.handleOAuthSSEToJSON(resp, c, body, originalModel, mappedModel)
+        }
+    }
+
+    // Parse usage
+    var response struct {
+        Usage struct {
+            InputTokens       int `json:"input_tokens"`
+            OutputTokens      int `json:"output_tokens"`
+            InputTokenDetails struct {
+                CachedTokens int `json:"cached_tokens"`
+            } `json:"input_tokens_details"`
+        } `json:"usage"`
+    }
+    if err := json.Unmarshal(body, &response); err != nil {
+        return nil, fmt.Errorf("parse response: %w", err)
+    }
+
+    usage := &OpenAIUsage{
+        InputTokens:          response.Usage.InputTokens,
+        OutputTokens:         response.Usage.OutputTokens,
+        CacheReadInputTokens: response.Usage.InputTokenDetails.CachedTokens,
+    }
+
+    // Replace model in response if needed
+    if originalModel != mappedModel {
+        body = s.replaceModelInResponseBody(body, mappedModel, originalModel)
+    }
+
+    responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders)
+
+    contentType := "application/json"
+    if s.cfg != nil && !s.cfg.Security.ResponseHeaders.Enabled {
+        if upstreamType := resp.Header.Get("Content-Type"); upstreamType != "" {
+            contentType = upstreamType
+        }
+    }
+
+    c.Data(resp.StatusCode, contentType, body)
+
+    return usage, nil
+}
+
+func isEventStreamResponse(header http.Header) bool {
+    contentType := strings.ToLower(header.Get("Content-Type"))
+    return strings.Contains(contentType, "text/event-stream")
+}
+
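+// Aggregation sketch: when an OAuth upstream answers a non-streaming client
+// request with SSE, the final response.completed/response.done payload is
+// extracted and returned as plain JSON (see handleOAuthSSEToJSON below).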
*http.Response, c *gin.Context, body []byte, originalModel, mappedModel string) (*OpenAIUsage, error) { + bodyText := string(body) + finalResponse, ok := extractCodexFinalResponse(bodyText) + + usage := &OpenAIUsage{} + if ok { + var response struct { + Usage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + InputTokenDetails struct { + CachedTokens int `json:"cached_tokens"` + } `json:"input_tokens_details"` + } `json:"usage"` + } + if err := json.Unmarshal(finalResponse, &response); err == nil { + usage.InputTokens = response.Usage.InputTokens + usage.OutputTokens = response.Usage.OutputTokens + usage.CacheReadInputTokens = response.Usage.InputTokenDetails.CachedTokens + } + body = finalResponse + if originalModel != mappedModel { + body = s.replaceModelInResponseBody(body, mappedModel, originalModel) + } + } else { + usage = s.parseSSEUsageFromBody(bodyText) + if originalModel != mappedModel { + bodyText = s.replaceModelInSSEBody(bodyText, mappedModel, originalModel) + } + body = []byte(bodyText) + } + + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + + contentType := "application/json; charset=utf-8" + if !ok { + contentType = resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "text/event-stream" + } + } + c.Data(resp.StatusCode, contentType, body) + + return usage, nil +} + +func extractCodexFinalResponse(body string) ([]byte, bool) { + lines := strings.Split(body, "\n") + for _, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + data := openaiSSEDataRe.ReplaceAllString(line, "") + if data == "" || data == "[DONE]" { + continue + } + var event struct { + Type string `json:"type"` + Response json.RawMessage `json:"response"` + } + if json.Unmarshal([]byte(data), &event) != nil { + continue + } + if event.Type == "response.done" || event.Type == "response.completed" { + if len(event.Response) > 0 { + return event.Response, true + } + } + } + return nil, false +} + +func (s *OpenAIGatewayService) parseSSEUsageFromBody(body string) *OpenAIUsage { + usage := &OpenAIUsage{} + lines := strings.Split(body, "\n") + for _, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + data := openaiSSEDataRe.ReplaceAllString(line, "") + if data == "" || data == "[DONE]" { + continue + } + s.parseSSEUsage(data, usage) + } + return usage +} + +func (s *OpenAIGatewayService) replaceModelInSSEBody(body, fromModel, toModel string) string { + lines := strings.Split(body, "\n") + for i, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + lines[i] = s.replaceModelInSSELine(line, fromModel, toModel) + } + return strings.Join(lines, "\n") +} + +func (s *OpenAIGatewayService) validateUpstreamBaseURL(raw string) (string, error) { + if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { + normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.cfg.Security.URLAllowlist.UpstreamHosts, + RequireAllowlist: true, + AllowPrivate: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", fmt.Errorf("invalid base_url: %w", err) + } + return normalized, nil +} + +func (s *OpenAIGatewayService) replaceModelInResponseBody(body []byte, fromModel, 
toModel string) []byte { + var resp map[string]any + if err := json.Unmarshal(body, &resp); err != nil { + return body + } + + model, ok := resp["model"].(string) + if !ok || model != fromModel { + return body + } + + resp["model"] = toModel + newBody, err := json.Marshal(resp) + if err != nil { + return body + } + + return newBody +} + +// OpenAIRecordUsageInput is the input for recording usage +type OpenAIRecordUsageInput struct { + Result *OpenAIForwardResult + APIKey *APIKey + User *User + Account *Account + Subscription *UserSubscription + UserAgent string // User-Agent of the request + IPAddress string // Client IP address of the request +} + +// RecordUsage records usage and deducts balance +func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRecordUsageInput) error { + result := input.Result + apiKey := input.APIKey + user := input.User + account := input.Account + subscription := input.Subscription + + // Compute the actual new input tokens (cache-read tokens subtracted): + // input_tokens includes cache_read_tokens, and cache reads must not be billed at the input-token price + actualInputTokens := result.Usage.InputTokens - result.Usage.CacheReadInputTokens + if actualInputTokens < 0 { + actualInputTokens = 0 + } + + // Calculate cost + tokens := UsageTokens{ + InputTokens: actualInputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + } + + // Get rate multiplier + multiplier := s.cfg.Default.RateMultiplier + if apiKey.GroupID != nil && apiKey.Group != nil { + multiplier = apiKey.Group.RateMultiplier + } + + cost, err := s.billingService.CalculateCost(result.Model, tokens, multiplier) + if err != nil { + cost = &CostBreakdown{ActualCost: 0} + } + + // Determine billing type + isSubscriptionBilling := subscription != nil && apiKey.Group != nil && apiKey.Group.IsSubscriptionType() + billingType := BillingTypeBalance + if isSubscriptionBilling { + billingType = BillingTypeSubscription + } + + // Create usage log + durationMs := int(result.Duration.Milliseconds()) + accountRateMultiplier := account.BillingRateMultiplier() + usageLog := &UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: result.RequestID, + Model: result.Model, + InputTokens: actualInputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + InputCost: cost.InputCost, + OutputCost: cost.OutputCost, + CacheCreationCost: cost.CacheCreationCost, + CacheReadCost: cost.CacheReadCost, + TotalCost: cost.TotalCost, + ActualCost: cost.ActualCost, + RateMultiplier: multiplier, + AccountRateMultiplier: &accountRateMultiplier, + BillingType: billingType, + Stream: result.Stream, + DurationMs: &durationMs, + FirstTokenMs: result.FirstTokenMs, + CreatedAt: time.Now(), + } + + // Attach the User-Agent + if input.UserAgent != "" { + usageLog.UserAgent = &input.UserAgent + } + + // Attach the client IP address + if input.IPAddress != "" { + usageLog.IPAddress = &input.IPAddress + } + + if apiKey.GroupID != nil { + usageLog.GroupID = apiKey.GroupID + } + if subscription != nil { + usageLog.SubscriptionID = &subscription.ID + } + + inserted, err := s.usageLogRepo.Create(ctx, usageLog) + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + log.Printf("[SIMPLE MODE] Usage recorded (not billed): user=%d, tokens=%d", usageLog.UserID, usageLog.TotalTokens()) + s.deferredService.ScheduleLastUsedUpdate(account.ID) + return nil + } + + // Bill when the log row was inserted, and also when insertion failed, so a logging error never grants free usage. + shouldBill := inserted || err != nil + + // Deduct 
based on billing type + if isSubscriptionBilling { + if shouldBill && cost.TotalCost > 0 { + _ = s.userSubRepo.IncrementUsage(ctx, subscription.ID, cost.TotalCost) + s.billingCacheService.QueueUpdateSubscriptionUsage(user.ID, *apiKey.GroupID, cost.TotalCost) + } + } else { + if shouldBill && cost.ActualCost > 0 { + _ = s.userRepo.DeductBalance(ctx, user.ID, cost.ActualCost) + s.billingCacheService.QueueDeductBalance(user.ID, cost.ActualCost) + } + } + + // Schedule batch update for account last_used_at + s.deferredService.ScheduleLastUsedUpdate(account.ID) + + return nil +} + +// extractCodexUsageHeaders extracts Codex usage limits from response headers +func extractCodexUsageHeaders(headers http.Header) *OpenAICodexUsageSnapshot { + snapshot := &OpenAICodexUsageSnapshot{} + hasData := false + + // Helper to parse float64 from header + parseFloat := func(key string) *float64 { + if v := headers.Get(key); v != "" { + if f, err := strconv.ParseFloat(v, 64); err == nil { + return &f + } + } + return nil + } + + // Helper to parse int from header + parseInt := func(key string) *int { + if v := headers.Get(key); v != "" { + if i, err := strconv.Atoi(v); err == nil { + return &i + } + } + return nil + } + + // Primary (weekly) limits + if v := parseFloat("x-codex-primary-used-percent"); v != nil { + snapshot.PrimaryUsedPercent = v + hasData = true + } + if v := parseInt("x-codex-primary-reset-after-seconds"); v != nil { + snapshot.PrimaryResetAfterSeconds = v + hasData = true + } + if v := parseInt("x-codex-primary-window-minutes"); v != nil { + snapshot.PrimaryWindowMinutes = v + hasData = true + } + + // Secondary (5h) limits + if v := parseFloat("x-codex-secondary-used-percent"); v != nil { + snapshot.SecondaryUsedPercent = v + hasData = true + } + if v := parseInt("x-codex-secondary-reset-after-seconds"); v != nil { + snapshot.SecondaryResetAfterSeconds = v + hasData = true + } + if v := parseInt("x-codex-secondary-window-minutes"); v != nil { + snapshot.SecondaryWindowMinutes = v + hasData = true + } + + // Overflow ratio + if v := parseFloat("x-codex-primary-over-secondary-limit-percent"); v != nil { + snapshot.PrimaryOverSecondaryPercent = v + hasData = true + } + + if !hasData { + return nil + } + + snapshot.UpdatedAt = time.Now().Format(time.RFC3339) + return snapshot +} + +// updateCodexUsageSnapshot saves the Codex usage snapshot to account's Extra field +func (s *OpenAIGatewayService) updateCodexUsageSnapshot(ctx context.Context, accountID int64, snapshot *OpenAICodexUsageSnapshot) { + if snapshot == nil { + return + } + + // Convert snapshot to map for merging into Extra + updates := make(map[string]any) + if snapshot.PrimaryUsedPercent != nil { + updates["codex_primary_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_primary_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_primary_window_minutes"] = *snapshot.PrimaryWindowMinutes + } + if snapshot.SecondaryUsedPercent != nil { + updates["codex_secondary_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_secondary_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_secondary_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + if snapshot.PrimaryOverSecondaryPercent != nil { + updates["codex_primary_over_secondary_percent"] = 
*snapshot.PrimaryOverSecondaryPercent + } + updates["codex_usage_updated_at"] = snapshot.UpdatedAt + + // Normalize to canonical 5h/7d fields based on window_minutes + // This fixes the issue where OpenAI's primary/secondary naming is reversed + // Strategy: Compare the two windows and assign the smaller one to 5h, larger one to 7d + + // IMPORTANT: We can only reliably determine window type from window_minutes field + // The reset_after_seconds is remaining time, not window size, so it cannot be used for comparison + + var primaryWindowMins, secondaryWindowMins int + var hasPrimaryWindow, hasSecondaryWindow bool + + // Only use window_minutes for reliable window size comparison + if snapshot.PrimaryWindowMinutes != nil { + primaryWindowMins = *snapshot.PrimaryWindowMinutes + hasPrimaryWindow = true + } + + if snapshot.SecondaryWindowMinutes != nil { + secondaryWindowMins = *snapshot.SecondaryWindowMinutes + hasSecondaryWindow = true + } + + // Determine which is 5h and which is 7d + var use5hFromPrimary, use7dFromPrimary bool + var use5hFromSecondary, use7dFromSecondary bool + + if hasPrimaryWindow && hasSecondaryWindow { + // Both window sizes known: compare and assign smaller to 5h, larger to 7d + if primaryWindowMins < secondaryWindowMins { + use5hFromPrimary = true + use7dFromSecondary = true + } else { + use5hFromSecondary = true + use7dFromPrimary = true + } + } else if hasPrimaryWindow { + // Only primary window size known: classify by absolute threshold + if primaryWindowMins <= 360 { + use5hFromPrimary = true + } else { + use7dFromPrimary = true + } + } else if hasSecondaryWindow { + // Only secondary window size known: classify by absolute threshold + if secondaryWindowMins <= 360 { + use5hFromSecondary = true + } else { + use7dFromSecondary = true + } + } else { + // No window_minutes available: cannot reliably determine window types + // Fall back to legacy assumption (may be incorrect) + // Assume primary=7d, secondary=5h based on historical observation + if snapshot.SecondaryUsedPercent != nil || snapshot.SecondaryResetAfterSeconds != nil || snapshot.SecondaryWindowMinutes != nil { + use5hFromSecondary = true + } + if snapshot.PrimaryUsedPercent != nil || snapshot.PrimaryResetAfterSeconds != nil || snapshot.PrimaryWindowMinutes != nil { + use7dFromPrimary = true + } + } + + // Write canonical 5h fields + if use5hFromPrimary { + if snapshot.PrimaryUsedPercent != nil { + updates["codex_5h_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_5h_window_minutes"] = *snapshot.PrimaryWindowMinutes + } + } else if use5hFromSecondary { + if snapshot.SecondaryUsedPercent != nil { + updates["codex_5h_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_5h_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_5h_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + } + + // Write canonical 7d fields + if use7dFromPrimary { + if snapshot.PrimaryUsedPercent != nil { + updates["codex_7d_used_percent"] = *snapshot.PrimaryUsedPercent + } + if snapshot.PrimaryResetAfterSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *snapshot.PrimaryResetAfterSeconds + } + if snapshot.PrimaryWindowMinutes != nil { + updates["codex_7d_window_minutes"] = 
*snapshot.PrimaryWindowMinutes + } + } else if use7dFromSecondary { + if snapshot.SecondaryUsedPercent != nil { + updates["codex_7d_used_percent"] = *snapshot.SecondaryUsedPercent + } + if snapshot.SecondaryResetAfterSeconds != nil { + updates["codex_7d_reset_after_seconds"] = *snapshot.SecondaryResetAfterSeconds + } + if snapshot.SecondaryWindowMinutes != nil { + updates["codex_7d_window_minutes"] = *snapshot.SecondaryWindowMinutes + } + } + + // Update account's Extra field asynchronously + go func() { + updateCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = s.accountRepo.UpdateExtra(updateCtx, accountID, updates) + }() +} diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go new file mode 100644 index 00000000..42b88b7d --- /dev/null +++ b/backend/internal/service/openai_gateway_service_test.go @@ -0,0 +1,410 @@ +package service + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" +) + +type stubOpenAIAccountRepo struct { + AccountRepository + accounts []Account +} + +func (r stubOpenAIAccountRepo) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) { + return append([]Account(nil), r.accounts...), nil +} + +func (r stubOpenAIAccountRepo) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) { + return append([]Account(nil), r.accounts...), nil +} + +type stubConcurrencyCache struct { + ConcurrencyCache +} + +func (c stubConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { + return true, nil +} + +func (c stubConcurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error { + return nil +} + +func (c stubConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + out := make(map[int64]*AccountLoadInfo, len(accounts)) + for _, acc := range accounts { + out[acc.ID] = &AccountLoadInfo{AccountID: acc.ID, LoadRate: 0} + } + return out, nil +} + +func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulable(t *testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + groupID := int64(1) + + rateLimited := Account{ + ID: 1, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + RateLimitResetAt: &resetAt, + } + available := Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 1, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{rateLimited, available}}, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-5.2", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil { + t.Fatalf("expected selection with account") + } + if selection.Account.ID != available.ID { + t.Fatalf("expected account %d, got %d", available.ID, selection.Account.ID) + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func 
TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulableWhenNoConcurrencyService(t *testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + groupID := int64(1) + + rateLimited := Account{ + ID: 1, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + RateLimitResetAt: &resetAt, + } + available := Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 1, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{rateLimited, available}}, + // concurrencyService is nil, forcing the non-load-batch selection path. + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-5.2", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil { + t.Fatalf("expected selection with account") + } + if selection.Account.ID != available.ID { + t.Fatalf("expected account %d, got %d", available.ID, selection.Account.ID) + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIStreamingTimeout(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + StreamDataIntervalTimeout: 1, + StreamKeepaliveInterval: 0, + MaxLineSize: defaultMaxLineSize, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: pr, + Header: http.Header{}, + } + + start := time.Now() + _, err := svc.handleStreamingResponse(c.Request.Context(), resp, c, &Account{ID: 1}, start, "model", "model") + _ = pw.Close() + _ = pr.Close() + + if err == nil || !strings.Contains(err.Error(), "stream data interval timeout") { + t.Fatalf("expected stream timeout error, got %v", err) + } + if !strings.Contains(rec.Body.String(), "stream_timeout") { + t.Fatalf("expected stream_timeout SSE error, got %q", rec.Body.String()) + } +} + +func TestOpenAIStreamingTooLong(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Gateway: config.GatewayConfig{ + StreamDataIntervalTimeout: 0, + StreamKeepaliveInterval: 0, + MaxLineSize: 64 * 1024, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: pr, + Header: http.Header{}, + } + + go func() { + defer func() { _ = pw.Close() }() + // Write a single line larger than MaxLineSize to trigger bufio.ErrTooLong + payload := "data: " + strings.Repeat("a", 128*1024) + "\n" + _, _ = pw.Write([]byte(payload)) + }() + + _, err := svc.handleStreamingResponse(c.Request.Context(), resp, c, &Account{ID: 2}, time.Now(), "model", "model") + _ = pr.Close() + + if !errors.Is(err, bufio.ErrTooLong) { + t.Fatalf("expected ErrTooLong, got %v", err) + } + if !strings.Contains(rec.Body.String(), "response_too_large") { + t.Fatalf("expected response_too_large SSE error, got %q", rec.Body.String()) + } +} + +func TestOpenAINonStreamingContentTypePassThrough(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Security: config.SecurityConfig{ + ResponseHeaders: 
config.ResponseHeaderConfig{Enabled: false}, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + body := []byte(`{"usage":{"input_tokens":1,"output_tokens":2,"input_tokens_details":{"cached_tokens":0}}}`) + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(body)), + Header: http.Header{"Content-Type": []string{"application/vnd.test+json"}}, + } + + _, err := svc.handleNonStreamingResponse(c.Request.Context(), resp, c, &Account{}, "model", "model") + if err != nil { + t.Fatalf("handleNonStreamingResponse error: %v", err) + } + + if !strings.Contains(rec.Header().Get("Content-Type"), "application/vnd.test+json") { + t.Fatalf("expected Content-Type passthrough, got %q", rec.Header().Get("Content-Type")) + } +} + +func TestOpenAINonStreamingContentTypeDefault(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Security: config.SecurityConfig{ + ResponseHeaders: config.ResponseHeaderConfig{Enabled: false}, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + body := []byte(`{"usage":{"input_tokens":1,"output_tokens":2,"input_tokens_details":{"cached_tokens":0}}}`) + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader(body)), + Header: http.Header{}, + } + + _, err := svc.handleNonStreamingResponse(c.Request.Context(), resp, c, &Account{}, "model", "model") + if err != nil { + t.Fatalf("handleNonStreamingResponse error: %v", err) + } + + if !strings.Contains(rec.Header().Get("Content-Type"), "application/json") { + t.Fatalf("expected default Content-Type, got %q", rec.Header().Get("Content-Type")) + } +} + +func TestOpenAIStreamingHeadersOverride(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := &config.Config{ + Security: config.SecurityConfig{ + ResponseHeaders: config.ResponseHeaderConfig{Enabled: false}, + }, + Gateway: config.GatewayConfig{ + StreamDataIntervalTimeout: 0, + StreamKeepaliveInterval: 0, + MaxLineSize: defaultMaxLineSize, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + pr, pw := io.Pipe() + resp := &http.Response{ + StatusCode: http.StatusOK, + Body: pr, + Header: http.Header{ + "Cache-Control": []string{"upstream"}, + "X-Request-Id": []string{"req-123"}, + "Content-Type": []string{"application/custom"}, + }, + } + + go func() { + defer func() { _ = pw.Close() }() + _, _ = pw.Write([]byte("data: {}\n\n")) + }() + + _, err := svc.handleStreamingResponse(c.Request.Context(), resp, c, &Account{ID: 1}, time.Now(), "model", "model") + _ = pr.Close() + if err != nil { + t.Fatalf("handleStreamingResponse error: %v", err) + } + + if rec.Header().Get("Cache-Control") != "no-cache" { + t.Fatalf("expected Cache-Control override, got %q", rec.Header().Get("Cache-Control")) + } + if rec.Header().Get("Content-Type") != "text/event-stream" { + t.Fatalf("expected Content-Type override, got %q", rec.Header().Get("Content-Type")) + } + if rec.Header().Get("X-Request-Id") != "req-123" { + t.Fatalf("expected X-Request-Id passthrough, got %q", rec.Header().Get("X-Request-Id")) + } +} + +func TestOpenAIInvalidBaseURLWhenAllowlistDisabled(t *testing.T) { + gin.SetMode(gin.TestMode) + cfg := 
&config.Config{ + Security: config.SecurityConfig{ + URLAllowlist: config.URLAllowlistConfig{Enabled: false}, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/", nil) + + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Credentials: map[string]any{"base_url": "://invalid-url"}, + } + + _, err := svc.buildUpstreamRequest(c.Request.Context(), c, account, []byte("{}"), "token", false, "", false) + if err == nil { + t.Fatalf("expected error for invalid base_url when allowlist disabled") + } +} + +func TestOpenAIValidateUpstreamBaseURLDisabledRequiresHTTPS(t *testing.T) { + cfg := &config.Config{ + Security: config.SecurityConfig{ + URLAllowlist: config.URLAllowlistConfig{Enabled: false}, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + if _, err := svc.validateUpstreamBaseURL("http://not-https.example.com"); err == nil { + t.Fatalf("expected http to be rejected when allow_insecure_http is false") + } + normalized, err := svc.validateUpstreamBaseURL("https://example.com") + if err != nil { + t.Fatalf("expected https to be allowed when allowlist disabled, got %v", err) + } + if normalized != "https://example.com" { + t.Fatalf("expected raw url passthrough, got %q", normalized) + } +} + +func TestOpenAIValidateUpstreamBaseURLDisabledAllowsHTTP(t *testing.T) { + cfg := &config.Config{ + Security: config.SecurityConfig{ + URLAllowlist: config.URLAllowlistConfig{ + Enabled: false, + AllowInsecureHTTP: true, + }, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + normalized, err := svc.validateUpstreamBaseURL("http://not-https.example.com") + if err != nil { + t.Fatalf("expected http allowed when allow_insecure_http is true, got %v", err) + } + if normalized != "http://not-https.example.com" { + t.Fatalf("expected raw url passthrough, got %q", normalized) + } +} + +func TestOpenAIValidateUpstreamBaseURLEnabledEnforcesAllowlist(t *testing.T) { + cfg := &config.Config{ + Security: config.SecurityConfig{ + URLAllowlist: config.URLAllowlistConfig{ + Enabled: true, + UpstreamHosts: []string{"example.com"}, + }, + }, + } + svc := &OpenAIGatewayService{cfg: cfg} + + if _, err := svc.validateUpstreamBaseURL("https://example.com"); err != nil { + t.Fatalf("expected allowlisted host to pass, got %v", err) + } + if _, err := svc.validateUpstreamBaseURL("https://evil.com"); err == nil { + t.Fatalf("expected non-allowlisted host to fail") + } +} diff --git a/backend/internal/service/openai_oauth_service.go b/backend/internal/service/openai_oauth_service.go new file mode 100644 index 00000000..182e08fe --- /dev/null +++ b/backend/internal/service/openai_oauth_service.go @@ -0,0 +1,255 @@ +package service + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" +) + +// OpenAIOAuthService handles OpenAI OAuth authentication flows +type OpenAIOAuthService struct { + sessionStore *openai.SessionStore + proxyRepo ProxyRepository + oauthClient OpenAIOAuthClient +} + +// NewOpenAIOAuthService creates a new OpenAI OAuth service +func NewOpenAIOAuthService(proxyRepo ProxyRepository, oauthClient OpenAIOAuthClient) *OpenAIOAuthService { + return &OpenAIOAuthService{ + sessionStore: openai.NewSessionStore(), + proxyRepo: proxyRepo, + oauthClient: oauthClient, + } +} + +// OpenAIAuthURLResult contains the authorization URL and session info +type OpenAIAuthURLResult struct { + AuthURL string `json:"auth_url"` + SessionID string 
`json:"session_id"` +} + +// GenerateAuthURL generates an OpenAI OAuth authorization URL +func (s *OpenAIOAuthService) GenerateAuthURL(ctx context.Context, proxyID *int64, redirectURI string) (*OpenAIAuthURLResult, error) { + // Generate PKCE values + state, err := openai.GenerateState() + if err != nil { + return nil, fmt.Errorf("failed to generate state: %w", err) + } + + codeVerifier, err := openai.GenerateCodeVerifier() + if err != nil { + return nil, fmt.Errorf("failed to generate code verifier: %w", err) + } + + codeChallenge := openai.GenerateCodeChallenge(codeVerifier) + + // Generate session ID + sessionID, err := openai.GenerateSessionID() + if err != nil { + return nil, fmt.Errorf("failed to generate session ID: %w", err) + } + + // Get proxy URL if specified + var proxyURL string + if proxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *proxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // Use default redirect URI if not specified + if redirectURI == "" { + redirectURI = openai.DefaultRedirectURI + } + + // Store session + session := &openai.OAuthSession{ + State: state, + CodeVerifier: codeVerifier, + RedirectURI: redirectURI, + ProxyURL: proxyURL, + CreatedAt: time.Now(), + } + s.sessionStore.Set(sessionID, session) + + // Build authorization URL + authURL := openai.BuildAuthorizationURL(state, codeChallenge, redirectURI) + + return &OpenAIAuthURLResult{ + AuthURL: authURL, + SessionID: sessionID, + }, nil +} + +// OpenAIExchangeCodeInput represents the input for code exchange +type OpenAIExchangeCodeInput struct { + SessionID string + Code string + RedirectURI string + ProxyID *int64 +} + +// OpenAITokenInfo represents the token information for OpenAI +type OpenAITokenInfo struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + IDToken string `json:"id_token,omitempty"` + ExpiresIn int64 `json:"expires_in"` + ExpiresAt int64 `json:"expires_at"` + Email string `json:"email,omitempty"` + ChatGPTAccountID string `json:"chatgpt_account_id,omitempty"` + ChatGPTUserID string `json:"chatgpt_user_id,omitempty"` + OrganizationID string `json:"organization_id,omitempty"` +} + +// ExchangeCode exchanges authorization code for tokens +func (s *OpenAIOAuthService) ExchangeCode(ctx context.Context, input *OpenAIExchangeCodeInput) (*OpenAITokenInfo, error) { + // Get session + session, ok := s.sessionStore.Get(input.SessionID) + if !ok { + return nil, fmt.Errorf("session not found or expired") + } + + // Get proxy URL + proxyURL := session.ProxyURL + if input.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *input.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + // Use redirect URI from session or input + redirectURI := session.RedirectURI + if input.RedirectURI != "" { + redirectURI = input.RedirectURI + } + + // Exchange code for token + tokenResp, err := s.oauthClient.ExchangeCode(ctx, input.Code, session.CodeVerifier, redirectURI, proxyURL) + if err != nil { + return nil, fmt.Errorf("failed to exchange code: %w", err) + } + + // Parse ID token to get user info + var userInfo *openai.UserInfo + if tokenResp.IDToken != "" { + claims, err := openai.ParseIDToken(tokenResp.IDToken) + if err == nil { + userInfo = claims.GetUserInfo() + } + } + + // Delete session after successful exchange + s.sessionStore.Delete(input.SessionID) + + tokenInfo := &OpenAITokenInfo{ + AccessToken: tokenResp.AccessToken, + RefreshToken: tokenResp.RefreshToken, + IDToken: tokenResp.IDToken, + 
ExpiresIn: int64(tokenResp.ExpiresIn), + ExpiresAt: time.Now().Unix() + int64(tokenResp.ExpiresIn), + } + + if userInfo != nil { + tokenInfo.Email = userInfo.Email + tokenInfo.ChatGPTAccountID = userInfo.ChatGPTAccountID + tokenInfo.ChatGPTUserID = userInfo.ChatGPTUserID + tokenInfo.OrganizationID = userInfo.OrganizationID + } + + return tokenInfo, nil +} + +// RefreshToken refreshes an OpenAI OAuth token +func (s *OpenAIOAuthService) RefreshToken(ctx context.Context, refreshToken string, proxyURL string) (*OpenAITokenInfo, error) { + tokenResp, err := s.oauthClient.RefreshToken(ctx, refreshToken, proxyURL) + if err != nil { + return nil, err + } + + // Parse ID token to get user info + var userInfo *openai.UserInfo + if tokenResp.IDToken != "" { + claims, err := openai.ParseIDToken(tokenResp.IDToken) + if err == nil { + userInfo = claims.GetUserInfo() + } + } + + tokenInfo := &OpenAITokenInfo{ + AccessToken: tokenResp.AccessToken, + RefreshToken: tokenResp.RefreshToken, + IDToken: tokenResp.IDToken, + ExpiresIn: int64(tokenResp.ExpiresIn), + ExpiresAt: time.Now().Unix() + int64(tokenResp.ExpiresIn), + } + + if userInfo != nil { + tokenInfo.Email = userInfo.Email + tokenInfo.ChatGPTAccountID = userInfo.ChatGPTAccountID + tokenInfo.ChatGPTUserID = userInfo.ChatGPTUserID + tokenInfo.OrganizationID = userInfo.OrganizationID + } + + return tokenInfo, nil +} + +// RefreshAccountToken refreshes the token for an OpenAI account +func (s *OpenAIOAuthService) RefreshAccountToken(ctx context.Context, account *Account) (*OpenAITokenInfo, error) { + if !account.IsOpenAI() { + return nil, fmt.Errorf("account is not an OpenAI account") + } + + refreshToken := account.GetOpenAIRefreshToken() + if refreshToken == "" { + return nil, fmt.Errorf("no refresh token available") + } + + var proxyURL string + if account.ProxyID != nil { + proxy, err := s.proxyRepo.GetByID(ctx, *account.ProxyID) + if err == nil && proxy != nil { + proxyURL = proxy.URL() + } + } + + return s.RefreshToken(ctx, refreshToken, proxyURL) +} + +// BuildAccountCredentials builds the credentials map from token info +func (s *OpenAIOAuthService) BuildAccountCredentials(tokenInfo *OpenAITokenInfo) map[string]any { + expiresAt := time.Unix(tokenInfo.ExpiresAt, 0).Format(time.RFC3339) + + creds := map[string]any{ + "access_token": tokenInfo.AccessToken, + "refresh_token": tokenInfo.RefreshToken, + "expires_at": expiresAt, + } + + if tokenInfo.IDToken != "" { + creds["id_token"] = tokenInfo.IDToken + } + if tokenInfo.Email != "" { + creds["email"] = tokenInfo.Email + } + if tokenInfo.ChatGPTAccountID != "" { + creds["chatgpt_account_id"] = tokenInfo.ChatGPTAccountID + } + if tokenInfo.ChatGPTUserID != "" { + creds["chatgpt_user_id"] = tokenInfo.ChatGPTUserID + } + if tokenInfo.OrganizationID != "" { + creds["organization_id"] = tokenInfo.OrganizationID + } + + return creds +} + +// Stop stops the session store cleanup goroutine +func (s *OpenAIOAuthService) Stop() { + s.sessionStore.Stop() +} diff --git a/backend/internal/service/openai_tool_continuation.go b/backend/internal/service/openai_tool_continuation.go new file mode 100644 index 00000000..e59082b2 --- /dev/null +++ b/backend/internal/service/openai_tool_continuation.go @@ -0,0 +1,213 @@ +package service + +import "strings" + +// NeedsToolContinuation reports whether the request needs tool-call continuation handling. +// Any one of these signals marks a continuation: previous_response_id, a function_call_output/item_reference item inside input, +// or explicitly declared tools/tool_choice.
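+// +// For illustration (the field values here are made up), each of these request bodies triggers continuation handling: +// +// {"previous_response_id": "resp_123"} +// {"tools": [{"type": "function"}]} +// {"input": [{"type": "function_call_output", "call_id": "call_1"}]}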
+func NeedsToolContinuation(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + if hasNonEmptyString(reqBody["previous_response_id"]) { + return true + } + if hasToolsSignal(reqBody) { + return true + } + if hasToolChoiceSignal(reqBody) { + return true + } + if inputHasType(reqBody, "function_call_output") { + return true + } + if inputHasType(reqBody, "item_reference") { + return true + } + return false +} + +// HasFunctionCallOutput reports whether input contains a function_call_output item, which triggers continuation validation. +func HasFunctionCallOutput(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + return inputHasType(reqBody, "function_call_output") +} + +// HasToolCallContext reports whether input contains a tool_call/function_call carrying a call_id, +// i.e. whether a function_call_output has context it can be linked to. +func HasToolCallContext(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "tool_call" && itemType != "function_call" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + return true + } + } + return false +} + +// FunctionCallOutputCallIDs extracts the set of call_ids from function_call_output items in input. +// Only non-empty call_ids are returned, for matching against item_reference.id. +func FunctionCallOutputCallIDs(reqBody map[string]any) []string { + if reqBody == nil { + return nil + } + input, ok := reqBody["input"].([]any) + if !ok { + return nil + } + ids := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + ids[callID] = struct{}{} + } + } + if len(ids) == 0 { + return nil + } + result := make([]string, 0, len(ids)) + for id := range ids { + result = append(result, id) + } + return result +} + +// HasFunctionCallOutputMissingCallID reports whether any function_call_output is missing its call_id. +func HasFunctionCallOutputMissingCallID(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + callID, _ := itemMap["call_id"].(string) + if strings.TrimSpace(callID) == "" { + return true + } + } + return false +} + +// HasItemReferenceForCallIDs reports whether the item_reference.id values cover every call_id. +// It validates continuation scenarios that rely solely on reference items.
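+// +// For illustration (the ids are made up), this input covers callIDs == []string{"call_1"}: +// +// [{"type": "function_call_output", "call_id": "call_1"}, {"type": "item_reference", "id": "call_1"}]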
+func HasItemReferenceForCallIDs(reqBody map[string]any, callIDs []string) bool { + if reqBody == nil || len(callIDs) == 0 { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + referenceIDs := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "item_reference" { + continue + } + idValue, _ := itemMap["id"].(string) + idValue = strings.TrimSpace(idValue) + if idValue == "" { + continue + } + referenceIDs[idValue] = struct{}{} + } + if len(referenceIDs) == 0 { + return false + } + for _, callID := range callIDs { + if _, ok := referenceIDs[callID]; !ok { + return false + } + } + return true +} + +// inputHasType reports whether input contains an item of the given type. +func inputHasType(reqBody map[string]any, want string) bool { + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType == want { + return true + } + } + return false +} + +// hasNonEmptyString reports whether the value is a non-blank string. +func hasNonEmptyString(value any) bool { + stringValue, ok := value.(string) + return ok && strings.TrimSpace(stringValue) != "" +} + +// hasToolsSignal reports whether the tools field is explicitly declared (present and non-empty). +func hasToolsSignal(reqBody map[string]any) bool { + raw, exists := reqBody["tools"] + if !exists || raw == nil { + return false + } + if tools, ok := raw.([]any); ok { + return len(tools) > 0 + } + return false +} + +// hasToolChoiceSignal reports whether tool_choice is explicitly declared (a non-blank string or a non-empty object). +func hasToolChoiceSignal(reqBody map[string]any) bool { + raw, exists := reqBody["tool_choice"] + if !exists || raw == nil { + return false + } + switch value := raw.(type) { + case string: + return strings.TrimSpace(value) != "" + case map[string]any: + return len(value) > 0 + default: + return false + } +} diff --git a/backend/internal/service/openai_tool_continuation_test.go b/backend/internal/service/openai_tool_continuation_test.go new file mode 100644 index 00000000..fe737ad6 --- /dev/null +++ b/backend/internal/service/openai_tool_continuation_test.go @@ -0,0 +1,98 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNeedsToolContinuationSignals(t *testing.T) { + // Cover every signal source that can trigger continuation, so the predicate stays complete. + cases := []struct { + name string + body map[string]any + want bool + }{ + {name: "nil", body: nil, want: false}, + {name: "previous_response_id", body: map[string]any{"previous_response_id": "resp_1"}, want: true}, + {name: "previous_response_id_blank", body: map[string]any{"previous_response_id": " "}, want: false}, + {name: "function_call_output", body: map[string]any{"input": []any{map[string]any{"type": "function_call_output"}}}, want: true}, + {name: "item_reference", body: map[string]any{"input": []any{map[string]any{"type": "item_reference"}}}, want: true}, + {name: "tools", body: map[string]any{"tools": []any{map[string]any{"type": "function"}}}, want: true}, + {name: "tools_empty", body: map[string]any{"tools": []any{}}, want: false}, + {name: "tools_invalid", body: map[string]any{"tools": "bad"}, want: false}, + {name: "tool_choice", body: map[string]any{"tool_choice": "auto"}, want: true}, + {name: "tool_choice_object", body: map[string]any{"tool_choice": map[string]any{"type": "function"}}, want: true}, + {name: "tool_choice_empty_object", body: map[string]any{"tool_choice": map[string]any{}}, want: false}, + {name: "none", body: map[string]any{"input": []any{map[string]any{"type": "text", "text": "hi"}}}, want: false}, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, NeedsToolContinuation(tt.body)) + }) + } +} + +func TestHasFunctionCallOutput(t *testing.T) { + // Only a function_call_output item inside input counts as continuation output. + require.False(t, HasFunctionCallOutput(nil)) + require.True(t, HasFunctionCallOutput(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output"}}, + })) + require.False(t, HasFunctionCallOutput(map[string]any{ + "input": "text", + })) +} + +func TestHasToolCallContext(t *testing.T) { + // A tool_call/function_call must carry a call_id to count as linkable context. + require.False(t, HasToolCallContext(nil)) + require.True(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "tool_call", "call_id": "call_1"}}, + }))
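+ // Added check: a whitespace-only call_id is trimmed away, so it must not count as linkable context. + require.False(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "tool_call", "call_id": "   "}}, + }))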
+ require.True(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "function_call", "call_id": "call_2"}}, + })) + require.False(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "tool_call"}}, + })) +} + +func TestFunctionCallOutputCallIDs(t *testing.T) { + // Only non-empty call_ids are extracted, deduplicated, and returned. + require.Empty(t, FunctionCallOutputCallIDs(nil)) + callIDs := FunctionCallOutputCallIDs(map[string]any{ + "input": []any{ + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + map[string]any{"type": "function_call_output", "call_id": ""}, + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + }, + }) + require.ElementsMatch(t, []string{"call_1"}, callIDs) +} + +func TestHasFunctionCallOutputMissingCallID(t *testing.T) { + require.False(t, HasFunctionCallOutputMissingCallID(nil)) + require.True(t, HasFunctionCallOutputMissingCallID(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output"}}, + })) + require.False(t, HasFunctionCallOutputMissingCallID(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output", "call_id": "call_1"}}, + })) +} + +func TestHasItemReferenceForCallIDs(t *testing.T) { + // item_reference entries must cover every call_id to count as linkable context. + require.False(t, HasItemReferenceForCallIDs(nil, []string{"call_1"})) + require.False(t, HasItemReferenceForCallIDs(map[string]any{}, []string{"call_1"})) + req := map[string]any{ + "input": []any{ + map[string]any{"type": "item_reference", "id": "call_1"}, + map[string]any{"type": "item_reference", "id": "call_2"}, + }, + } + require.True(t, HasItemReferenceForCallIDs(req, []string{"call_1"})) + require.True(t, HasItemReferenceForCallIDs(req, []string{"call_1", "call_2"})) + require.False(t, HasItemReferenceForCallIDs(req, []string{"call_1", "call_3"})) +} diff --git a/backend/internal/service/ops_account_availability.go b/backend/internal/service/ops_account_availability.go new file mode 100644 index 00000000..da66ec4d --- /dev/null +++ b/backend/internal/service/ops_account_availability.go @@ -0,0 +1,194 @@ +package service + +import ( + "context" + "errors" + "time" +) + +// GetAccountAvailabilityStats returns current account availability stats. + +// +// Query-level filtering is intentionally limited to platform/group to match the dashboard scope.
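+// +// The three result maps are keyed by platform name, group ID, and account ID respectively; +// the *time.Time result records when the snapshot was collected.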
+func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFilter string, groupIDFilter *int64) ( + map[string]*PlatformAvailability, + map[int64]*GroupAvailability, + map[int64]*AccountAvailability, + *time.Time, + error, +) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, nil, nil, nil, err + } + + accounts, err := s.listAllAccountsForOps(ctx, platformFilter) + if err != nil { + return nil, nil, nil, nil, err + } + + if groupIDFilter != nil && *groupIDFilter > 0 { + filtered := make([]Account, 0, len(accounts)) + for _, acc := range accounts { + for _, grp := range acc.Groups { + if grp != nil && grp.ID == *groupIDFilter { + filtered = append(filtered, acc) + break + } + } + } + accounts = filtered + } + + now := time.Now() + collectedAt := now + + platform := make(map[string]*PlatformAvailability) + group := make(map[int64]*GroupAvailability) + account := make(map[int64]*AccountAvailability) + + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + + isTempUnsched := false + if acc.TempUnschedulableUntil != nil && now.Before(*acc.TempUnschedulableUntil) { + isTempUnsched = true + } + + isRateLimited := acc.RateLimitResetAt != nil && now.Before(*acc.RateLimitResetAt) + isOverloaded := acc.OverloadUntil != nil && now.Before(*acc.OverloadUntil) + hasError := acc.Status == StatusError + + // Normalize exclusive status flags so the UI doesn't show conflicting badges. + if hasError { + isRateLimited = false + isOverloaded = false + } + + isAvailable := acc.Status == StatusActive && acc.Schedulable && !isRateLimited && !isOverloaded && !isTempUnsched + + if acc.Platform != "" { + if _, ok := platform[acc.Platform]; !ok { + platform[acc.Platform] = &PlatformAvailability{ + Platform: acc.Platform, + } + } + p := platform[acc.Platform] + p.TotalAccounts++ + if isAvailable { + p.AvailableCount++ + } + if isRateLimited { + p.RateLimitCount++ + } + if hasError { + p.ErrorCount++ + } + } + + for _, grp := range acc.Groups { + if grp == nil || grp.ID <= 0 { + continue + } + if _, ok := group[grp.ID]; !ok { + group[grp.ID] = &GroupAvailability{ + GroupID: grp.ID, + GroupName: grp.Name, + Platform: grp.Platform, + } + } + g := group[grp.ID] + g.TotalAccounts++ + if isAvailable { + g.AvailableCount++ + } + if isRateLimited { + g.RateLimitCount++ + } + if hasError { + g.ErrorCount++ + } + } + + displayGroupID := int64(0) + displayGroupName := "" + if len(acc.Groups) > 0 && acc.Groups[0] != nil { + displayGroupID = acc.Groups[0].ID + displayGroupName = acc.Groups[0].Name + } + + item := &AccountAvailability{ + AccountID: acc.ID, + AccountName: acc.Name, + Platform: acc.Platform, + GroupID: displayGroupID, + GroupName: displayGroupName, + Status: acc.Status, + + IsAvailable: isAvailable, + IsRateLimited: isRateLimited, + IsOverloaded: isOverloaded, + HasError: hasError, + + ErrorMessage: acc.ErrorMessage, + } + + if isRateLimited && acc.RateLimitResetAt != nil { + item.RateLimitResetAt = acc.RateLimitResetAt + remainingSec := int64(time.Until(*acc.RateLimitResetAt).Seconds()) + if remainingSec > 0 { + item.RateLimitRemainingSec = &remainingSec + } + } + if isOverloaded && acc.OverloadUntil != nil { + item.OverloadUntil = acc.OverloadUntil + remainingSec := int64(time.Until(*acc.OverloadUntil).Seconds()) + if remainingSec > 0 { + item.OverloadRemainingSec = &remainingSec + } + } + if isTempUnsched && acc.TempUnschedulableUntil != nil { + item.TempUnschedulableUntil = acc.TempUnschedulableUntil + } + + account[acc.ID] = item + } + + return platform, 
group, account, &collectedAt, nil +} + +type OpsAccountAvailability struct { + Group *GroupAvailability + Accounts map[int64]*AccountAvailability + CollectedAt *time.Time +} + +func (s *OpsService) GetAccountAvailability(ctx context.Context, platformFilter string, groupIDFilter *int64) (*OpsAccountAvailability, error) { + if s == nil { + return nil, errors.New("ops service is nil") + } + + if s.getAccountAvailability != nil { + return s.getAccountAvailability(ctx, platformFilter, groupIDFilter) + } + + _, groupStats, accountStats, collectedAt, err := s.GetAccountAvailabilityStats(ctx, platformFilter, groupIDFilter) + if err != nil { + return nil, err + } + + var group *GroupAvailability + if groupIDFilter != nil && *groupIDFilter > 0 { + group = groupStats[*groupIDFilter] + } + + if accountStats == nil { + accountStats = map[int64]*AccountAvailability{} + } + + return &OpsAccountAvailability{ + Group: group, + Accounts: accountStats, + CollectedAt: collectedAt, + }, nil +} diff --git a/backend/internal/service/ops_advisory_lock.go b/backend/internal/service/ops_advisory_lock.go new file mode 100644 index 00000000..f7ef4cee --- /dev/null +++ b/backend/internal/service/ops_advisory_lock.go @@ -0,0 +1,46 @@ +package service + +import ( + "context" + "database/sql" + "hash/fnv" + "time" +) + +func hashAdvisoryLockID(key string) int64 { + h := fnv.New64a() + _, _ = h.Write([]byte(key)) + return int64(h.Sum64()) +} + +func tryAcquireDBAdvisoryLock(ctx context.Context, db *sql.DB, lockID int64) (func(), bool) { + if db == nil { + return nil, false + } + if ctx == nil { + ctx = context.Background() + } + + conn, err := db.Conn(ctx) + if err != nil { + return nil, false + } + + acquired := false + if err := conn.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", lockID).Scan(&acquired); err != nil { + _ = conn.Close() + return nil, false + } + if !acquired { + _ = conn.Close() + return nil, false + } + + release := func() { + unlockCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _, _ = conn.ExecContext(unlockCtx, "SELECT pg_advisory_unlock($1)", lockID) + _ = conn.Close() + } + return release, true +} diff --git a/backend/internal/service/ops_aggregation_service.go b/backend/internal/service/ops_aggregation_service.go new file mode 100644 index 00000000..2a6afbba --- /dev/null +++ b/backend/internal/service/ops_aggregation_service.go @@ -0,0 +1,443 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "log" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" +) + +const ( + opsAggHourlyJobName = "ops_preaggregation_hourly" + opsAggDailyJobName = "ops_preaggregation_daily" + + opsAggHourlyInterval = 10 * time.Minute + opsAggDailyInterval = 1 * time.Hour + + // Keep in sync with ops retention target (vNext default 30d). + opsAggBackfillWindow = 30 * 24 * time.Hour + + // Recompute overlap to absorb late-arriving rows near boundaries. + opsAggHourlyOverlap = 2 * time.Hour + opsAggDailyOverlap = 48 * time.Hour + + opsAggHourlyChunk = 24 * time.Hour + opsAggDailyChunk = 7 * 24 * time.Hour + + // Delay around boundaries (e.g. 10:00..10:05) to avoid aggregating buckets + // that may still receive late inserts. 
+ opsAggSafeDelay = 5 * time.Minute + + opsAggMaxQueryTimeout = 3 * time.Second + opsAggHourlyTimeout = 5 * time.Minute + opsAggDailyTimeout = 2 * time.Minute + + opsAggHourlyLeaderLockKey = "ops:aggregation:hourly:leader" + opsAggDailyLeaderLockKey = "ops:aggregation:daily:leader" + + opsAggHourlyLeaderLockTTL = 15 * time.Minute + opsAggDailyLeaderLockTTL = 10 * time.Minute +) + +// OpsAggregationService periodically backfills ops_metrics_hourly / ops_metrics_daily +// for stable long-window dashboard queries. +// +// It is safe to run in multi-replica deployments when Redis is available (leader lock). +type OpsAggregationService struct { + opsRepo OpsRepository + settingRepo SettingRepository + cfg *config.Config + + db *sql.DB + redisClient *redis.Client + instanceID string + + stopCh chan struct{} + startOnce sync.Once + stopOnce sync.Once + + hourlyMu sync.Mutex + dailyMu sync.Mutex + + skipLogMu sync.Mutex + skipLogAt time.Time +} + +func NewOpsAggregationService( + opsRepo OpsRepository, + settingRepo SettingRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsAggregationService { + return &OpsAggregationService{ + opsRepo: opsRepo, + settingRepo: settingRepo, + cfg: cfg, + db: db, + redisClient: redisClient, + instanceID: uuid.NewString(), + } +} + +func (s *OpsAggregationService) Start() { + if s == nil { + return + } + s.startOnce.Do(func() { + if s.stopCh == nil { + s.stopCh = make(chan struct{}) + } + go s.hourlyLoop() + go s.dailyLoop() + }) +} + +func (s *OpsAggregationService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.stopCh != nil { + close(s.stopCh) + } + }) +} + +func (s *OpsAggregationService) hourlyLoop() { + // First run immediately. + s.aggregateHourly() + + ticker := time.NewTicker(opsAggHourlyInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.aggregateHourly() + case <-s.stopCh: + return + } + } +} + +func (s *OpsAggregationService) dailyLoop() { + // First run immediately. + s.aggregateDaily() + + ticker := time.NewTicker(opsAggDailyInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.aggregateDaily() + case <-s.stopCh: + return + } + } +} + +func (s *OpsAggregationService) aggregateHourly() { + if s == nil || s.opsRepo == nil { + return + } + if s.cfg != nil { + if !s.cfg.Ops.Enabled { + return + } + if !s.cfg.Ops.Aggregation.Enabled { + return + } + } + + ctx, cancel := context.WithTimeout(context.Background(), opsAggHourlyTimeout) + defer cancel() + + if !s.isMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx, opsAggHourlyLeaderLockKey, opsAggHourlyLeaderLockTTL, "[OpsAggregation][hourly]") + if !ok { + return + } + if release != nil { + defer release() + } + + s.hourlyMu.Lock() + defer s.hourlyMu.Unlock() + + startedAt := time.Now().UTC() + runAt := startedAt + + // Aggregate stable full hours only. + end := utcFloorToHour(time.Now().UTC().Add(-opsAggSafeDelay)) + start := end.Add(-opsAggBackfillWindow) + + // Resume from the latest bucket with overlap. 
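+ // e.g. with the latest hourly bucket at 10:00Z and a 2h overlap, backfill resumes from 08:00Z (but never earlier than the 30d backfill window start).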
+ { + ctxMax, cancelMax := context.WithTimeout(context.Background(), opsAggMaxQueryTimeout) + latest, ok, err := s.opsRepo.GetLatestHourlyBucketStart(ctxMax) + cancelMax() + if err != nil { + log.Printf("[OpsAggregation][hourly] failed to read latest bucket: %v", err) + } else if ok { + candidate := latest.Add(-opsAggHourlyOverlap) + if candidate.After(start) { + start = candidate + } + } + } + + start = utcFloorToHour(start) + if !start.Before(end) { + return + } + + var aggErr error + for cursor := start; cursor.Before(end); cursor = cursor.Add(opsAggHourlyChunk) { + chunkEnd := minTime(cursor.Add(opsAggHourlyChunk), end) + if err := s.opsRepo.UpsertHourlyMetrics(ctx, cursor, chunkEnd); err != nil { + aggErr = err + log.Printf("[OpsAggregation][hourly] upsert failed (%s..%s): %v", cursor.Format(time.RFC3339), chunkEnd.Format(time.RFC3339), err) + break + } + } + + finishedAt := time.Now().UTC() + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + + if aggErr != nil { + msg := truncateString(aggErr.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggHourlyJobName, + LastRunAt: &runAt, + LastErrorAt: &errAt, + LastError: &msg, + LastDurationMs: &dur, + }) + return + } + + successAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggHourlyJobName, + LastRunAt: &runAt, + LastSuccessAt: &successAt, + LastDurationMs: &dur, + }) +} + +func (s *OpsAggregationService) aggregateDaily() { + if s == nil || s.opsRepo == nil { + return + } + if s.cfg != nil { + if !s.cfg.Ops.Enabled { + return + } + if !s.cfg.Ops.Aggregation.Enabled { + return + } + } + + ctx, cancel := context.WithTimeout(context.Background(), opsAggDailyTimeout) + defer cancel() + + if !s.isMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx, opsAggDailyLeaderLockKey, opsAggDailyLeaderLockTTL, "[OpsAggregation][daily]") + if !ok { + return + } + if release != nil { + defer release() + } + + s.dailyMu.Lock() + defer s.dailyMu.Unlock() + + startedAt := time.Now().UTC() + runAt := startedAt + + end := utcFloorToDay(time.Now().UTC()) + start := end.Add(-opsAggBackfillWindow) + + { + ctxMax, cancelMax := context.WithTimeout(context.Background(), opsAggMaxQueryTimeout) + latest, ok, err := s.opsRepo.GetLatestDailyBucketDate(ctxMax) + cancelMax() + if err != nil { + log.Printf("[OpsAggregation][daily] failed to read latest bucket: %v", err) + } else if ok { + candidate := latest.Add(-opsAggDailyOverlap) + if candidate.After(start) { + start = candidate + } + } + } + + start = utcFloorToDay(start) + if !start.Before(end) { + return + } + + var aggErr error + for cursor := start; cursor.Before(end); cursor = cursor.Add(opsAggDailyChunk) { + chunkEnd := minTime(cursor.Add(opsAggDailyChunk), end) + if err := s.opsRepo.UpsertDailyMetrics(ctx, cursor, chunkEnd); err != nil { + aggErr = err + log.Printf("[OpsAggregation][daily] upsert failed (%s..%s): %v", cursor.Format("2006-01-02"), chunkEnd.Format("2006-01-02"), err) + break + } + } + + finishedAt := time.Now().UTC() + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + + if aggErr != nil { + msg := truncateString(aggErr.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := 
context.WithTimeout(context.Background(), 2*time.Second)
+		defer hbCancel()
+		_ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{
+			JobName:        opsAggDailyJobName,
+			LastRunAt:      &runAt,
+			LastErrorAt:    &errAt,
+			LastError:      &msg,
+			LastDurationMs: &dur,
+		})
+		return
+	}
+
+	successAt := finishedAt
+	hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer hbCancel()
+	_ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{
+		JobName:        opsAggDailyJobName,
+		LastRunAt:      &runAt,
+		LastSuccessAt:  &successAt,
+		LastDurationMs: &dur,
+	})
+}
+
+func (s *OpsAggregationService) isMonitoringEnabled(ctx context.Context) bool {
+	if s == nil {
+		return false
+	}
+	if s.cfg != nil && !s.cfg.Ops.Enabled {
+		return false
+	}
+	if s.settingRepo == nil {
+		return true
+	}
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled)
+	if err != nil {
+		if errors.Is(err, ErrSettingNotFound) {
+			// Setting never configured: default to enabled.
+			return true
+		}
+		// Lookup failed: fail open rather than silently stopping aggregation.
+		return true
+	}
+	switch strings.ToLower(strings.TrimSpace(value)) {
+	case "false", "0", "off", "disabled":
+		return false
+	default:
+		return true
+	}
+}
+
+var opsAggReleaseScript = redis.NewScript(`
+if redis.call("GET", KEYS[1]) == ARGV[1] then
+	return redis.call("DEL", KEYS[1])
+end
+return 0
+`)
+
+func (s *OpsAggregationService) tryAcquireLeaderLock(ctx context.Context, key string, ttl time.Duration, logPrefix string) (func(), bool) {
+	if s == nil {
+		return nil, false
+	}
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// Prefer Redis leader lock when available (multi-instance), but avoid stampeding
+	// the DB when Redis is flaky by falling back to a DB advisory lock.
+	if s.redisClient != nil {
+		ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result()
+		if err == nil {
+			if !ok {
+				s.maybeLogSkip(logPrefix)
+				return nil, false
+			}
+			release := func() {
+				ctx2, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+				defer cancel()
+				_, _ = opsAggReleaseScript.Run(ctx2, s.redisClient, []string{key}, s.instanceID).Result()
+			}
+			return release, true
+		}
+		// Redis error: fall through to DB advisory lock.
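+		// (tryAcquireDBAdvisoryLock and hashAdvisoryLockID are shared helpers
+		// defined elsewhere in this patch; hashAdvisoryLockID is assumed to map
+		// the string key to a stable int64 advisory-lock ID.)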
+	}
+
+	release, ok := tryAcquireDBAdvisoryLock(ctx, s.db, hashAdvisoryLockID(key))
+	if !ok {
+		s.maybeLogSkip(logPrefix)
+		return nil, false
+	}
+	return release, true
+}
+
+func (s *OpsAggregationService) maybeLogSkip(prefix string) {
+	s.skipLogMu.Lock()
+	defer s.skipLogMu.Unlock()
+
+	now := time.Now()
+	if !s.skipLogAt.IsZero() && now.Sub(s.skipLogAt) < time.Minute {
+		return
+	}
+	s.skipLogAt = now
+	if prefix == "" {
+		prefix = "[OpsAggregation]"
+	}
+	log.Printf("%s leader lock held by another instance; skipping", prefix)
+}
+
+func utcFloorToHour(t time.Time) time.Time {
+	return t.UTC().Truncate(time.Hour)
+}
+
+func utcFloorToDay(t time.Time) time.Time {
+	u := t.UTC()
+	y, m, d := u.Date()
+	return time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
+}
+
+func minTime(a, b time.Time) time.Time {
+	if a.Before(b) {
+		return a
+	}
+	return b
+}
diff --git a/backend/internal/service/ops_alert_evaluator_service.go b/backend/internal/service/ops_alert_evaluator_service.go
new file mode 100644
index 00000000..2b619f4d
--- /dev/null
+++ b/backend/internal/service/ops_alert_evaluator_service.go
@@ -0,0 +1,922 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/google/uuid"
+	"github.com/redis/go-redis/v9"
+)
+
+const (
+	opsAlertEvaluatorJobName = "ops_alert_evaluator"
+
+	opsAlertEvaluatorTimeout         = 45 * time.Second
+	opsAlertEvaluatorLeaderLockKey   = "ops:alert:evaluator:leader"
+	opsAlertEvaluatorLeaderLockTTL   = 90 * time.Second
+	opsAlertEvaluatorSkipLogInterval = 1 * time.Minute
+)
+
+var opsAlertEvaluatorReleaseScript = redis.NewScript(`
+if redis.call("GET", KEYS[1]) == ARGV[1] then
+	return redis.call("DEL", KEYS[1])
+end
+return 0
+`)
+
+type OpsAlertEvaluatorService struct {
+	opsService   *OpsService
+	opsRepo      OpsRepository
+	emailService *EmailService
+
+	redisClient *redis.Client
+	cfg         *config.Config
+	instanceID  string
+
+	stopCh    chan struct{}
+	startOnce sync.Once
+	stopOnce  sync.Once
+	wg        sync.WaitGroup
+
+	mu         sync.Mutex
+	ruleStates map[int64]*opsAlertRuleState
+
+	emailLimiter *slidingWindowLimiter
+
+	skipLogMu sync.Mutex
+	skipLogAt time.Time
+
+	warnNoRedisOnce sync.Once
+}
+
+type opsAlertRuleState struct {
+	LastEvaluatedAt     time.Time
+	ConsecutiveBreaches int
+}
+
+func NewOpsAlertEvaluatorService(
+	opsService *OpsService,
+	opsRepo OpsRepository,
+	emailService *EmailService,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsAlertEvaluatorService {
+	return &OpsAlertEvaluatorService{
+		opsService:   opsService,
+		opsRepo:      opsRepo,
+		emailService: emailService,
+		redisClient:  redisClient,
+		cfg:          cfg,
+		instanceID:   uuid.NewString(),
+		ruleStates:   map[int64]*opsAlertRuleState{},
+		emailLimiter: newSlidingWindowLimiter(0, time.Hour),
+	}
+}
+
+func (s *OpsAlertEvaluatorService) Start() {
+	if s == nil {
+		return
+	}
+	s.startOnce.Do(func() {
+		if s.stopCh == nil {
+			s.stopCh = make(chan struct{})
+		}
+		// Add to the WaitGroup before spawning the goroutine; doing the Add
+		// inside run() would race with Stop's Wait when Stop is called early.
+		s.wg.Add(1)
+		go s.run()
+	})
+}
+
+func (s *OpsAlertEvaluatorService) Stop() {
+	if s == nil {
+		return
+	}
+	s.stopOnce.Do(func() {
+		if s.stopCh != nil {
+			close(s.stopCh)
+		}
+	})
+	s.wg.Wait()
+}
+
+func (s *OpsAlertEvaluatorService) run() {
+	defer s.wg.Done()
+
+	// Start immediately to produce early feedback in ops dashboard.
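+	// A zero-duration timer fires immediately; each tick then re-reads the
+	// interval from runtime settings (see getInterval), so admin changes take
+	// effect without a restart. Resetting is race-free here because Reset is
+	// only called after a value has been received from timer.C.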
+	timer := time.NewTimer(0)
+	defer timer.Stop()
+
+	for {
+		select {
+		case <-timer.C:
+			interval := s.getInterval()
+			s.evaluateOnce(interval)
+			timer.Reset(interval)
+		case <-s.stopCh:
+			return
+		}
+	}
+}
+
+func (s *OpsAlertEvaluatorService) getInterval() time.Duration {
+	// Default.
+	interval := 60 * time.Second
+
+	if s == nil || s.opsService == nil {
+		return interval
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+
+	cfg, err := s.opsService.GetOpsAlertRuntimeSettings(ctx)
+	if err != nil || cfg == nil {
+		return interval
+	}
+	// Clamp to a sane range: strictly positive and at most 24h.
+	if cfg.EvaluationIntervalSeconds <= 0 {
+		return interval
+	}
+	if cfg.EvaluationIntervalSeconds > int((24 * time.Hour).Seconds()) {
+		return interval
+	}
+	return time.Duration(cfg.EvaluationIntervalSeconds) * time.Second
+}
+
+func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
+	if s == nil || s.opsRepo == nil {
+		return
+	}
+	if s.cfg != nil && !s.cfg.Ops.Enabled {
+		return
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), opsAlertEvaluatorTimeout)
+	defer cancel()
+
+	if s.opsService != nil && !s.opsService.IsMonitoringEnabled(ctx) {
+		return
+	}
+
+	runtimeCfg := defaultOpsAlertRuntimeSettings()
+	if s.opsService != nil {
+		if loaded, err := s.opsService.GetOpsAlertRuntimeSettings(ctx); err == nil && loaded != nil {
+			runtimeCfg = loaded
+		}
+	}
+
+	release, ok := s.tryAcquireLeaderLock(ctx, runtimeCfg.DistributedLock)
+	if !ok {
+		return
+	}
+	if release != nil {
+		defer release()
+	}
+
+	startedAt := time.Now().UTC()
+	runAt := startedAt
+
+	rules, err := s.opsRepo.ListAlertRules(ctx)
+	if err != nil {
+		s.recordHeartbeatError(runAt, time.Since(startedAt), err)
+		log.Printf("[OpsAlertEvaluator] list rules failed: %v", err)
+		return
+	}
+
+	now := time.Now().UTC()
+	safeEnd := now.Truncate(time.Minute)
+	if safeEnd.IsZero() {
+		safeEnd = now
+	}
+
+	systemMetrics, _ := s.opsRepo.GetLatestSystemMetrics(ctx, 1)
+
+	// Cleanup stale state for removed rules.
+	s.pruneRuleStates(rules)
+
+	for _, rule := range rules {
+		if rule == nil || !rule.Enabled || rule.ID <= 0 {
+			continue
+		}
+
+		scopePlatform, scopeGroupID, scopeRegion := parseOpsAlertRuleScope(rule.Filters)
+
+		windowMinutes := rule.WindowMinutes
+		if windowMinutes <= 0 {
+			windowMinutes = 1
+		}
+		windowStart := safeEnd.Add(-time.Duration(windowMinutes) * time.Minute)
+		windowEnd := safeEnd
+
+		metricValue, ok := s.computeRuleMetric(ctx, rule, systemMetrics, windowStart, windowEnd, scopePlatform, scopeGroupID)
+		if !ok {
+			s.resetRuleState(rule.ID, now)
+			continue
+		}
+
+		breachedNow := compareMetric(metricValue, rule.Operator, rule.Threshold)
+		required := requiredSustainedBreaches(rule.SustainedMinutes, interval)
+		consecutive := s.updateRuleBreaches(rule.ID, now, interval, breachedNow)
+
+		activeEvent, err := s.opsRepo.GetActiveAlertEvent(ctx, rule.ID)
+		if err != nil {
+			log.Printf("[OpsAlertEvaluator] get active event failed (rule=%d): %v", rule.ID, err)
+			continue
+		}
+
+		if breachedNow && consecutive >= required {
+			if activeEvent != nil {
+				continue
+			}
+
+			// Scoped silencing: if a matching silence exists, skip creating a firing event.
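+			// For example, a silence scoped to (rule_id=3, platform="openai",
+			// group_id=101) suppresses only that combination; the same rule can
+			// still fire for other platforms or groups. (Values illustrative.)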
+ if s.opsService != nil { + platform := strings.TrimSpace(scopePlatform) + region := scopeRegion + if platform != "" { + if ok, err := s.opsService.IsAlertSilenced(ctx, rule.ID, platform, scopeGroupID, region, now); err == nil && ok { + continue + } + } + } + + latestEvent, err := s.opsRepo.GetLatestAlertEvent(ctx, rule.ID) + if err != nil { + log.Printf("[OpsAlertEvaluator] get latest event failed (rule=%d): %v", rule.ID, err) + continue + } + if latestEvent != nil && rule.CooldownMinutes > 0 { + cooldown := time.Duration(rule.CooldownMinutes) * time.Minute + if now.Sub(latestEvent.FiredAt) < cooldown { + continue + } + } + + firedEvent := &OpsAlertEvent{ + RuleID: rule.ID, + Severity: strings.TrimSpace(rule.Severity), + Status: OpsAlertStatusFiring, + Title: fmt.Sprintf("%s: %s", strings.TrimSpace(rule.Severity), strings.TrimSpace(rule.Name)), + Description: buildOpsAlertDescription(rule, metricValue, windowMinutes, scopePlatform, scopeGroupID), + MetricValue: float64Ptr(metricValue), + ThresholdValue: float64Ptr(rule.Threshold), + Dimensions: buildOpsAlertDimensions(scopePlatform, scopeGroupID), + FiredAt: now, + CreatedAt: now, + } + + created, err := s.opsRepo.CreateAlertEvent(ctx, firedEvent) + if err != nil { + log.Printf("[OpsAlertEvaluator] create event failed (rule=%d): %v", rule.ID, err) + continue + } + + if created != nil && created.ID > 0 { + s.maybeSendAlertEmail(ctx, runtimeCfg, rule, created) + } + continue + } + + // Not breached: resolve active event if present. + if activeEvent != nil { + resolvedAt := now + if err := s.opsRepo.UpdateAlertEventStatus(ctx, activeEvent.ID, OpsAlertStatusResolved, &resolvedAt); err != nil { + log.Printf("[OpsAlertEvaluator] resolve event failed (event=%d): %v", activeEvent.ID, err) + } + } + } + + s.recordHeartbeatSuccess(runAt, time.Since(startedAt)) +} + +func (s *OpsAlertEvaluatorService) pruneRuleStates(rules []*OpsAlertRule) { + s.mu.Lock() + defer s.mu.Unlock() + + live := map[int64]struct{}{} + for _, r := range rules { + if r != nil && r.ID > 0 { + live[r.ID] = struct{}{} + } + } + for id := range s.ruleStates { + if _, ok := live[id]; !ok { + delete(s.ruleStates, id) + } + } +} + +func (s *OpsAlertEvaluatorService) resetRuleState(ruleID int64, now time.Time) { + if ruleID <= 0 { + return + } + s.mu.Lock() + defer s.mu.Unlock() + state, ok := s.ruleStates[ruleID] + if !ok { + state = &opsAlertRuleState{} + s.ruleStates[ruleID] = state + } + state.LastEvaluatedAt = now + state.ConsecutiveBreaches = 0 +} + +func (s *OpsAlertEvaluatorService) updateRuleBreaches(ruleID int64, now time.Time, interval time.Duration, breached bool) int { + if ruleID <= 0 { + return 0 + } + s.mu.Lock() + defer s.mu.Unlock() + + state, ok := s.ruleStates[ruleID] + if !ok { + state = &opsAlertRuleState{} + s.ruleStates[ruleID] = state + } + + if !state.LastEvaluatedAt.IsZero() && interval > 0 { + if now.Sub(state.LastEvaluatedAt) > interval*2 { + state.ConsecutiveBreaches = 0 + } + } + + state.LastEvaluatedAt = now + if breached { + state.ConsecutiveBreaches++ + } else { + state.ConsecutiveBreaches = 0 + } + return state.ConsecutiveBreaches +} + +func requiredSustainedBreaches(sustainedMinutes int, interval time.Duration) int { + if sustainedMinutes <= 0 { + return 1 + } + if interval <= 0 { + return sustainedMinutes + } + required := int(math.Ceil(float64(sustainedMinutes*60) / interval.Seconds())) + if required < 1 { + return 1 + } + return required +} + +func parseOpsAlertRuleScope(filters map[string]any) (platform string, groupID *int64, region *string) 
{ + if filters == nil { + return "", nil, nil + } + if v, ok := filters["platform"]; ok { + if s, ok := v.(string); ok { + platform = strings.TrimSpace(s) + } + } + if v, ok := filters["group_id"]; ok { + switch t := v.(type) { + case float64: + if t > 0 { + id := int64(t) + groupID = &id + } + case int64: + if t > 0 { + id := t + groupID = &id + } + case int: + if t > 0 { + id := int64(t) + groupID = &id + } + case string: + n, err := strconv.ParseInt(strings.TrimSpace(t), 10, 64) + if err == nil && n > 0 { + groupID = &n + } + } + } + if v, ok := filters["region"]; ok { + if s, ok := v.(string); ok { + vv := strings.TrimSpace(s) + if vv != "" { + region = &vv + } + } + } + return platform, groupID, region +} + +func (s *OpsAlertEvaluatorService) computeRuleMetric( + ctx context.Context, + rule *OpsAlertRule, + systemMetrics *OpsSystemMetricsSnapshot, + start time.Time, + end time.Time, + platform string, + groupID *int64, +) (float64, bool) { + if rule == nil { + return 0, false + } + switch strings.TrimSpace(rule.MetricType) { + case "cpu_usage_percent": + if systemMetrics != nil && systemMetrics.CPUUsagePercent != nil { + return *systemMetrics.CPUUsagePercent, true + } + return 0, false + case "memory_usage_percent": + if systemMetrics != nil && systemMetrics.MemoryUsagePercent != nil { + return *systemMetrics.MemoryUsagePercent, true + } + return 0, false + case "concurrency_queue_depth": + if systemMetrics != nil && systemMetrics.ConcurrencyQueueDepth != nil { + return float64(*systemMetrics.ConcurrencyQueueDepth), true + } + return 0, false + case "group_available_accounts": + if groupID == nil || *groupID <= 0 { + return 0, false + } + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + if availability.Group == nil { + return 0, true + } + return float64(availability.Group.AvailableCount), true + case "group_available_ratio": + if groupID == nil || *groupID <= 0 { + return 0, false + } + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return computeGroupAvailableRatio(availability.Group), true + case "account_rate_limited_count": + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return float64(countAccountsByCondition(availability.Accounts, func(acc *AccountAvailability) bool { + return acc.IsRateLimited + })), true + case "account_error_count": + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return float64(countAccountsByCondition(availability.Accounts, func(acc *AccountAvailability) bool { + return acc.HasError && acc.TempUnschedulableUntil == nil + })), true + } + + overview, err := s.opsRepo.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: platform, + GroupID: groupID, + QueryMode: OpsQueryModeRaw, + }) + if err != nil { + return 0, false + } + if overview == nil { + return 0, false + } + + switch strings.TrimSpace(rule.MetricType) { + case "success_rate": + if overview.RequestCountSLA <= 0 { + return 0, false 
+ } + return overview.SLA * 100, true + case "error_rate": + if overview.RequestCountSLA <= 0 { + return 0, false + } + return overview.ErrorRate * 100, true + case "upstream_error_rate": + if overview.RequestCountSLA <= 0 { + return 0, false + } + return overview.UpstreamErrorRate * 100, true + default: + return 0, false + } +} + +func compareMetric(value float64, operator string, threshold float64) bool { + switch strings.TrimSpace(operator) { + case ">": + return value > threshold + case ">=": + return value >= threshold + case "<": + return value < threshold + case "<=": + return value <= threshold + case "==": + return value == threshold + case "!=": + return value != threshold + default: + return false + } +} + +func buildOpsAlertDimensions(platform string, groupID *int64) map[string]any { + dims := map[string]any{} + if strings.TrimSpace(platform) != "" { + dims["platform"] = strings.TrimSpace(platform) + } + if groupID != nil && *groupID > 0 { + dims["group_id"] = *groupID + } + if len(dims) == 0 { + return nil + } + return dims +} + +func buildOpsAlertDescription(rule *OpsAlertRule, value float64, windowMinutes int, platform string, groupID *int64) string { + if rule == nil { + return "" + } + scope := "overall" + if strings.TrimSpace(platform) != "" { + scope = fmt.Sprintf("platform=%s", strings.TrimSpace(platform)) + } + if groupID != nil && *groupID > 0 { + scope = fmt.Sprintf("%s group_id=%d", scope, *groupID) + } + if windowMinutes <= 0 { + windowMinutes = 1 + } + return fmt.Sprintf("%s %s %.2f (current %.2f) over last %dm (%s)", + strings.TrimSpace(rule.MetricType), + strings.TrimSpace(rule.Operator), + rule.Threshold, + value, + windowMinutes, + strings.TrimSpace(scope), + ) +} + +func (s *OpsAlertEvaluatorService) maybeSendAlertEmail(ctx context.Context, runtimeCfg *OpsAlertRuntimeSettings, rule *OpsAlertRule, event *OpsAlertEvent) { + if s == nil || s.emailService == nil || s.opsService == nil || event == nil || rule == nil { + return + } + if event.EmailSent { + return + } + if !rule.NotifyEmail { + return + } + + emailCfg, err := s.opsService.GetEmailNotificationConfig(ctx) + if err != nil || emailCfg == nil || !emailCfg.Alert.Enabled { + return + } + + if len(emailCfg.Alert.Recipients) == 0 { + return + } + if !shouldSendOpsAlertEmailByMinSeverity(strings.TrimSpace(emailCfg.Alert.MinSeverity), strings.TrimSpace(rule.Severity)) { + return + } + + if runtimeCfg != nil && runtimeCfg.Silencing.Enabled { + if isOpsAlertSilenced(time.Now().UTC(), rule, event, runtimeCfg.Silencing) { + return + } + } + + // Apply/update rate limiter. + s.emailLimiter.SetLimit(emailCfg.Alert.RateLimitPerHour) + + subject := fmt.Sprintf("[Ops Alert][%s] %s", strings.TrimSpace(rule.Severity), strings.TrimSpace(rule.Name)) + body := buildOpsAlertEmailBody(rule, event) + + anySent := false + for _, to := range emailCfg.Alert.Recipients { + addr := strings.TrimSpace(to) + if addr == "" { + continue + } + if !s.emailLimiter.Allow(time.Now().UTC()) { + continue + } + if err := s.emailService.SendEmail(ctx, addr, subject, body); err != nil { + // Ignore per-recipient failures; continue best-effort. 
+			continue
+		}
+		anySent = true
+	}
+
+	if anySent {
+		_ = s.opsRepo.UpdateAlertEventEmailSent(context.Background(), event.ID, true)
+	}
+}
+
+func buildOpsAlertEmailBody(rule *OpsAlertRule, event *OpsAlertEvent) string {
+	if rule == nil || event == nil {
+		return ""
+	}
+	metric := strings.TrimSpace(rule.MetricType)
+	value := "-"
+	threshold := fmt.Sprintf("%.2f", rule.Threshold)
+	if event.MetricValue != nil {
+		value = fmt.Sprintf("%.2f", *event.MetricValue)
+	}
+	if event.ThresholdValue != nil {
+		threshold = fmt.Sprintf("%.2f", *event.ThresholdValue)
+	}
+	return fmt.Sprintf(`
+<h3>Ops Alert</h3>
+<p>Rule: %s</p>
+<p>Severity: %s</p>
+<p>Status: %s</p>
+<p>Metric: %s %s %s</p>
+<p>Fired at: %s</p>
+<p>Description: %s</p>
+`, + htmlEscape(rule.Name), + htmlEscape(rule.Severity), + htmlEscape(event.Status), + htmlEscape(metric), + htmlEscape(rule.Operator), + htmlEscape(fmt.Sprintf("%s (threshold %s)", value, threshold)), + event.FiredAt.Format(time.RFC3339), + htmlEscape(event.Description), + ) +} + +func shouldSendOpsAlertEmailByMinSeverity(minSeverity string, ruleSeverity string) bool { + minSeverity = strings.ToLower(strings.TrimSpace(minSeverity)) + if minSeverity == "" { + return true + } + + eventLevel := opsEmailSeverityForOps(ruleSeverity) + minLevel := strings.ToLower(minSeverity) + + rank := func(level string) int { + switch level { + case "critical": + return 3 + case "warning": + return 2 + case "info": + return 1 + default: + return 0 + } + } + return rank(eventLevel) >= rank(minLevel) +} + +func opsEmailSeverityForOps(severity string) string { + switch strings.ToUpper(strings.TrimSpace(severity)) { + case "P0": + return "critical" + case "P1": + return "warning" + default: + return "info" + } +} + +func isOpsAlertSilenced(now time.Time, rule *OpsAlertRule, event *OpsAlertEvent, silencing OpsAlertSilencingSettings) bool { + if !silencing.Enabled { + return false + } + if now.IsZero() { + now = time.Now().UTC() + } + if strings.TrimSpace(silencing.GlobalUntilRFC3339) != "" { + if t, err := time.Parse(time.RFC3339, strings.TrimSpace(silencing.GlobalUntilRFC3339)); err == nil { + if now.Before(t) { + return true + } + } + } + + for _, entry := range silencing.Entries { + untilRaw := strings.TrimSpace(entry.UntilRFC3339) + if untilRaw == "" { + continue + } + until, err := time.Parse(time.RFC3339, untilRaw) + if err != nil { + continue + } + if now.After(until) { + continue + } + if entry.RuleID != nil && rule != nil && rule.ID > 0 && *entry.RuleID != rule.ID { + continue + } + if len(entry.Severities) > 0 { + match := false + for _, s := range entry.Severities { + if strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(event.Severity)) || strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(rule.Severity)) { + match = true + break + } + } + if !match { + continue + } + } + return true + } + + return false +} + +func (s *OpsAlertEvaluatorService) tryAcquireLeaderLock(ctx context.Context, lock OpsDistributedLockSettings) (func(), bool) { + if !lock.Enabled { + return nil, true + } + if s.redisClient == nil { + s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsAlertEvaluator] redis not configured; running without distributed lock") + }) + return nil, true + } + key := strings.TrimSpace(lock.Key) + if key == "" { + key = opsAlertEvaluatorLeaderLockKey + } + ttl := time.Duration(lock.TTLSeconds) * time.Second + if ttl <= 0 { + ttl = opsAlertEvaluatorLeaderLockTTL + } + + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err != nil { + // Prefer fail-closed to avoid duplicate evaluators stampeding the DB when Redis is flaky. + // Single-node deployments can disable the distributed lock via runtime settings. 
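+		// (Contrast with the aggregation service's tryAcquireLeaderLock, which
+		// falls back to a DB advisory lock on Redis errors instead of skipping.)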
+		s.warnNoRedisOnce.Do(func() {
+			log.Printf("[OpsAlertEvaluator] leader lock SetNX failed; skipping this cycle: %v", err)
+		})
+		return nil, false
+	}
+	if !ok {
+		s.maybeLogSkip(key)
+		return nil, false
+	}
+	return func() {
+		_, _ = opsAlertEvaluatorReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result()
+	}, true
+}
+
+func (s *OpsAlertEvaluatorService) maybeLogSkip(key string) {
+	s.skipLogMu.Lock()
+	defer s.skipLogMu.Unlock()
+
+	now := time.Now()
+	if !s.skipLogAt.IsZero() && now.Sub(s.skipLogAt) < opsAlertEvaluatorSkipLogInterval {
+		return
+	}
+	s.skipLogAt = now
+	log.Printf("[OpsAlertEvaluator] leader lock held by another instance; skipping (key=%q)", key)
+}
+
+func (s *OpsAlertEvaluatorService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) {
+	if s == nil || s.opsRepo == nil {
+		return
+	}
+	now := time.Now().UTC()
+	durMs := duration.Milliseconds()
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
+		JobName:        opsAlertEvaluatorJobName,
+		LastRunAt:      &runAt,
+		LastSuccessAt:  &now,
+		LastDurationMs: &durMs,
+	})
+}
+
+func (s *OpsAlertEvaluatorService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) {
+	if s == nil || s.opsRepo == nil || err == nil {
+		return
+	}
+	now := time.Now().UTC()
+	durMs := duration.Milliseconds()
+	msg := truncateString(err.Error(), 2048)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
+		JobName:        opsAlertEvaluatorJobName,
+		LastRunAt:      &runAt,
+		LastErrorAt:    &now,
+		LastError:      &msg,
+		LastDurationMs: &durMs,
+	})
+}
+
+// htmlEscape escapes the five HTML-special characters for safe interpolation
+// into the alert email body.
+func htmlEscape(s string) string {
+	replacer := strings.NewReplacer(
+		"&", "&amp;",
+		"<", "&lt;",
+		">", "&gt;",
+		`"`, "&quot;",
+		"'", "&#39;",
+	)
+	return replacer.Replace(s)
+}
+
+type slidingWindowLimiter struct {
+	mu     sync.Mutex
+	limit  int
+	window time.Duration
+	sent   []time.Time
+}
+
+func newSlidingWindowLimiter(limit int, window time.Duration) *slidingWindowLimiter {
+	if window <= 0 {
+		window = time.Hour
+	}
+	return &slidingWindowLimiter{
+		limit:  limit,
+		window: window,
+		sent:   []time.Time{},
+	}
+}
+
+func (l *slidingWindowLimiter) SetLimit(limit int) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.limit = limit
+}
+
+func (l *slidingWindowLimiter) Allow(now time.Time) bool {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	// A non-positive limit disables rate limiting.
+	if l.limit <= 0 {
+		return true
+	}
+	cutoff := now.Add(-l.window)
+	keep := l.sent[:0]
+	for _, t := range l.sent {
+		if t.After(cutoff) {
+			keep = append(keep, t)
+		}
+	}
+	l.sent = keep
+	if len(l.sent) >= l.limit {
+		return false
+	}
+	l.sent = append(l.sent, now)
+	return true
+}
+
+// computeGroupAvailableRatio returns the available percentage for a group.
+// Formula: (AvailableCount / TotalAccounts) * 100.
+// Returns 0 when TotalAccounts is 0.
+func computeGroupAvailableRatio(group *GroupAvailability) float64 {
+	if group == nil || group.TotalAccounts <= 0 {
+		return 0
+	}
+	return (float64(group.AvailableCount) / float64(group.TotalAccounts)) * 100
+}
+
+// countAccountsByCondition counts accounts that satisfy the given condition.
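+//
+// Doc sketch mirroring the call sites in computeRuleMetric:
+//
+//	rateLimited := countAccountsByCondition(availability.Accounts, func(acc *AccountAvailability) bool {
+//		return acc.IsRateLimited
+//	})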
+func countAccountsByCondition(accounts map[int64]*AccountAvailability, condition func(*AccountAvailability) bool) int64 {
+	if len(accounts) == 0 || condition == nil {
+		return 0
+	}
+	var count int64
+	for _, account := range accounts {
+		if account != nil && condition(account) {
+			count++
+		}
+	}
+	return count
+}
diff --git a/backend/internal/service/ops_alert_evaluator_service_test.go b/backend/internal/service/ops_alert_evaluator_service_test.go
new file mode 100644
index 00000000..068ab6bb
--- /dev/null
+++ b/backend/internal/service/ops_alert_evaluator_service_test.go
@@ -0,0 +1,210 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+type stubOpsRepo struct {
+	OpsRepository
+	overview *OpsDashboardOverview
+	err      error
+}
+
+func (s *stubOpsRepo) GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) {
+	if s.err != nil {
+		return nil, s.err
+	}
+	if s.overview != nil {
+		return s.overview, nil
+	}
+	return &OpsDashboardOverview{}, nil
+}
+
+func TestComputeGroupAvailableRatio(t *testing.T) {
+	t.Parallel()
+
+	t.Run("normal case: 10 accounts, 8 available = 80%", func(t *testing.T) {
+		t.Parallel()
+
+		got := computeGroupAvailableRatio(&GroupAvailability{
+			TotalAccounts:  10,
+			AvailableCount: 8,
+		})
+		require.InDelta(t, 80.0, got, 0.0001)
+	})
+
+	t.Run("edge case: TotalAccounts = 0 should return 0", func(t *testing.T) {
+		t.Parallel()
+
+		got := computeGroupAvailableRatio(&GroupAvailability{
+			TotalAccounts:  0,
+			AvailableCount: 8,
+		})
+		require.Equal(t, 0.0, got)
+	})
+
+	t.Run("edge case: AvailableCount = 0 should return 0%", func(t *testing.T) {
+		t.Parallel()
+
+		got := computeGroupAvailableRatio(&GroupAvailability{
+			TotalAccounts:  10,
+			AvailableCount: 0,
+		})
+		require.Equal(t, 0.0, got)
+	})
+}
+
+func TestCountAccountsByCondition(t *testing.T) {
+	t.Parallel()
+
+	t.Run("rate-limited account count: acc.IsRateLimited", func(t *testing.T) {
+		t.Parallel()
+
+		accounts := map[int64]*AccountAvailability{
+			1: {IsRateLimited: true},
+			2: {IsRateLimited: false},
+			3: {IsRateLimited: true},
+		}
+
+		got := countAccountsByCondition(accounts, func(acc *AccountAvailability) bool {
+			return acc.IsRateLimited
+		})
+		require.Equal(t, int64(2), got)
+	})
+
+	t.Run("error account count (excluding temporarily unschedulable): acc.HasError && acc.TempUnschedulableUntil == nil", func(t *testing.T) {
+		t.Parallel()
+
+		until := time.Now().UTC().Add(5 * time.Minute)
+		accounts := map[int64]*AccountAvailability{
+			1: {HasError: true},
+			2: {HasError: true, TempUnschedulableUntil: &until},
+			3: {HasError: false},
+		}
+
+		got := countAccountsByCondition(accounts, func(acc *AccountAvailability) bool {
+			return acc.HasError && acc.TempUnschedulableUntil == nil
+		})
+		require.Equal(t, int64(1), got)
+	})
+
+	t.Run("edge case: empty map should return 0", func(t *testing.T) {
+		t.Parallel()
+
+		got := countAccountsByCondition(map[int64]*AccountAvailability{}, func(acc *AccountAvailability) bool {
+			return acc.IsRateLimited
+		})
+		require.Equal(t, int64(0), got)
+	})
+}
+
+func TestComputeRuleMetricNewIndicators(t *testing.T) {
+	t.Parallel()
+
+	groupID := int64(101)
+	platform := "openai"
+
+	availability := &OpsAccountAvailability{
+		Group: &GroupAvailability{
+			GroupID:        groupID,
+			TotalAccounts:  10,
+			AvailableCount: 8,
+		},
+		Accounts: map[int64]*AccountAvailability{
+			1: {IsRateLimited: true},
+			2: {IsRateLimited: true},
+			3: {HasError: true},
+			4: {HasError: true, TempUnschedulableUntil: timePtr(time.Now().UTC().Add(2 * time.Minute))},
+			5: {HasError: false, IsRateLimited: false},
+		},
+	}
+
+ opsService := &OpsService{ + getAccountAvailability: func(_ context.Context, _ string, _ *int64) (*OpsAccountAvailability, error) { + return availability, nil + }, + } + + svc := &OpsAlertEvaluatorService{ + opsService: opsService, + opsRepo: &stubOpsRepo{overview: &OpsDashboardOverview{}}, + } + + start := time.Now().UTC().Add(-5 * time.Minute) + end := time.Now().UTC() + ctx := context.Background() + + tests := []struct { + name string + metricType string + groupID *int64 + wantValue float64 + wantOK bool + }{ + { + name: "group_available_accounts", + metricType: "group_available_accounts", + groupID: &groupID, + wantValue: 8, + wantOK: true, + }, + { + name: "group_available_ratio", + metricType: "group_available_ratio", + groupID: &groupID, + wantValue: 80.0, + wantOK: true, + }, + { + name: "account_rate_limited_count", + metricType: "account_rate_limited_count", + groupID: nil, + wantValue: 2, + wantOK: true, + }, + { + name: "account_error_count", + metricType: "account_error_count", + groupID: nil, + wantValue: 1, + wantOK: true, + }, + { + name: "group_available_accounts without group_id returns false", + metricType: "group_available_accounts", + groupID: nil, + wantValue: 0, + wantOK: false, + }, + { + name: "group_available_ratio without group_id returns false", + metricType: "group_available_ratio", + groupID: nil, + wantValue: 0, + wantOK: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + rule := &OpsAlertRule{ + MetricType: tt.metricType, + } + gotValue, gotOK := svc.computeRuleMetric(ctx, rule, nil, start, end, platform, tt.groupID) + require.Equal(t, tt.wantOK, gotOK) + if !tt.wantOK { + return + } + require.InDelta(t, tt.wantValue, gotValue, 0.0001) + }) + } +} diff --git a/backend/internal/service/ops_alert_models.go b/backend/internal/service/ops_alert_models.go new file mode 100644 index 00000000..a0caa990 --- /dev/null +++ b/backend/internal/service/ops_alert_models.go @@ -0,0 +1,95 @@ +package service + +import "time" + +// Ops alert rule/event models. +// +// NOTE: These are admin-facing DTOs and intentionally keep JSON naming aligned +// with the existing ops dashboard frontend (backup style). 
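+//
+// Status lifecycle (enforced by OpsService.UpdateAlertEventStatus): events are
+// created as "firing" and transition to "resolved" (auto-resolved by the
+// evaluator) or "manual_resolved" (operator action); no other target statuses
+// are accepted.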
+ +const ( + OpsAlertStatusFiring = "firing" + OpsAlertStatusResolved = "resolved" + OpsAlertStatusManualResolved = "manual_resolved" +) + +type OpsAlertRule struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + + Enabled bool `json:"enabled"` + Severity string `json:"severity"` + + MetricType string `json:"metric_type"` + Operator string `json:"operator"` + Threshold float64 `json:"threshold"` + + WindowMinutes int `json:"window_minutes"` + SustainedMinutes int `json:"sustained_minutes"` + CooldownMinutes int `json:"cooldown_minutes"` + + NotifyEmail bool `json:"notify_email"` + + Filters map[string]any `json:"filters,omitempty"` + + LastTriggeredAt *time.Time `json:"last_triggered_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type OpsAlertEvent struct { + ID int64 `json:"id"` + RuleID int64 `json:"rule_id"` + Severity string `json:"severity"` + Status string `json:"status"` + + Title string `json:"title"` + Description string `json:"description"` + + MetricValue *float64 `json:"metric_value,omitempty"` + ThresholdValue *float64 `json:"threshold_value,omitempty"` + + Dimensions map[string]any `json:"dimensions,omitempty"` + + FiredAt time.Time `json:"fired_at"` + ResolvedAt *time.Time `json:"resolved_at,omitempty"` + + EmailSent bool `json:"email_sent"` + CreatedAt time.Time `json:"created_at"` +} + +type OpsAlertSilence struct { + ID int64 `json:"id"` + + RuleID int64 `json:"rule_id"` + Platform string `json:"platform"` + GroupID *int64 `json:"group_id,omitempty"` + Region *string `json:"region,omitempty"` + + Until time.Time `json:"until"` + Reason string `json:"reason"` + + CreatedBy *int64 `json:"created_by,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +type OpsAlertEventFilter struct { + Limit int + + // Cursor pagination (descending by fired_at, then id). + BeforeFiredAt *time.Time + BeforeID *int64 + + // Optional filters. + Status string + Severity string + EmailSent *bool + + StartTime *time.Time + EndTime *time.Time + + // Dimensions filters (best-effort). 
+ Platform string + GroupID *int64 +} diff --git a/backend/internal/service/ops_alerts.go b/backend/internal/service/ops_alerts.go new file mode 100644 index 00000000..b4c09824 --- /dev/null +++ b/backend/internal/service/ops_alerts.go @@ -0,0 +1,232 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) ListAlertRules(ctx context.Context) ([]*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return []*OpsAlertRule{}, nil + } + return s.opsRepo.ListAlertRules(ctx) +} + +func (s *OpsService) CreateAlertRule(ctx context.Context, rule *OpsAlertRule) (*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if rule == nil { + return nil, infraerrors.BadRequest("INVALID_RULE", "invalid rule") + } + + created, err := s.opsRepo.CreateAlertRule(ctx, rule) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) UpdateAlertRule(ctx context.Context, rule *OpsAlertRule) (*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if rule == nil || rule.ID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE", "invalid rule") + } + + updated, err := s.opsRepo.UpdateAlertRule(ctx, rule) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ALERT_RULE_NOT_FOUND", "alert rule not found") + } + return nil, err + } + return updated, nil +} + +func (s *OpsService) DeleteAlertRule(ctx context.Context, id int64) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if id <= 0 { + return infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if err := s.opsRepo.DeleteAlertRule(ctx, id); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return infraerrors.NotFound("OPS_ALERT_RULE_NOT_FOUND", "alert rule not found") + } + return err + } + return nil +} + +func (s *OpsService) ListAlertEvents(ctx context.Context, filter *OpsAlertEventFilter) ([]*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return []*OpsAlertEvent{}, nil + } + return s.opsRepo.ListAlertEvents(ctx, filter) +} + +func (s *OpsService) GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return nil, infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + ev, err := s.opsRepo.GetAlertEventByID(ctx, eventID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found") + } + return nil, err + } + if ev == nil { + return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found") + } + return ev, nil +} + +func 
(s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if ruleID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + return s.opsRepo.GetActiveAlertEvent(ctx, ruleID) +} + +func (s *OpsService) CreateAlertSilence(ctx context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if input == nil { + return nil, infraerrors.BadRequest("INVALID_SILENCE", "invalid silence") + } + if input.RuleID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if strings.TrimSpace(input.Platform) == "" { + return nil, infraerrors.BadRequest("INVALID_PLATFORM", "invalid platform") + } + if input.Until.IsZero() { + return nil, infraerrors.BadRequest("INVALID_UNTIL", "invalid until") + } + + created, err := s.opsRepo.CreateAlertSilence(ctx, input) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return false, err + } + if s.opsRepo == nil { + return false, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if ruleID <= 0 { + return false, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if strings.TrimSpace(platform) == "" { + return false, nil + } + return s.opsRepo.IsAlertSilenced(ctx, ruleID, platform, groupID, region, now) +} + +func (s *OpsService) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if ruleID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + return s.opsRepo.GetLatestAlertEvent(ctx, ruleID) +} + +func (s *OpsService) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if event == nil { + return nil, infraerrors.BadRequest("INVALID_EVENT", "invalid event") + } + + created, err := s.opsRepo.CreateAlertEvent(ctx, event) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + status = strings.TrimSpace(status) + if status == "" { + return infraerrors.BadRequest("INVALID_STATUS", "invalid status") + } + if status != 
OpsAlertStatusResolved && status != OpsAlertStatusManualResolved { + return infraerrors.BadRequest("INVALID_STATUS", "invalid status") + } + return s.opsRepo.UpdateAlertEventStatus(ctx, eventID, status, resolvedAt) +} + +func (s *OpsService) UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + return s.opsRepo.UpdateAlertEventEmailSent(ctx, eventID, emailSent) +} diff --git a/backend/internal/service/ops_cleanup_service.go b/backend/internal/service/ops_cleanup_service.go new file mode 100644 index 00000000..afd2d22c --- /dev/null +++ b/backend/internal/service/ops_cleanup_service.go @@ -0,0 +1,365 @@ +package service + +import ( + "context" + "database/sql" + "fmt" + "log" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "github.com/robfig/cron/v3" +) + +const ( + opsCleanupJobName = "ops_cleanup" + + opsCleanupLeaderLockKeyDefault = "ops:cleanup:leader" + opsCleanupLeaderLockTTLDefault = 30 * time.Minute +) + +var opsCleanupCronParser = cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + +var opsCleanupReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +// OpsCleanupService periodically deletes old ops data to prevent unbounded DB growth. +// +// - Scheduling: 5-field cron spec (minute hour dom month dow). +// - Multi-instance: best-effort Redis leader lock so only one node runs cleanup. +// - Safety: deletes in batches to avoid long transactions. 
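+//
+// For example, the default schedule "0 2 * * *" (see Start) runs the cleanup
+// daily at 02:00 in the configured timezone.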
+type OpsCleanupService struct { + opsRepo OpsRepository + db *sql.DB + redisClient *redis.Client + cfg *config.Config + + instanceID string + + cron *cron.Cron + + startOnce sync.Once + stopOnce sync.Once + + warnNoRedisOnce sync.Once +} + +func NewOpsCleanupService( + opsRepo OpsRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsCleanupService { + return &OpsCleanupService{ + opsRepo: opsRepo, + db: db, + redisClient: redisClient, + cfg: cfg, + instanceID: uuid.NewString(), + } +} + +func (s *OpsCleanupService) Start() { + if s == nil { + return + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return + } + if s.cfg != nil && !s.cfg.Ops.Cleanup.Enabled { + log.Printf("[OpsCleanup] not started (disabled)") + return + } + if s.opsRepo == nil || s.db == nil { + log.Printf("[OpsCleanup] not started (missing deps)") + return + } + + s.startOnce.Do(func() { + schedule := "0 2 * * *" + if s.cfg != nil && strings.TrimSpace(s.cfg.Ops.Cleanup.Schedule) != "" { + schedule = strings.TrimSpace(s.cfg.Ops.Cleanup.Schedule) + } + + loc := time.Local + if s.cfg != nil && strings.TrimSpace(s.cfg.Timezone) != "" { + if parsed, err := time.LoadLocation(strings.TrimSpace(s.cfg.Timezone)); err == nil && parsed != nil { + loc = parsed + } + } + + c := cron.New(cron.WithParser(opsCleanupCronParser), cron.WithLocation(loc)) + _, err := c.AddFunc(schedule, func() { s.runScheduled() }) + if err != nil { + log.Printf("[OpsCleanup] not started (invalid schedule=%q): %v", schedule, err) + return + } + s.cron = c + s.cron.Start() + log.Printf("[OpsCleanup] started (schedule=%q tz=%s)", schedule, loc.String()) + }) +} + +func (s *OpsCleanupService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.cron != nil { + ctx := s.cron.Stop() + select { + case <-ctx.Done(): + case <-time.After(3 * time.Second): + log.Printf("[OpsCleanup] cron stop timed out") + } + } + }) +} + +func (s *OpsCleanupService) runScheduled() { + if s == nil || s.db == nil || s.opsRepo == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + release, ok := s.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + startedAt := time.Now().UTC() + runAt := startedAt + + counts, err := s.runCleanupOnce(ctx) + if err != nil { + s.recordHeartbeatError(runAt, time.Since(startedAt), err) + log.Printf("[OpsCleanup] cleanup failed: %v", err) + return + } + s.recordHeartbeatSuccess(runAt, time.Since(startedAt)) + log.Printf("[OpsCleanup] cleanup complete: %s", counts) +} + +type opsCleanupDeletedCounts struct { + errorLogs int64 + retryAttempts int64 + alertEvents int64 + systemMetrics int64 + hourlyPreagg int64 + dailyPreagg int64 +} + +func (c opsCleanupDeletedCounts) String() string { + return fmt.Sprintf( + "error_logs=%d retry_attempts=%d alert_events=%d system_metrics=%d hourly_preagg=%d daily_preagg=%d", + c.errorLogs, + c.retryAttempts, + c.alertEvents, + c.systemMetrics, + c.hourlyPreagg, + c.dailyPreagg, + ) +} + +func (s *OpsCleanupService) runCleanupOnce(ctx context.Context) (opsCleanupDeletedCounts, error) { + out := opsCleanupDeletedCounts{} + if s == nil || s.db == nil || s.cfg == nil { + return out, nil + } + + batchSize := 5000 + + now := time.Now().UTC() + + // Error-like tables: error logs / retry attempts / alert events. 
+ if days := s.cfg.Ops.Cleanup.ErrorLogRetentionDays; days > 0 { + cutoff := now.AddDate(0, 0, -days) + n, err := deleteOldRowsByID(ctx, s.db, "ops_error_logs", "created_at", cutoff, batchSize, false) + if err != nil { + return out, err + } + out.errorLogs = n + + n, err = deleteOldRowsByID(ctx, s.db, "ops_retry_attempts", "created_at", cutoff, batchSize, false) + if err != nil { + return out, err + } + out.retryAttempts = n + + n, err = deleteOldRowsByID(ctx, s.db, "ops_alert_events", "created_at", cutoff, batchSize, false) + if err != nil { + return out, err + } + out.alertEvents = n + } + + // Minute-level metrics snapshots. + if days := s.cfg.Ops.Cleanup.MinuteMetricsRetentionDays; days > 0 { + cutoff := now.AddDate(0, 0, -days) + n, err := deleteOldRowsByID(ctx, s.db, "ops_system_metrics", "created_at", cutoff, batchSize, false) + if err != nil { + return out, err + } + out.systemMetrics = n + } + + // Pre-aggregation tables (hourly/daily). + if days := s.cfg.Ops.Cleanup.HourlyMetricsRetentionDays; days > 0 { + cutoff := now.AddDate(0, 0, -days) + n, err := deleteOldRowsByID(ctx, s.db, "ops_metrics_hourly", "bucket_start", cutoff, batchSize, false) + if err != nil { + return out, err + } + out.hourlyPreagg = n + + n, err = deleteOldRowsByID(ctx, s.db, "ops_metrics_daily", "bucket_date", cutoff, batchSize, true) + if err != nil { + return out, err + } + out.dailyPreagg = n + } + + return out, nil +} + +func deleteOldRowsByID( + ctx context.Context, + db *sql.DB, + table string, + timeColumn string, + cutoff time.Time, + batchSize int, + castCutoffToDate bool, +) (int64, error) { + if db == nil { + return 0, nil + } + if batchSize <= 0 { + batchSize = 5000 + } + + where := fmt.Sprintf("%s < $1", timeColumn) + if castCutoffToDate { + where = fmt.Sprintf("%s < $1::date", timeColumn) + } + + q := fmt.Sprintf(` +WITH batch AS ( + SELECT id FROM %s + WHERE %s + ORDER BY id + LIMIT $2 +) +DELETE FROM %s +WHERE id IN (SELECT id FROM batch) +`, table, where, table) + + var total int64 + for { + res, err := db.ExecContext(ctx, q, cutoff, batchSize) + if err != nil { + // If ops tables aren't present yet (partial deployments), treat as no-op. + if strings.Contains(strings.ToLower(err.Error()), "does not exist") && strings.Contains(strings.ToLower(err.Error()), "relation") { + return total, nil + } + return total, err + } + affected, err := res.RowsAffected() + if err != nil { + return total, err + } + total += affected + if affected == 0 { + break + } + } + return total, nil +} + +func (s *OpsCleanupService) tryAcquireLeaderLock(ctx context.Context) (func(), bool) { + if s == nil { + return nil, false + } + // In simple run mode, assume single instance. + if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { + return nil, true + } + + key := opsCleanupLeaderLockKeyDefault + ttl := opsCleanupLeaderLockTTLDefault + + // Prefer Redis leader lock when available, but avoid stampeding the DB when Redis is flaky by + // falling back to a DB advisory lock. + if s.redisClient != nil { + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err == nil { + if !ok { + return nil, false + } + return func() { + _, _ = opsCleanupReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result() + }, true + } + // Redis error: fall back to DB advisory lock. 
+ s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsCleanup] leader lock SetNX failed; falling back to DB advisory lock: %v", err) + }) + } else { + s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsCleanup] redis not configured; using DB advisory lock") + }) + } + + release, ok := tryAcquireDBAdvisoryLock(ctx, s.db, hashAdvisoryLockID(key)) + if !ok { + return nil, false + } + return release, true +} + +func (s *OpsCleanupService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) { + if s == nil || s.opsRepo == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsCleanupJobName, + LastRunAt: &runAt, + LastSuccessAt: &now, + LastDurationMs: &durMs, + }) +} + +func (s *OpsCleanupService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) { + if s == nil || s.opsRepo == nil || err == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + msg := truncateString(err.Error(), 2048) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsCleanupJobName, + LastRunAt: &runAt, + LastErrorAt: &now, + LastError: &msg, + LastDurationMs: &durMs, + }) +} diff --git a/backend/internal/service/ops_concurrency.go b/backend/internal/service/ops_concurrency.go new file mode 100644 index 00000000..c3b7b853 --- /dev/null +++ b/backend/internal/service/ops_concurrency.go @@ -0,0 +1,257 @@ +package service + +import ( + "context" + "log" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +const ( + opsAccountsPageSize = 100 + opsConcurrencyBatchChunkSize = 200 +) + +func (s *OpsService) listAllAccountsForOps(ctx context.Context, platformFilter string) ([]Account, error) { + if s == nil || s.accountRepo == nil { + return []Account{}, nil + } + + out := make([]Account, 0, 128) + page := 1 + for { + accounts, pageInfo, err := s.accountRepo.ListWithFilters(ctx, pagination.PaginationParams{ + Page: page, + PageSize: opsAccountsPageSize, + }, platformFilter, "", "", "") + if err != nil { + return nil, err + } + if len(accounts) == 0 { + break + } + + out = append(out, accounts...) + if pageInfo != nil && int64(len(out)) >= pageInfo.Total { + break + } + if len(accounts) < opsAccountsPageSize { + break + } + + page++ + if page > 10_000 { + log.Printf("[Ops] listAllAccountsForOps: aborting after too many pages (platform=%q)", platformFilter) + break + } + } + + return out, nil +} + +func (s *OpsService) getAccountsLoadMapBestEffort(ctx context.Context, accounts []Account) map[int64]*AccountLoadInfo { + if s == nil || s.concurrencyService == nil { + return map[int64]*AccountLoadInfo{} + } + if len(accounts) == 0 { + return map[int64]*AccountLoadInfo{} + } + + // De-duplicate IDs (and keep the max concurrency to avoid under-reporting). 
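+	// For example, if account 7 appears twice with Concurrency 5 and 8, the
+	// batch lookup uses MaxConcurrency=8. (Values illustrative.)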
+ unique := make(map[int64]int, len(accounts)) + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + if prev, ok := unique[acc.ID]; !ok || acc.Concurrency > prev { + unique[acc.ID] = acc.Concurrency + } + } + + batch := make([]AccountWithConcurrency, 0, len(unique)) + for id, maxConc := range unique { + batch = append(batch, AccountWithConcurrency{ + ID: id, + MaxConcurrency: maxConc, + }) + } + + out := make(map[int64]*AccountLoadInfo, len(batch)) + for i := 0; i < len(batch); i += opsConcurrencyBatchChunkSize { + end := i + opsConcurrencyBatchChunkSize + if end > len(batch) { + end = len(batch) + } + part, err := s.concurrencyService.GetAccountsLoadBatch(ctx, batch[i:end]) + if err != nil { + // Best-effort: return zeros rather than failing the ops UI. + log.Printf("[Ops] GetAccountsLoadBatch failed: %v", err) + continue + } + for k, v := range part { + out[k] = v + } + } + + return out +} + +// GetConcurrencyStats returns real-time concurrency usage aggregated by platform/group/account. +// +// Optional filters: +// - platformFilter: only include accounts in that platform (best-effort reduces DB load) +// - groupIDFilter: only include accounts that belong to that group +func (s *OpsService) GetConcurrencyStats( + ctx context.Context, + platformFilter string, + groupIDFilter *int64, +) (map[string]*PlatformConcurrencyInfo, map[int64]*GroupConcurrencyInfo, map[int64]*AccountConcurrencyInfo, *time.Time, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, nil, nil, nil, err + } + + accounts, err := s.listAllAccountsForOps(ctx, platformFilter) + if err != nil { + return nil, nil, nil, nil, err + } + + collectedAt := time.Now() + loadMap := s.getAccountsLoadMapBestEffort(ctx, accounts) + + platform := make(map[string]*PlatformConcurrencyInfo) + group := make(map[int64]*GroupConcurrencyInfo) + account := make(map[int64]*AccountConcurrencyInfo) + + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + + var matchedGroup *Group + if groupIDFilter != nil && *groupIDFilter > 0 { + for _, grp := range acc.Groups { + if grp == nil || grp.ID <= 0 { + continue + } + if grp.ID == *groupIDFilter { + matchedGroup = grp + break + } + } + // Group filter provided: skip accounts not in that group. + if matchedGroup == nil { + continue + } + } + + load := loadMap[acc.ID] + currentInUse := int64(0) + waiting := int64(0) + if load != nil { + currentInUse = int64(load.CurrentConcurrency) + waiting = int64(load.WaitingCount) + } + + // Account-level view picks one display group (the first group). + displayGroupID := int64(0) + displayGroupName := "" + if matchedGroup != nil { + displayGroupID = matchedGroup.ID + displayGroupName = matchedGroup.Name + } else if len(acc.Groups) > 0 && acc.Groups[0] != nil { + displayGroupID = acc.Groups[0].ID + displayGroupName = acc.Groups[0].Name + } + + if _, ok := account[acc.ID]; !ok { + info := &AccountConcurrencyInfo{ + AccountID: acc.ID, + AccountName: acc.Name, + Platform: acc.Platform, + GroupID: displayGroupID, + GroupName: displayGroupName, + CurrentInUse: currentInUse, + MaxCapacity: int64(acc.Concurrency), + WaitingInQueue: waiting, + } + if info.MaxCapacity > 0 { + info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100 + } + account[acc.ID] = info + } + + // Platform aggregation. 
+ if acc.Platform != "" { + if _, ok := platform[acc.Platform]; !ok { + platform[acc.Platform] = &PlatformConcurrencyInfo{ + Platform: acc.Platform, + } + } + p := platform[acc.Platform] + p.MaxCapacity += int64(acc.Concurrency) + p.CurrentInUse += currentInUse + p.WaitingInQueue += waiting + } + + // Group aggregation (one account may contribute to multiple groups). + if matchedGroup != nil { + grp := matchedGroup + if _, ok := group[grp.ID]; !ok { + group[grp.ID] = &GroupConcurrencyInfo{ + GroupID: grp.ID, + GroupName: grp.Name, + Platform: grp.Platform, + } + } + g := group[grp.ID] + if g.GroupName == "" && grp.Name != "" { + g.GroupName = grp.Name + } + if g.Platform != "" && grp.Platform != "" && g.Platform != grp.Platform { + // Groups are expected to be platform-scoped. If mismatch is observed, avoid misleading labels. + g.Platform = "" + } + g.MaxCapacity += int64(acc.Concurrency) + g.CurrentInUse += currentInUse + g.WaitingInQueue += waiting + } else { + for _, grp := range acc.Groups { + if grp == nil || grp.ID <= 0 { + continue + } + if _, ok := group[grp.ID]; !ok { + group[grp.ID] = &GroupConcurrencyInfo{ + GroupID: grp.ID, + GroupName: grp.Name, + Platform: grp.Platform, + } + } + g := group[grp.ID] + if g.GroupName == "" && grp.Name != "" { + g.GroupName = grp.Name + } + if g.Platform != "" && grp.Platform != "" && g.Platform != grp.Platform { + // Groups are expected to be platform-scoped. If mismatch is observed, avoid misleading labels. + g.Platform = "" + } + g.MaxCapacity += int64(acc.Concurrency) + g.CurrentInUse += currentInUse + g.WaitingInQueue += waiting + } + } + } + + for _, info := range platform { + if info.MaxCapacity > 0 { + info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100 + } + } + for _, info := range group { + if info.MaxCapacity > 0 { + info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100 + } + } + + return platform, group, account, &collectedAt, nil +} diff --git a/backend/internal/service/ops_dashboard.go b/backend/internal/service/ops_dashboard.go new file mode 100644 index 00000000..31822ba8 --- /dev/null +++ b/backend/internal/service/ops_dashboard.go @@ -0,0 +1,90 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "log" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + + // Resolve query mode (requested via query param, or DB default). 
+ filter.QueryMode = s.resolveOpsQueryMode(ctx, filter.QueryMode) + + overview, err := s.opsRepo.GetDashboardOverview(ctx, filter) + if err != nil { + if errors.Is(err, ErrOpsPreaggregatedNotPopulated) { + return nil, infraerrors.Conflict("OPS_PREAGG_NOT_READY", "Pre-aggregated ops metrics are not populated yet") + } + return nil, err + } + + // Best-effort system health + jobs; dashboard metrics should still render if these are missing. + if metrics, err := s.opsRepo.GetLatestSystemMetrics(ctx, 1); err == nil { + // Attach config-derived limits so the UI can show "current / max" for connection pools. + // These are best-effort and should never block the dashboard rendering. + if s != nil && s.cfg != nil { + if s.cfg.Database.MaxOpenConns > 0 { + metrics.DBMaxOpenConns = intPtr(s.cfg.Database.MaxOpenConns) + } + if s.cfg.Redis.PoolSize > 0 { + metrics.RedisPoolSize = intPtr(s.cfg.Redis.PoolSize) + } + } + overview.SystemMetrics = metrics + } else if err != nil && !errors.Is(err, sql.ErrNoRows) { + log.Printf("[Ops] GetLatestSystemMetrics failed: %v", err) + } + + if heartbeats, err := s.opsRepo.ListJobHeartbeats(ctx); err == nil { + overview.JobHeartbeats = heartbeats + } else { + log.Printf("[Ops] ListJobHeartbeats failed: %v", err) + } + + overview.HealthScore = computeDashboardHealthScore(time.Now().UTC(), overview) + + return overview, nil +} + +func (s *OpsService) resolveOpsQueryMode(ctx context.Context, requested OpsQueryMode) OpsQueryMode { + if requested.IsValid() { + // Allow "auto" to be disabled via config until preagg is proven stable in production. + // Forced `preagg` via query param still works. + if requested == OpsQueryModeAuto && s != nil && s.cfg != nil && !s.cfg.Ops.UsePreaggregatedTables { + return OpsQueryModeRaw + } + return requested + } + + mode := OpsQueryModeAuto + if s != nil && s.settingRepo != nil { + if raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsQueryModeDefault); err == nil { + mode = ParseOpsQueryMode(raw) + } + } + + if mode == OpsQueryModeAuto && s != nil && s.cfg != nil && !s.cfg.Ops.UsePreaggregatedTables { + return OpsQueryModeRaw + } + return mode +} diff --git a/backend/internal/service/ops_dashboard_models.go b/backend/internal/service/ops_dashboard_models.go new file mode 100644 index 00000000..f189031b --- /dev/null +++ b/backend/internal/service/ops_dashboard_models.go @@ -0,0 +1,87 @@ +package service + +import "time" + +type OpsDashboardFilter struct { + StartTime time.Time + EndTime time.Time + + Platform string + GroupID *int64 + + // QueryMode controls whether dashboard queries should use raw logs or pre-aggregated tables. + // Expected values: auto/raw/preagg (see OpsQueryMode). + QueryMode OpsQueryMode +} + +type OpsRateSummary struct { + Current float64 `json:"current"` + Peak float64 `json:"peak"` + Avg float64 `json:"avg"` +} + +type OpsPercentiles struct { + P50 *int `json:"p50_ms"` + P90 *int `json:"p90_ms"` + P95 *int `json:"p95_ms"` + P99 *int `json:"p99_ms"` + Avg *int `json:"avg_ms"` + Max *int `json:"max_ms"` +} + +type OpsDashboardOverview struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Platform string `json:"platform"` + GroupID *int64 `json:"group_id"` + + // HealthScore is a backend-computed overall health score (0-100). + // It is derived from the monitored metrics in this overview, plus best-effort system metrics/job heartbeats. + HealthScore int `json:"health_score"` + + // Latest system-level snapshot (window=1m, global). 
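+	// May be nil: GetDashboardOverview attaches this snapshot best-effort
+	// and leaves it unset when the collector has not produced one yet, so
+	// consumers must nil-check before reading.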
+ SystemMetrics *OpsSystemMetricsSnapshot `json:"system_metrics"` + + // Background jobs health (heartbeats). + JobHeartbeats []*OpsJobHeartbeat `json:"job_heartbeats"` + + SuccessCount int64 `json:"success_count"` + ErrorCountTotal int64 `json:"error_count_total"` + BusinessLimitedCount int64 `json:"business_limited_count"` + + ErrorCountSLA int64 `json:"error_count_sla"` + RequestCountTotal int64 `json:"request_count_total"` + RequestCountSLA int64 `json:"request_count_sla"` + + TokenConsumed int64 `json:"token_consumed"` + + SLA float64 `json:"sla"` + ErrorRate float64 `json:"error_rate"` + UpstreamErrorRate float64 `json:"upstream_error_rate"` + UpstreamErrorCountExcl429529 int64 `json:"upstream_error_count_excl_429_529"` + Upstream429Count int64 `json:"upstream_429_count"` + Upstream529Count int64 `json:"upstream_529_count"` + + QPS OpsRateSummary `json:"qps"` + TPS OpsRateSummary `json:"tps"` + + Duration OpsPercentiles `json:"duration"` + TTFT OpsPercentiles `json:"ttft"` +} + +type OpsLatencyHistogramBucket struct { + Range string `json:"range"` + Count int64 `json:"count"` +} + +// OpsLatencyHistogramResponse is a coarse latency distribution histogram (success requests only). +// It is used by the Ops dashboard to quickly identify tail latency regressions. +type OpsLatencyHistogramResponse struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Platform string `json:"platform"` + GroupID *int64 `json:"group_id"` + + TotalRequests int64 `json:"total_requests"` + Buckets []*OpsLatencyHistogramBucket `json:"buckets"` +} diff --git a/backend/internal/service/ops_errors.go b/backend/internal/service/ops_errors.go new file mode 100644 index 00000000..76b5ce8b --- /dev/null +++ b/backend/internal/service/ops_errors.go @@ -0,0 +1,45 @@ +package service + +import ( + "context" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + return s.opsRepo.GetErrorTrend(ctx, filter, bucketSeconds) +} + +func (s *OpsService) GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + return s.opsRepo.GetErrorDistribution(ctx, 
filter) +} diff --git a/backend/internal/service/ops_health_score.go b/backend/internal/service/ops_health_score.go new file mode 100644 index 00000000..5efae870 --- /dev/null +++ b/backend/internal/service/ops_health_score.go @@ -0,0 +1,143 @@ +package service + +import ( + "math" + "time" +) + +// computeDashboardHealthScore computes a 0-100 health score from the metrics returned by the dashboard overview. +// +// Design goals: +// - Backend-owned scoring (UI only displays). +// - Layered scoring: Business Health (70%) + Infrastructure Health (30%) +// - Avoids double-counting (e.g., DB failure affects both infra and business metrics) +// - Conservative + stable: penalize clear degradations; avoid overreacting to missing/idle data. +func computeDashboardHealthScore(now time.Time, overview *OpsDashboardOverview) int { + if overview == nil { + return 0 + } + + // Idle/no-data: avoid showing a "bad" score when there is no traffic. + // UI can still render a gray/idle state based on QPS + error rate. + if overview.RequestCountSLA <= 0 && overview.RequestCountTotal <= 0 && overview.ErrorCountTotal <= 0 { + return 100 + } + + businessHealth := computeBusinessHealth(overview) + infraHealth := computeInfraHealth(now, overview) + + // Weighted combination: 70% business + 30% infrastructure + score := businessHealth*0.7 + infraHealth*0.3 + return int(math.Round(clampFloat64(score, 0, 100))) +} + +// computeBusinessHealth calculates business health score (0-100) +// Components: Error Rate (50%) + TTFT (50%) +func computeBusinessHealth(overview *OpsDashboardOverview) float64 { + // Error rate score: 1% → 100, 10% → 0 (linear) + // Combines request errors and upstream errors + errorScore := 100.0 + errorPct := clampFloat64(overview.ErrorRate*100, 0, 100) + upstreamPct := clampFloat64(overview.UpstreamErrorRate*100, 0, 100) + combinedErrorPct := math.Max(errorPct, upstreamPct) // Use worst case + if combinedErrorPct > 1.0 { + if combinedErrorPct <= 10.0 { + errorScore = (10.0 - combinedErrorPct) / 9.0 * 100 + } else { + errorScore = 0 + } + } + + // TTFT score: 1s → 100, 3s → 0 (linear) + // Time to first token is critical for user experience + ttftScore := 100.0 + if overview.TTFT.P99 != nil { + p99 := float64(*overview.TTFT.P99) + if p99 > 1000 { + if p99 <= 3000 { + ttftScore = (3000 - p99) / 2000 * 100 + } else { + ttftScore = 0 + } + } + } + + // Weighted combination: 50% error rate + 50% TTFT + return errorScore*0.5 + ttftScore*0.5 +} + +// computeInfraHealth calculates infrastructure health score (0-100) +// Components: Storage (40%) + Compute Resources (30%) + Background Jobs (30%) +func computeInfraHealth(now time.Time, overview *OpsDashboardOverview) float64 { + // Storage score: DB critical, Redis less critical + storageScore := 100.0 + if overview.SystemMetrics != nil { + if overview.SystemMetrics.DBOK != nil && !*overview.SystemMetrics.DBOK { + storageScore = 0 // DB failure is critical + } else if overview.SystemMetrics.RedisOK != nil && !*overview.SystemMetrics.RedisOK { + storageScore = 50 // Redis failure is degraded but not critical + } + } + + // Compute resources score: CPU + Memory + computeScore := 100.0 + if overview.SystemMetrics != nil { + cpuScore := 100.0 + if overview.SystemMetrics.CPUUsagePercent != nil { + cpuPct := clampFloat64(*overview.SystemMetrics.CPUUsagePercent, 0, 100) + if cpuPct > 80 { + if cpuPct <= 100 { + cpuScore = (100 - cpuPct) / 20 * 100 + } else { + cpuScore = 0 + } + } + } + + memScore := 100.0 + if overview.SystemMetrics.MemoryUsagePercent != nil { + 
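+			// Linear ramp, mirroring the CPU branch above: usage <= 85%
+			// keeps the full score; e.g. 91% yields (100-91)/15*100 = 60
+			// (illustrative arithmetic, not a value from the source).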
memPct := clampFloat64(*overview.SystemMetrics.MemoryUsagePercent, 0, 100) + if memPct > 85 { + if memPct <= 100 { + memScore = (100 - memPct) / 15 * 100 + } else { + memScore = 0 + } + } + } + + computeScore = (cpuScore + memScore) / 2 + } + + // Background jobs score + jobScore := 100.0 + failedJobs := 0 + totalJobs := 0 + for _, hb := range overview.JobHeartbeats { + if hb == nil { + continue + } + totalJobs++ + if hb.LastErrorAt != nil && (hb.LastSuccessAt == nil || hb.LastErrorAt.After(*hb.LastSuccessAt)) { + failedJobs++ + } else if hb.LastSuccessAt != nil && now.Sub(*hb.LastSuccessAt) > 15*time.Minute { + failedJobs++ + } + } + if totalJobs > 0 && failedJobs > 0 { + jobScore = (1 - float64(failedJobs)/float64(totalJobs)) * 100 + } + + // Weighted combination + return storageScore*0.4 + computeScore*0.3 + jobScore*0.3 +} + +func clampFloat64(v float64, min float64, max float64) float64 { + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/backend/internal/service/ops_health_score_test.go b/backend/internal/service/ops_health_score_test.go new file mode 100644 index 00000000..25bfb43d --- /dev/null +++ b/backend/internal/service/ops_health_score_test.go @@ -0,0 +1,442 @@ +//go:build unit + +package service + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestComputeDashboardHealthScore_IdleReturns100(t *testing.T) { + t.Parallel() + + score := computeDashboardHealthScore(time.Now().UTC(), &OpsDashboardOverview{}) + require.Equal(t, 100, score) +} + +func TestComputeDashboardHealthScore_DegradesOnBadSignals(t *testing.T) { + t.Parallel() + + ov := &OpsDashboardOverview{ + RequestCountTotal: 100, + RequestCountSLA: 100, + SuccessCount: 90, + ErrorCountTotal: 10, + ErrorCountSLA: 10, + + SLA: 0.90, + ErrorRate: 0.10, + UpstreamErrorRate: 0.08, + + Duration: OpsPercentiles{P99: intPtr(20_000)}, + TTFT: OpsPercentiles{P99: intPtr(2_000)}, + + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(false), + RedisOK: boolPtr(false), + CPUUsagePercent: float64Ptr(98.0), + MemoryUsagePercent: float64Ptr(97.0), + DBConnWaiting: intPtr(3), + ConcurrencyQueueDepth: intPtr(10), + }, + JobHeartbeats: []*OpsJobHeartbeat{ + { + JobName: "job-a", + LastErrorAt: timePtr(time.Now().UTC().Add(-1 * time.Minute)), + LastError: stringPtr("boom"), + }, + }, + } + + score := computeDashboardHealthScore(time.Now().UTC(), ov) + require.Less(t, score, 80) + require.GreaterOrEqual(t, score, 0) +} + +func TestComputeDashboardHealthScore_Comprehensive(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + overview *OpsDashboardOverview + wantMin int + wantMax int + }{ + { + name: "nil overview returns 0", + overview: nil, + wantMin: 0, + wantMax: 0, + }, + { + name: "perfect health", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 1.0, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + TTFT: OpsPercentiles{P99: intPtr(100)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "good health - SLA 99.8%", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.998, + ErrorRate: 0.003, + UpstreamErrorRate: 0.001, + Duration: OpsPercentiles{P99: intPtr(800)}, + TTFT: OpsPercentiles{P99: intPtr(200)}, + SystemMetrics: 
&OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(50), + MemoryUsagePercent: float64Ptr(60), + }, + }, + wantMin: 95, + wantMax: 100, + }, + { + name: "medium health - SLA 96%", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.96, + ErrorRate: 0.02, + UpstreamErrorRate: 0.01, + Duration: OpsPercentiles{P99: intPtr(3000)}, + TTFT: OpsPercentiles{P99: intPtr(600)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(70), + MemoryUsagePercent: float64Ptr(75), + }, + }, + wantMin: 96, + wantMax: 97, + }, + { + name: "DB failure", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.995, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(false), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 70, + wantMax: 90, + }, + { + name: "Redis failure", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.995, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(false), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 85, + wantMax: 95, + }, + { + name: "high CPU usage", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.995, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(95), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 85, + wantMax: 100, + }, + { + name: "combined failures - business degraded + infra healthy", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.90, + ErrorRate: 0.05, + UpstreamErrorRate: 0.02, + Duration: OpsPercentiles{P99: intPtr(10000)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(20), + MemoryUsagePercent: float64Ptr(30), + }, + }, + wantMin: 84, + wantMax: 85, + }, + { + name: "combined failures - business healthy + infra degraded", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + RequestCountSLA: 1000, + SLA: 0.998, + ErrorRate: 0.001, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(600)}, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(false), + RedisOK: boolPtr(false), + CPUUsagePercent: float64Ptr(95), + MemoryUsagePercent: float64Ptr(95), + }, + }, + wantMin: 70, + wantMax: 90, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score := computeDashboardHealthScore(time.Now().UTC(), tt.overview) + require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %d", tt.wantMin) + require.LessOrEqual(t, score, tt.wantMax, "score should be <= %d", tt.wantMax) + require.GreaterOrEqual(t, score, 0, "score must be >= 0") + require.LessOrEqual(t, score, 100, "score must be <= 100") + }) + } +} + +func TestComputeBusinessHealth(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + overview *OpsDashboardOverview + wantMin float64 + wantMax float64 + }{ + { + name: "perfect 
metrics", + overview: &OpsDashboardOverview{ + SLA: 1.0, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "SLA boundary 99.5%", + overview: &OpsDashboardOverview{ + SLA: 0.995, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "SLA boundary 95%", + overview: &OpsDashboardOverview{ + SLA: 0.95, + ErrorRate: 0, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "error rate boundary 1%", + overview: &OpsDashboardOverview{ + SLA: 0.99, + ErrorRate: 0.01, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "error rate 5%", + overview: &OpsDashboardOverview{ + SLA: 0.95, + ErrorRate: 0.05, + UpstreamErrorRate: 0, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 77, + wantMax: 78, + }, + { + name: "TTFT boundary 2s", + overview: &OpsDashboardOverview{ + SLA: 0.99, + ErrorRate: 0, + UpstreamErrorRate: 0, + TTFT: OpsPercentiles{P99: intPtr(2000)}, + }, + wantMin: 75, + wantMax: 75, + }, + { + name: "upstream error dominates", + overview: &OpsDashboardOverview{ + SLA: 0.995, + ErrorRate: 0.001, + UpstreamErrorRate: 0.03, + Duration: OpsPercentiles{P99: intPtr(500)}, + }, + wantMin: 88, + wantMax: 90, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score := computeBusinessHealth(tt.overview) + require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %.1f", tt.wantMin) + require.LessOrEqual(t, score, tt.wantMax, "score should be <= %.1f", tt.wantMax) + require.GreaterOrEqual(t, score, 0.0, "score must be >= 0") + require.LessOrEqual(t, score, 100.0, "score must be <= 100") + }) + } +} + +func TestComputeInfraHealth(t *testing.T) { + t.Parallel() + + now := time.Now().UTC() + + tests := []struct { + name string + overview *OpsDashboardOverview + wantMin float64 + wantMax float64 + }{ + { + name: "all infrastructure healthy", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 100, + wantMax: 100, + }, + { + name: "DB down", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(false), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 50, + wantMax: 70, + }, + { + name: "Redis down", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(false), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 80, + wantMax: 95, + }, + { + name: "CPU at 90%", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(90), + MemoryUsagePercent: float64Ptr(40), + }, + }, + wantMin: 85, + wantMax: 95, + }, + { + name: "failed background job", + overview: &OpsDashboardOverview{ + RequestCountTotal: 1000, + SystemMetrics: &OpsSystemMetricsSnapshot{ + DBOK: boolPtr(true), + RedisOK: boolPtr(true), + CPUUsagePercent: float64Ptr(30), + MemoryUsagePercent: 
float64Ptr(40), + }, + JobHeartbeats: []*OpsJobHeartbeat{ + { + JobName: "test-job", + LastErrorAt: &now, + }, + }, + }, + wantMin: 70, + wantMax: 90, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score := computeInfraHealth(now, tt.overview) + require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %.1f", tt.wantMin) + require.LessOrEqual(t, score, tt.wantMax, "score should be <= %.1f", tt.wantMax) + require.GreaterOrEqual(t, score, 0.0, "score must be >= 0") + require.LessOrEqual(t, score, 100.0, "score must be <= 100") + }) + } +} + +func timePtr(v time.Time) *time.Time { return &v } + +func stringPtr(v string) *string { return &v } diff --git a/backend/internal/service/ops_histograms.go b/backend/internal/service/ops_histograms.go new file mode 100644 index 00000000..9f5b514f --- /dev/null +++ b/backend/internal/service/ops_histograms.go @@ -0,0 +1,26 @@ +package service + +import ( + "context" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + return s.opsRepo.GetLatencyHistogram(ctx, filter) +} diff --git a/backend/internal/service/ops_metrics_collector.go b/backend/internal/service/ops_metrics_collector.go new file mode 100644 index 00000000..edf32cf2 --- /dev/null +++ b/backend/internal/service/ops_metrics_collector.go @@ -0,0 +1,920 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "fmt" + "log" + "math" + "os" + "runtime" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/mem" +) + +const ( + opsMetricsCollectorJobName = "ops_metrics_collector" + opsMetricsCollectorMinInterval = 60 * time.Second + opsMetricsCollectorMaxInterval = 1 * time.Hour + + opsMetricsCollectorTimeout = 10 * time.Second + + opsMetricsCollectorLeaderLockKey = "ops:metrics:collector:leader" + opsMetricsCollectorLeaderLockTTL = 90 * time.Second + + opsMetricsCollectorHeartbeatTimeout = 2 * time.Second + + bytesPerMB = 1024 * 1024 +) + +var opsMetricsCollectorAdvisoryLockID = hashAdvisoryLockID(opsMetricsCollectorLeaderLockKey) + +type OpsMetricsCollector struct { + opsRepo OpsRepository + settingRepo SettingRepository + cfg *config.Config + + accountRepo AccountRepository + concurrencyService *ConcurrencyService + + db *sql.DB + redisClient *redis.Client + instanceID string + + lastCgroupCPUUsageNanos uint64 + lastCgroupCPUSampleAt time.Time + + stopCh chan struct{} + startOnce sync.Once + stopOnce sync.Once + + skipLogMu sync.Mutex + skipLogAt time.Time +} + +func NewOpsMetricsCollector( + opsRepo OpsRepository, + settingRepo SettingRepository, + accountRepo AccountRepository, + 
concurrencyService *ConcurrencyService, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsMetricsCollector { + return &OpsMetricsCollector{ + opsRepo: opsRepo, + settingRepo: settingRepo, + cfg: cfg, + accountRepo: accountRepo, + concurrencyService: concurrencyService, + db: db, + redisClient: redisClient, + instanceID: uuid.NewString(), + } +} + +func (c *OpsMetricsCollector) Start() { + if c == nil { + return + } + c.startOnce.Do(func() { + if c.stopCh == nil { + c.stopCh = make(chan struct{}) + } + go c.run() + }) +} + +func (c *OpsMetricsCollector) Stop() { + if c == nil { + return + } + c.stopOnce.Do(func() { + if c.stopCh != nil { + close(c.stopCh) + } + }) +} + +func (c *OpsMetricsCollector) run() { + // First run immediately so the dashboard has data soon after startup. + c.collectOnce() + + for { + interval := c.getInterval() + timer := time.NewTimer(interval) + select { + case <-timer.C: + c.collectOnce() + case <-c.stopCh: + timer.Stop() + return + } + } +} + +func (c *OpsMetricsCollector) getInterval() time.Duration { + interval := opsMetricsCollectorMinInterval + + if c.settingRepo == nil { + return interval + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + raw, err := c.settingRepo.GetValue(ctx, SettingKeyOpsMetricsIntervalSeconds) + if err != nil { + return interval + } + raw = strings.TrimSpace(raw) + if raw == "" { + return interval + } + + seconds, err := strconv.Atoi(raw) + if err != nil { + return interval + } + if seconds < int(opsMetricsCollectorMinInterval.Seconds()) { + seconds = int(opsMetricsCollectorMinInterval.Seconds()) + } + if seconds > int(opsMetricsCollectorMaxInterval.Seconds()) { + seconds = int(opsMetricsCollectorMaxInterval.Seconds()) + } + return time.Duration(seconds) * time.Second +} + +func (c *OpsMetricsCollector) collectOnce() { + if c == nil { + return + } + if c.cfg != nil && !c.cfg.Ops.Enabled { + return + } + if c.opsRepo == nil { + return + } + if c.db == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), opsMetricsCollectorTimeout) + defer cancel() + + if !c.isMonitoringEnabled(ctx) { + return + } + + release, ok := c.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + startedAt := time.Now().UTC() + err := c.collectAndPersist(ctx) + finishedAt := time.Now().UTC() + + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + runAt := startedAt + + if err != nil { + msg := truncateString(err.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), opsMetricsCollectorHeartbeatTimeout) + defer hbCancel() + _ = c.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsMetricsCollectorJobName, + LastRunAt: &runAt, + LastErrorAt: &errAt, + LastError: &msg, + LastDurationMs: &dur, + }) + log.Printf("[OpsMetricsCollector] collect failed: %v", err) + return + } + + successAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), opsMetricsCollectorHeartbeatTimeout) + defer hbCancel() + _ = c.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsMetricsCollectorJobName, + LastRunAt: &runAt, + LastSuccessAt: &successAt, + LastDurationMs: &dur, + }) +} + +func (c *OpsMetricsCollector) isMonitoringEnabled(ctx context.Context) bool { + if c == nil { + return false + } + if c.cfg != nil && !c.cfg.Ops.Enabled { + return false + } + if c.settingRepo == nil { + return true + } + if ctx == 
nil { + ctx = context.Background() + } + + value, err := c.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + return true + } + // Fail-open: collector should not become a hard dependency. + return true + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} + +func (c *OpsMetricsCollector) collectAndPersist(ctx context.Context) error { + if ctx == nil { + ctx = context.Background() + } + + // Align to stable minute boundaries to avoid partial buckets and to maximize cache hits. + now := time.Now().UTC() + windowEnd := now.Truncate(time.Minute) + windowStart := windowEnd.Add(-1 * time.Minute) + + sys, err := c.collectSystemStats(ctx) + if err != nil { + // Continue; system stats are best-effort. + log.Printf("[OpsMetricsCollector] system stats error: %v", err) + } + + dbOK := c.checkDB(ctx) + redisOK := c.checkRedis(ctx) + active, idle := c.dbPoolStats() + redisTotal, redisIdle, redisStatsOK := c.redisPoolStats() + + successCount, tokenConsumed, err := c.queryUsageCounts(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query usage counts: %w", err) + } + + duration, ttft, err := c.queryUsageLatency(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query usage latency: %w", err) + } + + errorTotal, businessLimited, errorSLA, upstreamExcl, upstream429, upstream529, err := c.queryErrorCounts(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query error counts: %w", err) + } + + windowSeconds := windowEnd.Sub(windowStart).Seconds() + if windowSeconds <= 0 { + windowSeconds = 60 + } + requestTotal := successCount + errorTotal + qps := float64(requestTotal) / windowSeconds + tps := float64(tokenConsumed) / windowSeconds + + goroutines := runtime.NumGoroutine() + concurrencyQueueDepth := c.collectConcurrencyQueueDepth(ctx) + + input := &OpsInsertSystemMetricsInput{ + CreatedAt: windowEnd, + WindowMinutes: 1, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: errorSLA, + + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + + TokenConsumed: tokenConsumed, + QPS: float64Ptr(roundTo1DP(qps)), + TPS: float64Ptr(roundTo1DP(tps)), + + DurationP50Ms: duration.p50, + DurationP90Ms: duration.p90, + DurationP95Ms: duration.p95, + DurationP99Ms: duration.p99, + DurationAvgMs: duration.avg, + DurationMaxMs: duration.max, + + TTFTP50Ms: ttft.p50, + TTFTP90Ms: ttft.p90, + TTFTP95Ms: ttft.p95, + TTFTP99Ms: ttft.p99, + TTFTAvgMs: ttft.avg, + TTFTMaxMs: ttft.max, + + CPUUsagePercent: sys.cpuUsagePercent, + MemoryUsedMB: sys.memoryUsedMB, + MemoryTotalMB: sys.memoryTotalMB, + MemoryUsagePercent: sys.memoryUsagePercent, + + DBOK: boolPtr(dbOK), + RedisOK: boolPtr(redisOK), + + RedisConnTotal: func() *int { + if !redisStatsOK { + return nil + } + return intPtr(redisTotal) + }(), + RedisConnIdle: func() *int { + if !redisStatsOK { + return nil + } + return intPtr(redisIdle) + }(), + + DBConnActive: intPtr(active), + DBConnIdle: intPtr(idle), + GoroutineCount: intPtr(goroutines), + ConcurrencyQueueDepth: concurrencyQueueDepth, + } + + return c.opsRepo.InsertSystemMetrics(ctx, input) +} + +func (c *OpsMetricsCollector) collectConcurrencyQueueDepth(parentCtx context.Context) *int { + if c == nil || c.accountRepo == nil || c.concurrencyService == nil { + return nil + } + if parentCtx 
== nil { + parentCtx = context.Background() + } + + // Best-effort: never let concurrency sampling break the metrics collector. + ctx, cancel := context.WithTimeout(parentCtx, 2*time.Second) + defer cancel() + + accounts, err := c.accountRepo.ListSchedulable(ctx) + if err != nil { + return nil + } + if len(accounts) == 0 { + zero := 0 + return &zero + } + + batch := make([]AccountWithConcurrency, 0, len(accounts)) + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + maxConc := acc.Concurrency + if maxConc < 0 { + maxConc = 0 + } + batch = append(batch, AccountWithConcurrency{ + ID: acc.ID, + MaxConcurrency: maxConc, + }) + } + if len(batch) == 0 { + zero := 0 + return &zero + } + + loadMap, err := c.concurrencyService.GetAccountsLoadBatch(ctx, batch) + if err != nil { + return nil + } + + var total int64 + for _, info := range loadMap { + if info == nil || info.WaitingCount <= 0 { + continue + } + total += int64(info.WaitingCount) + } + if total < 0 { + total = 0 + } + + maxInt := int64(^uint(0) >> 1) + if total > maxInt { + total = maxInt + } + v := int(total) + return &v +} + +type opsCollectedPercentiles struct { + p50 *int + p90 *int + p95 *int + p99 *int + avg *float64 + max *int +} + +func (c *OpsMetricsCollector) queryUsageCounts(ctx context.Context, start, end time.Time) (successCount int64, tokenConsumed int64, err error) { + q := ` +SELECT + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2` + + var tokens sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&successCount, &tokens); err != nil { + return 0, 0, err + } + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + return successCount, tokenConsumed, nil +} + +func (c *OpsMetricsCollector) queryUsageLatency(ctx context.Context, start, end time.Time) (duration opsCollectedPercentiles, ttft opsCollectedPercentiles, err error) { + { + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) AS p99, + AVG(duration_ms) AS avg_ms, + MAX(duration_ms) AS max_ms +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2 + AND duration_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return opsCollectedPercentiles{}, opsCollectedPercentiles{}, err + } + duration.p50 = floatToIntPtr(p50) + duration.p90 = floatToIntPtr(p90) + duration.p95 = floatToIntPtr(p95) + duration.p99 = floatToIntPtr(p99) + if avg.Valid { + v := roundTo1DP(avg.Float64) + duration.avg = &v + } + if max.Valid { + v := int(max.Int64) + duration.max = &v + } + } + + { + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) AS p99, + AVG(first_token_ms) AS avg_ms, + MAX(first_token_ms) AS max_ms +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2 + AND first_token_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg 
sql.NullFloat64 + var max sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return opsCollectedPercentiles{}, opsCollectedPercentiles{}, err + } + ttft.p50 = floatToIntPtr(p50) + ttft.p90 = floatToIntPtr(p90) + ttft.p95 = floatToIntPtr(p95) + ttft.p99 = floatToIntPtr(p99) + if avg.Valid { + v := roundTo1DP(avg.Float64) + ttft.avg = &v + } + if max.Valid { + v := int(max.Int64) + ttft.max = &v + } + } + + return duration, ttft, nil +} + +func (c *OpsMetricsCollector) queryErrorCounts(ctx context.Context, start, end time.Time) ( + errorTotal int64, + businessLimited int64, + errorSLA int64, + upstreamExcl429529 int64, + upstream429 int64, + upstream529 int64, + err error, +) { + q := ` +SELECT + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400), 0) AS error_total, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited), 0) AS business_limited, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited), 0) AS error_sla, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)), 0) AS upstream_excl, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429), 0) AS upstream_429, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529), 0) AS upstream_529 +FROM ops_error_logs +WHERE created_at >= $1 AND created_at < $2` + + if err := c.db.QueryRowContext(ctx, q, start, end).Scan( + &errorTotal, + &businessLimited, + &errorSLA, + &upstreamExcl429529, + &upstream429, + &upstream529, + ); err != nil { + return 0, 0, 0, 0, 0, 0, err + } + return errorTotal, businessLimited, errorSLA, upstreamExcl429529, upstream429, upstream529, nil +} + +type opsCollectedSystemStats struct { + cpuUsagePercent *float64 + memoryUsedMB *int64 + memoryTotalMB *int64 + memoryUsagePercent *float64 +} + +func (c *OpsMetricsCollector) collectSystemStats(ctx context.Context) (*opsCollectedSystemStats, error) { + out := &opsCollectedSystemStats{} + if ctx == nil { + ctx = context.Background() + } + + sampleAt := time.Now().UTC() + + // Prefer cgroup (container) metrics when available. + if cpuPct := c.tryCgroupCPUPercent(sampleAt); cpuPct != nil { + out.cpuUsagePercent = cpuPct + } + + cgroupUsed, cgroupTotal, cgroupOK := readCgroupMemoryBytes() + if cgroupOK { + usedMB := int64(cgroupUsed / bytesPerMB) + out.memoryUsedMB = &usedMB + if cgroupTotal > 0 { + totalMB := int64(cgroupTotal / bytesPerMB) + out.memoryTotalMB = &totalMB + pct := roundTo1DP(float64(cgroupUsed) / float64(cgroupTotal) * 100) + out.memoryUsagePercent = &pct + } + } + + // Fallback to host metrics if cgroup metrics are unavailable (or incomplete). + if out.cpuUsagePercent == nil { + if cpuPercents, err := cpu.PercentWithContext(ctx, 0, false); err == nil && len(cpuPercents) > 0 { + v := roundTo1DP(cpuPercents[0]) + out.cpuUsagePercent = &v + } + } + + // If total memory isn't available from cgroup (e.g. memory.max = "max"), fill total from host. 
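+	// Example of the partial-fill path: with cgroup v2 and memory.max set to
+	// the literal "max", usedMB comes from memory.current while totalMB stays
+	// nil above, so vm.Total supplies the total below and the percentage is
+	// derived from that mixed pair.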
+ if out.memoryUsedMB == nil || out.memoryTotalMB == nil || out.memoryUsagePercent == nil { + if vm, err := mem.VirtualMemoryWithContext(ctx); err == nil && vm != nil { + if out.memoryUsedMB == nil { + usedMB := int64(vm.Used / bytesPerMB) + out.memoryUsedMB = &usedMB + } + if out.memoryTotalMB == nil { + totalMB := int64(vm.Total / bytesPerMB) + out.memoryTotalMB = &totalMB + } + if out.memoryUsagePercent == nil { + if out.memoryUsedMB != nil && out.memoryTotalMB != nil && *out.memoryTotalMB > 0 { + pct := roundTo1DP(float64(*out.memoryUsedMB) / float64(*out.memoryTotalMB) * 100) + out.memoryUsagePercent = &pct + } else { + pct := roundTo1DP(vm.UsedPercent) + out.memoryUsagePercent = &pct + } + } + } + } + + return out, nil +} + +func (c *OpsMetricsCollector) tryCgroupCPUPercent(now time.Time) *float64 { + usageNanos, ok := readCgroupCPUUsageNanos() + if !ok { + return nil + } + + // Initialize baseline sample. + if c.lastCgroupCPUSampleAt.IsZero() { + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + return nil + } + + elapsed := now.Sub(c.lastCgroupCPUSampleAt) + if elapsed <= 0 { + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + return nil + } + + prev := c.lastCgroupCPUUsageNanos + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + + if usageNanos < prev { + // Counter reset (container restarted). + return nil + } + + deltaUsageSec := float64(usageNanos-prev) / 1e9 + elapsedSec := elapsed.Seconds() + if elapsedSec <= 0 { + return nil + } + + cores := readCgroupCPULimitCores() + if cores <= 0 { + // Can't reliably normalize; skip and fall back to gopsutil. + return nil + } + + pct := (deltaUsageSec / (elapsedSec * cores)) * 100 + if pct < 0 { + pct = 0 + } + // Clamp to avoid noise/jitter showing impossible values. + if pct > 100 { + pct = 100 + } + v := roundTo1DP(pct) + return &v +} + +func readCgroupMemoryBytes() (usedBytes uint64, totalBytes uint64, ok bool) { + // cgroup v2 (most common in modern containers) + if used, ok1 := readUintFile("/sys/fs/cgroup/memory.current"); ok1 { + usedBytes = used + rawMax, err := os.ReadFile("/sys/fs/cgroup/memory.max") + if err == nil { + s := strings.TrimSpace(string(rawMax)) + if s != "" && s != "max" { + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + totalBytes = v + } + } + } + return usedBytes, totalBytes, true + } + + // cgroup v1 fallback + if used, ok1 := readUintFile("/sys/fs/cgroup/memory/memory.usage_in_bytes"); ok1 { + usedBytes = used + if limit, ok2 := readUintFile("/sys/fs/cgroup/memory/memory.limit_in_bytes"); ok2 { + // Some environments report a very large number when unlimited. 
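+			// On 64-bit kernels an unlimited v1 controller commonly reports
+			// memory.limit_in_bytes as roughly 2^63 rounded down to the page
+			// size, which the (1<<60) guard below rejects as "no limit".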
+			if limit > 0 && limit < (1<<60) {
+				totalBytes = limit
+			}
+		}
+		return usedBytes, totalBytes, true
+	}
+
+	return 0, 0, false
+}
+
+func readCgroupCPUUsageNanos() (usageNanos uint64, ok bool) {
+	// cgroup v2: cpu.stat has usage_usec
+	if raw, err := os.ReadFile("/sys/fs/cgroup/cpu.stat"); err == nil {
+		lines := strings.Split(string(raw), "\n")
+		for _, line := range lines {
+			fields := strings.Fields(line)
+			if len(fields) != 2 {
+				continue
+			}
+			if fields[0] != "usage_usec" {
+				continue
+			}
+			v, err := strconv.ParseUint(fields[1], 10, 64)
+			if err != nil {
+				continue
+			}
+			return v * 1000, true
+		}
+	}
+
+	// cgroup v1: cpuacct.usage is in nanoseconds
+	if v, ok := readUintFile("/sys/fs/cgroup/cpuacct/cpuacct.usage"); ok {
+		return v, true
+	}
+
+	return 0, false
+}
+
+func readCgroupCPULimitCores() float64 {
+	// cgroup v2: cpu.max => "<quota> <period>" or "max <period>"
+	if raw, err := os.ReadFile("/sys/fs/cgroup/cpu.max"); err == nil {
+		fields := strings.Fields(string(raw))
+		if len(fields) >= 2 && fields[0] != "max" {
+			quota, err1 := strconv.ParseFloat(fields[0], 64)
+			period, err2 := strconv.ParseFloat(fields[1], 64)
+			if err1 == nil && err2 == nil && quota > 0 && period > 0 {
+				return quota / period
+			}
+		}
+	}
+
+	// cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us
+	quota, okQuota := readIntFile("/sys/fs/cgroup/cpu/cpu.cfs_quota_us")
+	period, okPeriod := readIntFile("/sys/fs/cgroup/cpu/cpu.cfs_period_us")
+	if okQuota && okPeriod && quota > 0 && period > 0 {
+		return float64(quota) / float64(period)
+	}
+
+	return 0
+}
+
+func readUintFile(path string) (uint64, bool) {
+	raw, err := os.ReadFile(path)
+	if err != nil {
+		return 0, false
+	}
+	s := strings.TrimSpace(string(raw))
+	if s == "" {
+		return 0, false
+	}
+	v, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, false
+	}
+	return v, true
+}
+
+func readIntFile(path string) (int64, bool) {
+	raw, err := os.ReadFile(path)
+	if err != nil {
+		return 0, false
+	}
+	s := strings.TrimSpace(string(raw))
+	if s == "" {
+		return 0, false
+	}
+	v, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		return 0, false
+	}
+	return v, true
+}
+
+func (c *OpsMetricsCollector) checkDB(ctx context.Context) bool {
+	if c == nil || c.db == nil {
+		return false
+	}
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	var one int
+	if err := c.db.QueryRowContext(ctx, "SELECT 1").Scan(&one); err != nil {
+		return false
+	}
+	return one == 1
+}
+
+func (c *OpsMetricsCollector) checkRedis(ctx context.Context) bool {
+	if c == nil || c.redisClient == nil {
+		return false
+	}
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	return c.redisClient.Ping(ctx).Err() == nil
+}
+
+func (c *OpsMetricsCollector) redisPoolStats() (total int, idle int, ok bool) {
+	if c == nil || c.redisClient == nil {
+		return 0, 0, false
+	}
+	stats := c.redisClient.PoolStats()
+	if stats == nil {
+		return 0, 0, false
+	}
+	return int(stats.TotalConns), int(stats.IdleConns), true
+}
+
+func (c *OpsMetricsCollector) dbPoolStats() (active int, idle int) {
+	if c == nil || c.db == nil {
+		return 0, 0
+	}
+	stats := c.db.Stats()
+	return stats.InUse, stats.Idle
+}
+
+var opsMetricsCollectorReleaseScript = redis.NewScript(`
+if redis.call("GET", KEYS[1]) == ARGV[1] then
+  return redis.call("DEL", KEYS[1])
+end
+return 0
+`)
+
+func (c *OpsMetricsCollector) tryAcquireLeaderLock(ctx context.Context) (func(), bool) {
+	if c == nil || c.redisClient == nil {
+		return nil, true
+	}
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	ok, err := c.redisClient.SetNX(ctx,
opsMetricsCollectorLeaderLockKey, c.instanceID, opsMetricsCollectorLeaderLockTTL).Result() + if err != nil { + // Prefer fail-closed to avoid stampeding the database when Redis is flaky. + // Fallback to a DB advisory lock when Redis is present but unavailable. + release, ok := tryAcquireDBAdvisoryLock(ctx, c.db, opsMetricsCollectorAdvisoryLockID) + if !ok { + c.maybeLogSkip() + return nil, false + } + return release, true + } + if !ok { + c.maybeLogSkip() + return nil, false + } + + release := func() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _, _ = opsMetricsCollectorReleaseScript.Run(ctx, c.redisClient, []string{opsMetricsCollectorLeaderLockKey}, c.instanceID).Result() + } + return release, true +} + +func (c *OpsMetricsCollector) maybeLogSkip() { + c.skipLogMu.Lock() + defer c.skipLogMu.Unlock() + + now := time.Now() + if !c.skipLogAt.IsZero() && now.Sub(c.skipLogAt) < time.Minute { + return + } + c.skipLogAt = now + log.Printf("[OpsMetricsCollector] leader lock held by another instance; skipping") +} + +func floatToIntPtr(v sql.NullFloat64) *int { + if !v.Valid { + return nil + } + n := int(math.Round(v.Float64)) + return &n +} + +func roundTo1DP(v float64) float64 { + return math.Round(v*10) / 10 +} + +func truncateString(s string, max int) string { + if max <= 0 { + return "" + } + if len(s) <= max { + return s + } + cut := s[:max] + for len(cut) > 0 && !utf8.ValidString(cut) { + cut = cut[:len(cut)-1] + } + return cut +} + +func boolPtr(v bool) *bool { + out := v + return &out +} + +func intPtr(v int) *int { + out := v + return &out +} + +func float64Ptr(v float64) *float64 { + out := v + return &out +} diff --git a/backend/internal/service/ops_models.go b/backend/internal/service/ops_models.go new file mode 100644 index 00000000..347cd52b --- /dev/null +++ b/backend/internal/service/ops_models.go @@ -0,0 +1,169 @@ +package service + +import "time" + +type OpsErrorLog struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + + // Standardized classification + // - phase: request|auth|routing|upstream|network|internal + // - owner: client|provider|platform + // - source: client_request|upstream_http|gateway + Phase string `json:"phase"` + Type string `json:"type"` + + Owner string `json:"error_owner"` + Source string `json:"error_source"` + + Severity string `json:"severity"` + + StatusCode int `json:"status_code"` + Platform string `json:"platform"` + Model string `json:"model"` + + IsRetryable bool `json:"is_retryable"` + RetryCount int `json:"retry_count"` + + Resolved bool `json:"resolved"` + ResolvedAt *time.Time `json:"resolved_at"` + ResolvedByUserID *int64 `json:"resolved_by_user_id"` + ResolvedByUserName string `json:"resolved_by_user_name"` + ResolvedRetryID *int64 `json:"resolved_retry_id"` + ResolvedStatusRaw string `json:"-"` + + ClientRequestID string `json:"client_request_id"` + RequestID string `json:"request_id"` + Message string `json:"message"` + + UserID *int64 `json:"user_id"` + UserEmail string `json:"user_email"` + APIKeyID *int64 `json:"api_key_id"` + AccountID *int64 `json:"account_id"` + AccountName string `json:"account_name"` + GroupID *int64 `json:"group_id"` + GroupName string `json:"group_name"` + + ClientIP *string `json:"client_ip"` + RequestPath string `json:"request_path"` + Stream bool `json:"stream"` +} + +type OpsErrorLogDetail struct { + OpsErrorLog + + ErrorBody string `json:"error_body"` + UserAgent string `json:"user_agent"` + + // Upstream context (optional) + 
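+	// These mirror the upstream_* columns on ops_error_logs (e.g.
+	// upstream_status_code, upstream_errors) and are typically nil/empty for
+	// errors raised before any upstream call was made, such as auth or
+	// routing phase failures.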
UpstreamStatusCode *int `json:"upstream_status_code,omitempty"` + UpstreamErrorMessage string `json:"upstream_error_message,omitempty"` + UpstreamErrorDetail string `json:"upstream_error_detail,omitempty"` + UpstreamErrors string `json:"upstream_errors,omitempty"` // JSON array (string) for display/parsing + + // Timings (optional) + AuthLatencyMs *int64 `json:"auth_latency_ms"` + RoutingLatencyMs *int64 `json:"routing_latency_ms"` + UpstreamLatencyMs *int64 `json:"upstream_latency_ms"` + ResponseLatencyMs *int64 `json:"response_latency_ms"` + TimeToFirstTokenMs *int64 `json:"time_to_first_token_ms"` + + // Retry context + RequestBody string `json:"request_body"` + RequestBodyTruncated bool `json:"request_body_truncated"` + RequestBodyBytes *int `json:"request_body_bytes"` + RequestHeaders string `json:"request_headers,omitempty"` + + // vNext metric semantics + IsBusinessLimited bool `json:"is_business_limited"` +} + +type OpsErrorLogFilter struct { + StartTime *time.Time + EndTime *time.Time + + Platform string + GroupID *int64 + AccountID *int64 + + StatusCodes []int + StatusCodesOther bool + Phase string + Owner string + Source string + Resolved *bool + Query string + UserQuery string // Search by user email + + // Optional correlation keys for exact matching. + RequestID string + ClientRequestID string + + // View controls error categorization for list endpoints. + // - errors: show actionable errors (exclude business-limited / 429 / 529) + // - excluded: only show excluded errors + // - all: show everything + View string + + Page int + PageSize int +} + +type OpsErrorLogList struct { + Errors []*OpsErrorLog `json:"errors"` + Total int `json:"total"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +type OpsRetryAttempt struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + + RequestedByUserID int64 `json:"requested_by_user_id"` + SourceErrorID int64 `json:"source_error_id"` + Mode string `json:"mode"` + PinnedAccountID *int64 `json:"pinned_account_id"` + PinnedAccountName string `json:"pinned_account_name"` + + Status string `json:"status"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + DurationMs *int64 `json:"duration_ms"` + + // Persisted execution results (best-effort) + Success *bool `json:"success"` + HTTPStatusCode *int `json:"http_status_code"` + UpstreamRequestID *string `json:"upstream_request_id"` + UsedAccountID *int64 `json:"used_account_id"` + UsedAccountName string `json:"used_account_name"` + ResponsePreview *string `json:"response_preview"` + ResponseTruncated *bool `json:"response_truncated"` + + // Optional correlation + ResultRequestID *string `json:"result_request_id"` + ResultErrorID *int64 `json:"result_error_id"` + + ErrorMessage *string `json:"error_message"` +} + +type OpsRetryResult struct { + AttemptID int64 `json:"attempt_id"` + Mode string `json:"mode"` + Status string `json:"status"` + + PinnedAccountID *int64 `json:"pinned_account_id"` + UsedAccountID *int64 `json:"used_account_id"` + + HTTPStatusCode int `json:"http_status_code"` + UpstreamRequestID string `json:"upstream_request_id"` + + ResponsePreview string `json:"response_preview"` + ResponseTruncated bool `json:"response_truncated"` + + ErrorMessage string `json:"error_message"` + + StartedAt time.Time `json:"started_at"` + FinishedAt time.Time `json:"finished_at"` + DurationMs int64 `json:"duration_ms"` +} diff --git a/backend/internal/service/ops_port.go b/backend/internal/service/ops_port.go new file mode 100644 
index 00000000..cdeea241 --- /dev/null +++ b/backend/internal/service/ops_port.go @@ -0,0 +1,259 @@ +package service + +import ( + "context" + "time" +) + +type OpsRepository interface { + InsertErrorLog(ctx context.Context, input *OpsInsertErrorLogInput) (int64, error) + ListErrorLogs(ctx context.Context, filter *OpsErrorLogFilter) (*OpsErrorLogList, error) + GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLogDetail, error) + ListRequestDetails(ctx context.Context, filter *OpsRequestDetailFilter) ([]*OpsRequestDetail, int64, error) + + InsertRetryAttempt(ctx context.Context, input *OpsInsertRetryAttemptInput) (int64, error) + UpdateRetryAttempt(ctx context.Context, input *OpsUpdateRetryAttemptInput) error + GetLatestRetryAttemptForError(ctx context.Context, sourceErrorID int64) (*OpsRetryAttempt, error) + ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*OpsRetryAttempt, error) + UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64, resolvedAt *time.Time) error + + // Lightweight window stats (for realtime WS / quick sampling). + GetWindowStats(ctx context.Context, filter *OpsDashboardFilter) (*OpsWindowStats, error) + // Lightweight realtime traffic summary (for the Ops dashboard header card). + GetRealtimeTrafficSummary(ctx context.Context, filter *OpsDashboardFilter) (*OpsRealtimeTrafficSummary, error) + + GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) + GetThroughputTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsThroughputTrendResponse, error) + GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error) + GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error) + GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error) + + InsertSystemMetrics(ctx context.Context, input *OpsInsertSystemMetricsInput) error + GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*OpsSystemMetricsSnapshot, error) + + UpsertJobHeartbeat(ctx context.Context, input *OpsUpsertJobHeartbeatInput) error + ListJobHeartbeats(ctx context.Context) ([]*OpsJobHeartbeat, error) + + // Alerts (rules + events) + ListAlertRules(ctx context.Context) ([]*OpsAlertRule, error) + CreateAlertRule(ctx context.Context, input *OpsAlertRule) (*OpsAlertRule, error) + UpdateAlertRule(ctx context.Context, input *OpsAlertRule) (*OpsAlertRule, error) + DeleteAlertRule(ctx context.Context, id int64) error + + ListAlertEvents(ctx context.Context, filter *OpsAlertEventFilter) ([]*OpsAlertEvent, error) + GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error) + GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) + GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) + CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) (*OpsAlertEvent, error) + UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error + UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error + + // Alert silences + CreateAlertSilence(ctx context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error) + IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) + + // Pre-aggregation 
(hourly/daily) used for long-window dashboard performance.
+	UpsertHourlyMetrics(ctx context.Context, startTime, endTime time.Time) error
+	UpsertDailyMetrics(ctx context.Context, startTime, endTime time.Time) error
+	GetLatestHourlyBucketStart(ctx context.Context) (time.Time, bool, error)
+	GetLatestDailyBucketDate(ctx context.Context) (time.Time, bool, error)
+}
+
+type OpsInsertErrorLogInput struct {
+	RequestID       string
+	ClientRequestID string
+
+	UserID    *int64
+	APIKeyID  *int64
+	AccountID *int64
+	GroupID   *int64
+	ClientIP  *string
+
+	Platform    string
+	Model       string
+	RequestPath string
+	Stream      bool
+	UserAgent   string
+
+	ErrorPhase        string
+	ErrorType         string
+	Severity          string
+	StatusCode        int
+	IsBusinessLimited bool
+	IsCountTokens     bool // whether this is a count_tokens request
+
+	ErrorMessage string
+	ErrorBody    string
+
+	ErrorSource string
+	ErrorOwner  string
+
+	UpstreamStatusCode   *int
+	UpstreamErrorMessage *string
+	UpstreamErrorDetail  *string
+	// UpstreamErrors captures all upstream error attempts observed while handling this request.
+	// It is populated during request processing (gin context) and sanitized+serialized by OpsService.
+	UpstreamErrors []*OpsUpstreamErrorEvent
+	// UpstreamErrorsJSON is the sanitized JSON string stored into ops_error_logs.upstream_errors.
+	// It is set by OpsService.RecordError before persisting.
+	UpstreamErrorsJSON *string
+
+	TimeToFirstTokenMs *int64
+
+	RequestBodyJSON      *string // sanitized JSON string (not raw bytes)
+	RequestBodyTruncated bool
+	RequestBodyBytes     *int
+	RequestHeadersJSON   *string // optional JSON string
+
+	IsRetryable bool
+	RetryCount  int
+
+	CreatedAt time.Time
+}
+
+type OpsInsertRetryAttemptInput struct {
+	RequestedByUserID int64
+	SourceErrorID     int64
+	Mode              string
+	PinnedAccountID   *int64
+
+	// running|queued etc.
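+	// Typical lifecycle: insert with Status "running" and StartedAt=now,
+	// then UpdateRetryAttempt finalizes the row as succeeded|failed with
+	// FinishedAt/DurationMs (see OpsUpdateRetryAttemptInput below).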
+ Status string + StartedAt time.Time +} + +type OpsUpdateRetryAttemptInput struct { + ID int64 + + // succeeded|failed + Status string + FinishedAt time.Time + DurationMs int64 + + // Persisted execution results (best-effort) + Success *bool + HTTPStatusCode *int + UpstreamRequestID *string + UsedAccountID *int64 + ResponsePreview *string + ResponseTruncated *bool + + // Optional correlation (legacy fields kept) + ResultRequestID *string + ResultErrorID *int64 + + ErrorMessage *string +} + +type OpsInsertSystemMetricsInput struct { + CreatedAt time.Time + WindowMinutes int + + Platform *string + GroupID *int64 + + SuccessCount int64 + ErrorCountTotal int64 + BusinessLimitedCount int64 + ErrorCountSLA int64 + + UpstreamErrorCountExcl429529 int64 + Upstream429Count int64 + Upstream529Count int64 + + TokenConsumed int64 + + QPS *float64 + TPS *float64 + + DurationP50Ms *int + DurationP90Ms *int + DurationP95Ms *int + DurationP99Ms *int + DurationAvgMs *float64 + DurationMaxMs *int + + TTFTP50Ms *int + TTFTP90Ms *int + TTFTP95Ms *int + TTFTP99Ms *int + TTFTAvgMs *float64 + TTFTMaxMs *int + + CPUUsagePercent *float64 + MemoryUsedMB *int64 + MemoryTotalMB *int64 + MemoryUsagePercent *float64 + + DBOK *bool + RedisOK *bool + + RedisConnTotal *int + RedisConnIdle *int + + DBConnActive *int + DBConnIdle *int + DBConnWaiting *int + + GoroutineCount *int + ConcurrencyQueueDepth *int +} + +type OpsSystemMetricsSnapshot struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + WindowMinutes int `json:"window_minutes"` + + CPUUsagePercent *float64 `json:"cpu_usage_percent"` + MemoryUsedMB *int64 `json:"memory_used_mb"` + MemoryTotalMB *int64 `json:"memory_total_mb"` + MemoryUsagePercent *float64 `json:"memory_usage_percent"` + + DBOK *bool `json:"db_ok"` + RedisOK *bool `json:"redis_ok"` + + // Config-derived limits (best-effort). These are not historical metrics; they help UI render "current vs max". 
+ DBMaxOpenConns *int `json:"db_max_open_conns"` + RedisPoolSize *int `json:"redis_pool_size"` + + RedisConnTotal *int `json:"redis_conn_total"` + RedisConnIdle *int `json:"redis_conn_idle"` + + DBConnActive *int `json:"db_conn_active"` + DBConnIdle *int `json:"db_conn_idle"` + DBConnWaiting *int `json:"db_conn_waiting"` + + GoroutineCount *int `json:"goroutine_count"` + ConcurrencyQueueDepth *int `json:"concurrency_queue_depth"` +} + +type OpsUpsertJobHeartbeatInput struct { + JobName string + + LastRunAt *time.Time + LastSuccessAt *time.Time + LastErrorAt *time.Time + LastError *string + LastDurationMs *int64 +} + +type OpsJobHeartbeat struct { + JobName string `json:"job_name"` + + LastRunAt *time.Time `json:"last_run_at"` + LastSuccessAt *time.Time `json:"last_success_at"` + LastErrorAt *time.Time `json:"last_error_at"` + LastError *string `json:"last_error"` + LastDurationMs *int64 `json:"last_duration_ms"` + + UpdatedAt time.Time `json:"updated_at"` +} + +type OpsWindowStats struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + + SuccessCount int64 `json:"success_count"` + ErrorCountTotal int64 `json:"error_count_total"` + TokenConsumed int64 `json:"token_consumed"` +} diff --git a/backend/internal/service/ops_query_mode.go b/backend/internal/service/ops_query_mode.go new file mode 100644 index 00000000..e6fa9c1e --- /dev/null +++ b/backend/internal/service/ops_query_mode.go @@ -0,0 +1,40 @@ +package service + +import ( + "errors" + "strings" +) + +type OpsQueryMode string + +const ( + OpsQueryModeAuto OpsQueryMode = "auto" + OpsQueryModeRaw OpsQueryMode = "raw" + OpsQueryModePreagg OpsQueryMode = "preagg" +) + +// ErrOpsPreaggregatedNotPopulated indicates that raw logs exist for a window, but the +// pre-aggregation tables are not populated yet. This is primarily used to implement +// the forced `preagg` mode UX. +var ErrOpsPreaggregatedNotPopulated = errors.New("ops pre-aggregated tables not populated") + +func ParseOpsQueryMode(raw string) OpsQueryMode { + v := strings.ToLower(strings.TrimSpace(raw)) + switch v { + case string(OpsQueryModeRaw): + return OpsQueryModeRaw + case string(OpsQueryModePreagg): + return OpsQueryModePreagg + default: + return OpsQueryModeAuto + } +} + +func (m OpsQueryMode) IsValid() bool { + switch m { + case OpsQueryModeAuto, OpsQueryModeRaw, OpsQueryModePreagg: + return true + default: + return false + } +} diff --git a/backend/internal/service/ops_realtime.go b/backend/internal/service/ops_realtime.go new file mode 100644 index 00000000..479b9482 --- /dev/null +++ b/backend/internal/service/ops_realtime.go @@ -0,0 +1,36 @@ +package service + +import ( + "context" + "errors" + "strings" +) + +// IsRealtimeMonitoringEnabled returns true when realtime ops features are enabled. +// +// This is a soft switch controlled by the DB setting `ops_realtime_monitoring_enabled`, +// and it is also gated by the hard switch/soft switch of overall ops monitoring. +func (s *OpsService) IsRealtimeMonitoringEnabled(ctx context.Context) bool { + if !s.IsMonitoringEnabled(ctx) { + return false + } + if s.settingRepo == nil { + return true + } + + value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsRealtimeMonitoringEnabled) + if err != nil { + // Default enabled when key is missing; fail-open on transient errors. 
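+		// Illustrative decision table (assumed from this function plus
+		// IsMonitoringEnabled):
+		//   cfg.Ops.Enabled == false                     -> disabled (hard switch)
+		//   ops_realtime_monitoring_enabled = "off"/"0"  -> disabled (soft switch)
+		//   key missing or transient read error          -> enabled (fail-open)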
+ if errors.Is(err, ErrSettingNotFound) { + return true + } + return true + } + + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} diff --git a/backend/internal/service/ops_realtime_models.go b/backend/internal/service/ops_realtime_models.go new file mode 100644 index 00000000..f7514a24 --- /dev/null +++ b/backend/internal/service/ops_realtime_models.go @@ -0,0 +1,81 @@ +package service + +import "time" + +// PlatformConcurrencyInfo aggregates concurrency usage by platform. +type PlatformConcurrencyInfo struct { + Platform string `json:"platform"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// GroupConcurrencyInfo aggregates concurrency usage by group. +// +// Note: one account can belong to multiple groups; group totals are therefore not additive across groups. +type GroupConcurrencyInfo struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// AccountConcurrencyInfo represents real-time concurrency usage for a single account. +type AccountConcurrencyInfo struct { + AccountID int64 `json:"account_id"` + AccountName string `json:"account_name"` + Platform string `json:"platform"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// PlatformAvailability aggregates account availability by platform. +type PlatformAvailability struct { + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` +} + +// GroupAvailability aggregates account availability by group. +type GroupAvailability struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` +} + +// AccountAvailability represents current availability for a single account. 
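+//
+// A rate-limited account might serialize as (illustrative values only):
+//
+//	{"account_id":7,"account_name":"acc-7","status":"rate_limited",
+//	 "is_available":false,"is_rate_limited":true,"rate_limit_remaining_sec":42}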
+type AccountAvailability struct { + AccountID int64 `json:"account_id"` + AccountName string `json:"account_name"` + Platform string `json:"platform"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + + Status string `json:"status"` + + IsAvailable bool `json:"is_available"` + IsRateLimited bool `json:"is_rate_limited"` + IsOverloaded bool `json:"is_overloaded"` + HasError bool `json:"has_error"` + + RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` + RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` + OverloadUntil *time.Time `json:"overload_until"` + OverloadRemainingSec *int64 `json:"overload_remaining_sec"` + ErrorMessage string `json:"error_message"` + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` +} diff --git a/backend/internal/service/ops_realtime_traffic.go b/backend/internal/service/ops_realtime_traffic.go new file mode 100644 index 00000000..458905c5 --- /dev/null +++ b/backend/internal/service/ops_realtime_traffic.go @@ -0,0 +1,36 @@ +package service + +import ( + "context" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// GetRealtimeTrafficSummary returns QPS/TPS current/peak/avg for the provided window. +// This is used by the Ops dashboard "Realtime Traffic" card and is intentionally lightweight. +func (s *OpsService) GetRealtimeTrafficSummary(ctx context.Context, filter *OpsDashboardFilter) (*OpsRealtimeTrafficSummary, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + if filter.EndTime.Sub(filter.StartTime) > time.Hour { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_TOO_LARGE", "invalid time range: max window is 1 hour") + } + + // Realtime traffic summary always uses raw logs (minute granularity peaks). + filter.QueryMode = OpsQueryModeRaw + + return s.opsRepo.GetRealtimeTrafficSummary(ctx, filter) +} diff --git a/backend/internal/service/ops_realtime_traffic_models.go b/backend/internal/service/ops_realtime_traffic_models.go new file mode 100644 index 00000000..e88a890b --- /dev/null +++ b/backend/internal/service/ops_realtime_traffic_models.go @@ -0,0 +1,19 @@ +package service + +import "time" + +// OpsRealtimeTrafficSummary is a lightweight summary used by the Ops dashboard "Realtime Traffic" card. +// It reports QPS/TPS current/peak/avg for the requested time window. +type OpsRealtimeTrafficSummary struct { + // Window is a normalized label (e.g. "1min", "5min", "30min", "1h"). 
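+	// Shape sketch (OpsRateSummary sub-field names are assumed; values are
+	// hypothetical): {"window":"5min","qps":{"current":12.3,"peak":40.1,"avg":18.7},...}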
+ Window string `json:"window"` + + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + + Platform string `json:"platform"` + GroupID *int64 `json:"group_id"` + + QPS OpsRateSummary `json:"qps"` + TPS OpsRateSummary `json:"tps"` +} diff --git a/backend/internal/service/ops_request_details.go b/backend/internal/service/ops_request_details.go new file mode 100644 index 00000000..12b9aa1b --- /dev/null +++ b/backend/internal/service/ops_request_details.go @@ -0,0 +1,151 @@ +package service + +import ( + "context" + "time" +) + +type OpsRequestKind string + +const ( + OpsRequestKindSuccess OpsRequestKind = "success" + OpsRequestKindError OpsRequestKind = "error" +) + +// OpsRequestDetail is a request-level view across success (usage_logs) and error (ops_error_logs). +// It powers "request drilldown" UIs without exposing full request bodies for successful requests. +type OpsRequestDetail struct { + Kind OpsRequestKind `json:"kind"` + CreatedAt time.Time `json:"created_at"` + RequestID string `json:"request_id"` + + Platform string `json:"platform,omitempty"` + Model string `json:"model,omitempty"` + + DurationMs *int `json:"duration_ms,omitempty"` + StatusCode *int `json:"status_code,omitempty"` + + // When Kind == "error", ErrorID links to /admin/ops/errors/:id. + ErrorID *int64 `json:"error_id,omitempty"` + + Phase string `json:"phase,omitempty"` + Severity string `json:"severity,omitempty"` + Message string `json:"message,omitempty"` + + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + + Stream bool `json:"stream"` +} + +type OpsRequestDetailFilter struct { + StartTime *time.Time + EndTime *time.Time + + // kind: success|error|all + Kind string + + Platform string + GroupID *int64 + + UserID *int64 + APIKeyID *int64 + AccountID *int64 + + Model string + RequestID string + Query string + + MinDurationMs *int + MaxDurationMs *int + + // Sort: created_at_desc (default) or duration_desc. 
+ Sort string + + Page int + PageSize int +} + +func (f *OpsRequestDetailFilter) Normalize() (page, pageSize int, startTime, endTime time.Time) { + page = 1 + pageSize = 50 + endTime = time.Now() + startTime = endTime.Add(-1 * time.Hour) + + if f == nil { + return page, pageSize, startTime, endTime + } + + if f.Page > 0 { + page = f.Page + } + if f.PageSize > 0 { + pageSize = f.PageSize + } + if pageSize > 100 { + pageSize = 100 + } + + if f.EndTime != nil { + endTime = *f.EndTime + } + if f.StartTime != nil { + startTime = *f.StartTime + } else if f.EndTime != nil { + startTime = endTime.Add(-1 * time.Hour) + } + + if startTime.After(endTime) { + startTime, endTime = endTime, startTime + } + + return page, pageSize, startTime, endTime +} + +type OpsRequestDetailList struct { + Items []*OpsRequestDetail `json:"items"` + Total int64 `json:"total"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +func (s *OpsService) ListRequestDetails(ctx context.Context, filter *OpsRequestDetailFilter) (*OpsRequestDetailList, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return &OpsRequestDetailList{ + Items: []*OpsRequestDetail{}, + Total: 0, + Page: 1, + PageSize: 50, + }, nil + } + + page, pageSize, startTime, endTime := filter.Normalize() + filterCopy := &OpsRequestDetailFilter{} + if filter != nil { + *filterCopy = *filter + } + filterCopy.Page = page + filterCopy.PageSize = pageSize + filterCopy.StartTime = &startTime + filterCopy.EndTime = &endTime + + items, total, err := s.opsRepo.ListRequestDetails(ctx, filterCopy) + if err != nil { + return nil, err + } + if items == nil { + items = []*OpsRequestDetail{} + } + + return &OpsRequestDetailList{ + Items: items, + Total: total, + Page: page, + PageSize: pageSize, + }, nil +} diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go new file mode 100644 index 00000000..25c10af6 --- /dev/null +++ b/backend/internal/service/ops_retry.go @@ -0,0 +1,720 @@ +package service + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/gin-gonic/gin" + "github.com/lib/pq" +) + +const ( + OpsRetryModeClient = "client" + OpsRetryModeUpstream = "upstream" +) + +const ( + opsRetryStatusRunning = "running" + opsRetryStatusSucceeded = "succeeded" + opsRetryStatusFailed = "failed" +) + +const ( + opsRetryTimeout = 60 * time.Second + opsRetryCaptureBytesLimit = 64 * 1024 + opsRetryResponsePreviewMax = 8 * 1024 + opsRetryMinIntervalPerError = 10 * time.Second + opsRetryMaxAccountSwitches = 3 +) + +var opsRetryRequestHeaderAllowlist = map[string]bool{ + "anthropic-beta": true, + "anthropic-version": true, +} + +type opsRetryRequestType string + +const ( + opsRetryTypeMessages opsRetryRequestType = "messages" + opsRetryTypeOpenAI opsRetryRequestType = "openai_responses" + opsRetryTypeGeminiV1B opsRetryRequestType = "gemini_v1beta" +) + +type limitedResponseWriter struct { + header http.Header + wroteHeader bool + + limit int + totalWritten int64 + buf bytes.Buffer +} + +func newLimitedResponseWriter(limit int) *limitedResponseWriter { + if limit <= 0 { + limit = 1 + } + return &limitedResponseWriter{ + header: make(http.Header), + limit: limit, + } +} + +func (w *limitedResponseWriter) Header() http.Header { + return w.header +} + +func (w *limitedResponseWriter) WriteHeader(statusCode int) { + if 
w.wroteHeader { + return + } + w.wroteHeader = true +} + +func (w *limitedResponseWriter) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + w.totalWritten += int64(len(p)) + + if w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(p) > remaining { + _, _ = w.buf.Write(p[:remaining]) + } else { + _, _ = w.buf.Write(p) + } + } + + // Pretend we wrote everything to avoid upstream/client code treating it as an error. + return len(p), nil +} + +func (w *limitedResponseWriter) Flush() {} + +func (w *limitedResponseWriter) bodyBytes() []byte { + return w.buf.Bytes() +} + +func (w *limitedResponseWriter) truncated() bool { + return w.totalWritten > int64(w.limit) +} + +const ( + OpsRetryModeUpstreamEvent = "upstream_event" +) + +func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, errorID int64, mode string, pinnedAccountID *int64) (*OpsRetryResult, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + + mode = strings.ToLower(strings.TrimSpace(mode)) + switch mode { + case OpsRetryModeClient, OpsRetryModeUpstream: + default: + return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_MODE", "mode must be client or upstream") + } + + errorLog, err := s.GetErrorLogByID(ctx, errorID) + if err != nil { + return nil, err + } + if errorLog == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + if strings.TrimSpace(errorLog.RequestBody) == "" { + return nil, infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry") + } + + var pinned *int64 + if mode == OpsRetryModeUpstream { + if pinnedAccountID != nil && *pinnedAccountID > 0 { + pinned = pinnedAccountID + } else if errorLog.AccountID != nil && *errorLog.AccountID > 0 { + pinned = errorLog.AccountID + } else { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "pinned_account_id is required for upstream retry") + } + } + + return s.retryWithErrorLog(ctx, requestedByUserID, errorID, mode, mode, pinned, errorLog) +} + +// RetryUpstreamEvent retries a specific upstream attempt captured inside ops_error_logs.upstream_errors. +// idx is 0-based. It always pins the original event account_id. 
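+//
+// Illustrative call (IDs are hypothetical; a handler would supply real ones):
+//
+//	res, err := opsService.RetryUpstreamEvent(ctx, adminUserID, errorID, 1)
+//	if err == nil && res.Status == "succeeded" {
+//		// retryWithErrorLog has already marked the source error resolved
+//	}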
+func (s *OpsService) RetryUpstreamEvent(ctx context.Context, requestedByUserID int64, errorID int64, idx int) (*OpsRetryResult, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if idx < 0 { + return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_UPSTREAM_IDX", "invalid upstream idx") + } + + errorLog, err := s.GetErrorLogByID(ctx, errorID) + if err != nil { + return nil, err + } + if errorLog == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + + events, err := ParseOpsUpstreamErrors(errorLog.UpstreamErrors) + if err != nil { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENTS_INVALID", "invalid upstream_errors") + } + if idx >= len(events) { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_IDX_OOB", "upstream idx out of range") + } + ev := events[idx] + if ev == nil { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENT_MISSING", "upstream event missing") + } + if ev.AccountID <= 0 { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for upstream retry") + } + + upstreamBody := strings.TrimSpace(ev.UpstreamRequestBody) + if upstreamBody == "" { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_NO_REQUEST_BODY", "No upstream request body found to retry") + } + + override := *errorLog + override.RequestBody = upstreamBody + pinned := ev.AccountID + + // Persist as upstream_event, execute as upstream pinned retry. + return s.retryWithErrorLog(ctx, requestedByUserID, errorID, OpsRetryModeUpstreamEvent, OpsRetryModeUpstream, &pinned, &override) +} + +func (s *OpsService) retryWithErrorLog(ctx context.Context, requestedByUserID int64, errorID int64, mode string, execMode string, pinnedAccountID *int64, errorLog *OpsErrorLogDetail) (*OpsRetryResult, error) { + latest, err := s.opsRepo.GetLatestRetryAttemptForError(ctx, errorID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.InternalServer("OPS_RETRY_LOAD_LATEST_FAILED", "Failed to check retry status").WithCause(err) + } + if latest != nil { + if strings.EqualFold(latest.Status, opsRetryStatusRunning) || strings.EqualFold(latest.Status, "queued") { + return nil, infraerrors.Conflict("OPS_RETRY_IN_PROGRESS", "A retry is already in progress for this error") + } + + lastAttemptAt := latest.CreatedAt + if latest.FinishedAt != nil && !latest.FinishedAt.IsZero() { + lastAttemptAt = *latest.FinishedAt + } else if latest.StartedAt != nil && !latest.StartedAt.IsZero() { + lastAttemptAt = *latest.StartedAt + } + + if time.Since(lastAttemptAt) < opsRetryMinIntervalPerError { + return nil, infraerrors.Conflict("OPS_RETRY_TOO_FREQUENT", "Please wait before retrying this error again") + } + } + + if errorLog == nil || strings.TrimSpace(errorLog.RequestBody) == "" { + return nil, infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry") + } + + var pinned *int64 + if execMode == OpsRetryModeUpstream { + if pinnedAccountID != nil && *pinnedAccountID > 0 { + pinned = pinnedAccountID + } else if errorLog.AccountID != nil && *errorLog.AccountID > 0 { + pinned = errorLog.AccountID + } else { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for upstream retry") + } + } + + startedAt := time.Now() + attemptID, err := s.opsRepo.InsertRetryAttempt(ctx, 
&OpsInsertRetryAttemptInput{ + RequestedByUserID: requestedByUserID, + SourceErrorID: errorID, + Mode: mode, + PinnedAccountID: pinned, + Status: opsRetryStatusRunning, + StartedAt: startedAt, + }) + if err != nil { + var pqErr *pq.Error + if errors.As(err, &pqErr) && string(pqErr.Code) == "23505" { + return nil, infraerrors.Conflict("OPS_RETRY_IN_PROGRESS", "A retry is already in progress for this error") + } + return nil, infraerrors.InternalServer("OPS_RETRY_CREATE_ATTEMPT_FAILED", "Failed to create retry attempt").WithCause(err) + } + + result := &OpsRetryResult{ + AttemptID: attemptID, + Mode: mode, + Status: opsRetryStatusFailed, + PinnedAccountID: pinned, + HTTPStatusCode: 0, + UpstreamRequestID: "", + ResponsePreview: "", + ResponseTruncated: false, + ErrorMessage: "", + StartedAt: startedAt, + } + + execCtx, cancel := context.WithTimeout(ctx, opsRetryTimeout) + defer cancel() + + execRes := s.executeRetry(execCtx, errorLog, execMode, pinned) + + finishedAt := time.Now() + result.FinishedAt = finishedAt + result.DurationMs = finishedAt.Sub(startedAt).Milliseconds() + + if execRes != nil { + result.Status = execRes.status + result.UsedAccountID = execRes.usedAccountID + result.HTTPStatusCode = execRes.httpStatusCode + result.UpstreamRequestID = execRes.upstreamRequestID + result.ResponsePreview = execRes.responsePreview + result.ResponseTruncated = execRes.responseTruncated + result.ErrorMessage = execRes.errorMessage + } + + updateCtx, updateCancel := context.WithTimeout(context.Background(), 3*time.Second) + defer updateCancel() + + var updateErrMsg *string + if strings.TrimSpace(result.ErrorMessage) != "" { + msg := result.ErrorMessage + updateErrMsg = &msg + } + // Keep legacy result_request_id empty; use upstream_request_id instead. + var resultRequestID *string + + finalStatus := result.Status + if strings.TrimSpace(finalStatus) == "" { + finalStatus = opsRetryStatusFailed + } + + success := strings.EqualFold(finalStatus, opsRetryStatusSucceeded) + httpStatus := result.HTTPStatusCode + upstreamReqID := result.UpstreamRequestID + usedAccountID := result.UsedAccountID + preview := result.ResponsePreview + truncated := result.ResponseTruncated + + if err := s.opsRepo.UpdateRetryAttempt(updateCtx, &OpsUpdateRetryAttemptInput{ + ID: attemptID, + Status: finalStatus, + FinishedAt: finishedAt, + DurationMs: result.DurationMs, + Success: &success, + HTTPStatusCode: &httpStatus, + UpstreamRequestID: &upstreamReqID, + UsedAccountID: usedAccountID, + ResponsePreview: &preview, + ResponseTruncated: &truncated, + ResultRequestID: resultRequestID, + ErrorMessage: updateErrMsg, + }); err != nil { + log.Printf("[Ops] UpdateRetryAttempt failed: %v", err) + } else if success { + if err := s.opsRepo.UpdateErrorResolution(updateCtx, errorID, true, &requestedByUserID, &attemptID, &finishedAt); err != nil { + log.Printf("[Ops] UpdateErrorResolution failed: %v", err) + } + } + + return result, nil +} + +type opsRetryExecution struct { + status string + + usedAccountID *int64 + httpStatusCode int + upstreamRequestID string + + responsePreview string + responseTruncated bool + + errorMessage string +} + +func (s *OpsService) executeRetry(ctx context.Context, errorLog *OpsErrorLogDetail, mode string, pinnedAccountID *int64) *opsRetryExecution { + if errorLog == nil { + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "missing error log", + } + } + + reqType := detectOpsRetryType(errorLog.RequestPath) + bodyBytes := []byte(errorLog.RequestBody) + + switch reqType { + case 
opsRetryTypeMessages: + bodyBytes = FilterThinkingBlocksForRetry(bodyBytes) + case opsRetryTypeOpenAI, opsRetryTypeGeminiV1B: + // No-op + } + + switch strings.ToLower(strings.TrimSpace(mode)) { + case OpsRetryModeUpstream: + if pinnedAccountID == nil || *pinnedAccountID <= 0 { + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "pinned_account_id required for upstream retry", + } + } + return s.executePinnedRetry(ctx, reqType, errorLog, bodyBytes, *pinnedAccountID) + case OpsRetryModeClient: + return s.executeClientRetry(ctx, reqType, errorLog, bodyBytes) + default: + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "invalid retry mode", + } + } +} + +func detectOpsRetryType(path string) opsRetryRequestType { + p := strings.ToLower(strings.TrimSpace(path)) + switch { + case strings.Contains(p, "/responses"): + return opsRetryTypeOpenAI + case strings.Contains(p, "/v1beta/"): + return opsRetryTypeGeminiV1B + default: + return opsRetryTypeMessages + } +} + +func (s *OpsService) executePinnedRetry(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte, pinnedAccountID int64) *opsRetryExecution { + if s.accountRepo == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account repository not available"} + } + + account, err := s.accountRepo.GetByID(ctx, pinnedAccountID) + if err != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: fmt.Sprintf("account not found: %v", err)} + } + if account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account not found"} + } + if !account.IsSchedulable() { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account is not schedulable"} + } + if errorLog.GroupID != nil && *errorLog.GroupID > 0 { + if !containsInt64(account.GroupIDs, *errorLog.GroupID) { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "pinned account is not in the same group as the original request"} + } + } + + var release func() + if s.concurrencyService != nil { + acq, err := s.concurrencyService.AcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: fmt.Sprintf("acquire account slot failed: %v", err)} + } + if acq == nil || !acq.Acquired { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account concurrency limit reached"} + } + release = acq.ReleaseFunc + } + if release != nil { + defer release() + } + + usedID := account.ID + exec := s.executeWithAccount(ctx, reqType, errorLog, body, account) + exec.usedAccountID = &usedID + if exec.status == "" { + exec.status = opsRetryStatusFailed + } + return exec +} + +func (s *OpsService) executeClientRetry(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte) *opsRetryExecution { + groupID := errorLog.GroupID + if groupID == nil || *groupID <= 0 { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "group_id missing; cannot reselect account"} + } + + model, stream, parsedErr := extractRetryModelAndStream(reqType, errorLog, body) + if parsedErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: parsedErr.Error()} + } + _ = stream + + excluded := make(map[int64]struct{}) + switches := 0 + + for { + if switches >= opsRetryMaxAccountSwitches { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "retry failed after 
exhausting account failovers"} + } + + selection, selErr := s.selectAccountForRetry(ctx, reqType, groupID, model, excluded) + if selErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: selErr.Error()} + } + if selection == nil || selection.Account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "no available accounts"} + } + + account := selection.Account + if !selection.Acquired || selection.ReleaseFunc == nil { + excluded[account.ID] = struct{}{} + switches++ + continue + } + + exec := func() *opsRetryExecution { + defer selection.ReleaseFunc() + return s.executeWithAccount(ctx, reqType, errorLog, body, account) + }() + + if exec != nil { + if exec.status == opsRetryStatusSucceeded { + usedID := account.ID + exec.usedAccountID = &usedID + return exec + } + // If the gateway services ask for failover, try another account. + if s.isFailoverError(exec.errorMessage) { + excluded[account.ID] = struct{}{} + switches++ + continue + } + usedID := account.ID + exec.usedAccountID = &usedID + return exec + } + + excluded[account.ID] = struct{}{} + switches++ + } +} + +func (s *OpsService) selectAccountForRetry(ctx context.Context, reqType opsRetryRequestType, groupID *int64, model string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { + switch reqType { + case opsRetryTypeOpenAI: + if s.openAIGatewayService == nil { + return nil, fmt.Errorf("openai gateway service not available") + } + return s.openAIGatewayService.SelectAccountWithLoadAwareness(ctx, groupID, "", model, excludedIDs) + case opsRetryTypeGeminiV1B, opsRetryTypeMessages: + if s.gatewayService == nil { + return nil, fmt.Errorf("gateway service not available") + } + return s.gatewayService.SelectAccountWithLoadAwareness(ctx, groupID, "", model, excludedIDs) + default: + return nil, fmt.Errorf("unsupported retry type: %s", reqType) + } +} + +func extractRetryModelAndStream(reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte) (model string, stream bool, err error) { + switch reqType { + case opsRetryTypeMessages: + parsed, parseErr := ParseGatewayRequest(body) + if parseErr != nil { + return "", false, fmt.Errorf("failed to parse messages request body: %w", parseErr) + } + return parsed.Model, parsed.Stream, nil + case opsRetryTypeOpenAI: + var v struct { + Model string `json:"model"` + Stream bool `json:"stream"` + } + if err := json.Unmarshal(body, &v); err != nil { + return "", false, fmt.Errorf("failed to parse openai request body: %w", err) + } + return strings.TrimSpace(v.Model), v.Stream, nil + case opsRetryTypeGeminiV1B: + if strings.TrimSpace(errorLog.Model) == "" { + return "", false, fmt.Errorf("missing model for gemini v1beta retry") + } + return strings.TrimSpace(errorLog.Model), errorLog.Stream, nil + default: + return "", false, fmt.Errorf("unsupported retry type: %s", reqType) + } +} + +func (s *OpsService) executeWithAccount(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte, account *Account) *opsRetryExecution { + if account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "missing account"} + } + + c, w := newOpsRetryContext(ctx, errorLog) + + var err error + switch reqType { + case opsRetryTypeOpenAI: + if s.openAIGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "openai gateway service not available"} + } + _, err = s.openAIGatewayService.Forward(ctx, c, account, body) + case opsRetryTypeGeminiV1B: + 
if s.geminiCompatService == nil || s.antigravityGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gemini services not available"} + } + modelName := strings.TrimSpace(errorLog.Model) + action := "generateContent" + if errorLog.Stream { + action = "streamGenerateContent" + } + if account.Platform == PlatformAntigravity { + _, err = s.antigravityGatewayService.ForwardGemini(ctx, c, account, modelName, action, errorLog.Stream, body) + } else { + _, err = s.geminiCompatService.ForwardNative(ctx, c, account, modelName, action, errorLog.Stream, body) + } + case opsRetryTypeMessages: + switch account.Platform { + case PlatformAntigravity: + if s.antigravityGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "antigravity gateway service not available"} + } + _, err = s.antigravityGatewayService.Forward(ctx, c, account, body) + case PlatformGemini: + if s.geminiCompatService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gemini gateway service not available"} + } + _, err = s.geminiCompatService.Forward(ctx, c, account, body) + default: + if s.gatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gateway service not available"} + } + parsedReq, parseErr := ParseGatewayRequest(body) + if parseErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "failed to parse request body"} + } + _, err = s.gatewayService.Forward(ctx, c, account, parsedReq) + } + default: + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "unsupported retry type"} + } + + statusCode := http.StatusOK + if c != nil && c.Writer != nil { + statusCode = c.Writer.Status() + } + + upstreamReqID := extractUpstreamRequestID(c) + preview, truncated := extractResponsePreview(w) + + exec := &opsRetryExecution{ + status: opsRetryStatusFailed, + httpStatusCode: statusCode, + upstreamRequestID: upstreamReqID, + responsePreview: preview, + responseTruncated: truncated, + errorMessage: "", + } + + if err == nil && statusCode < 400 { + exec.status = opsRetryStatusSucceeded + return exec + } + + if err != nil { + exec.errorMessage = err.Error() + } else { + exec.errorMessage = fmt.Sprintf("upstream returned status %d", statusCode) + } + + return exec +} + +func newOpsRetryContext(ctx context.Context, errorLog *OpsErrorLogDetail) (*gin.Context, *limitedResponseWriter) { + w := newLimitedResponseWriter(opsRetryCaptureBytesLimit) + c, _ := gin.CreateTestContext(w) + + path := "/" + if errorLog != nil && strings.TrimSpace(errorLog.RequestPath) != "" { + path = errorLog.RequestPath + } + + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "http://localhost"+path, bytes.NewReader(nil)) + req.Header.Set("content-type", "application/json") + if errorLog != nil && strings.TrimSpace(errorLog.UserAgent) != "" { + req.Header.Set("user-agent", errorLog.UserAgent) + } + // Restore a minimal, whitelisted subset of request headers to improve retry fidelity + // (e.g. anthropic-beta / anthropic-version). Never replay auth credentials. 
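+	// e.g. a stored header map {"anthropic-version":"...","authorization":"..."}
+	// replays only anthropic-version (and anthropic-beta); authorization is
+	// dropped because it is not in opsRetryRequestHeaderAllowlist.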
+ if errorLog != nil && strings.TrimSpace(errorLog.RequestHeaders) != "" { + var stored map[string]string + if err := json.Unmarshal([]byte(errorLog.RequestHeaders), &stored); err == nil { + for k, v := range stored { + key := strings.TrimSpace(k) + if key == "" { + continue + } + if !opsRetryRequestHeaderAllowlist[strings.ToLower(key)] { + continue + } + val := strings.TrimSpace(v) + if val == "" { + continue + } + req.Header.Set(key, val) + } + } + } + + c.Request = req + return c, w +} + +func extractUpstreamRequestID(c *gin.Context) string { + if c == nil || c.Writer == nil { + return "" + } + h := c.Writer.Header() + if h == nil { + return "" + } + for _, key := range []string{"x-request-id", "X-Request-Id", "X-Request-ID"} { + if v := strings.TrimSpace(h.Get(key)); v != "" { + return v + } + } + return "" +} + +func extractResponsePreview(w *limitedResponseWriter) (preview string, truncated bool) { + if w == nil { + return "", false + } + b := bytes.TrimSpace(w.bodyBytes()) + if len(b) == 0 { + return "", w.truncated() + } + if len(b) > opsRetryResponsePreviewMax { + return string(b[:opsRetryResponsePreviewMax]), true + } + return string(b), w.truncated() +} + +func containsInt64(items []int64, needle int64) bool { + for _, v := range items { + if v == needle { + return true + } + } + return false +} + +func (s *OpsService) isFailoverError(message string) bool { + msg := strings.ToLower(strings.TrimSpace(message)) + if msg == "" { + return false + } + return strings.Contains(msg, "upstream error:") && strings.Contains(msg, "failover") +} diff --git a/backend/internal/service/ops_scheduled_report_service.go b/backend/internal/service/ops_scheduled_report_service.go new file mode 100644 index 00000000..28902cbc --- /dev/null +++ b/backend/internal/service/ops_scheduled_report_service.go @@ -0,0 +1,705 @@ +package service + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "github.com/robfig/cron/v3" +) + +const ( + opsScheduledReportJobName = "ops_scheduled_reports" + + opsScheduledReportLeaderLockKeyDefault = "ops:scheduled_reports:leader" + opsScheduledReportLeaderLockTTLDefault = 5 * time.Minute + + opsScheduledReportLastRunKeyPrefix = "ops:scheduled_reports:last_run:" + + opsScheduledReportTickInterval = 1 * time.Minute +) + +var opsScheduledReportCronParser = cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + +var opsScheduledReportReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +type OpsScheduledReportService struct { + opsService *OpsService + userService *UserService + emailService *EmailService + redisClient *redis.Client + cfg *config.Config + + instanceID string + loc *time.Location + + distributedLockOn bool + warnNoRedisOnce sync.Once + + startOnce sync.Once + stopOnce sync.Once + stopCtx context.Context + stop context.CancelFunc + wg sync.WaitGroup +} + +func NewOpsScheduledReportService( + opsService *OpsService, + userService *UserService, + emailService *EmailService, + redisClient *redis.Client, + cfg *config.Config, +) *OpsScheduledReportService { + lockOn := cfg == nil || strings.TrimSpace(cfg.RunMode) != config.RunModeSimple + + loc := time.Local + if cfg != nil && strings.TrimSpace(cfg.Timezone) != "" { + if parsed, err := time.LoadLocation(strings.TrimSpace(cfg.Timezone)); err == nil && parsed != nil { + loc = 
parsed + } + } + return &OpsScheduledReportService{ + opsService: opsService, + userService: userService, + emailService: emailService, + redisClient: redisClient, + cfg: cfg, + + instanceID: uuid.NewString(), + loc: loc, + distributedLockOn: lockOn, + warnNoRedisOnce: sync.Once{}, + startOnce: sync.Once{}, + stopOnce: sync.Once{}, + stopCtx: nil, + stop: nil, + wg: sync.WaitGroup{}, + } +} + +func (s *OpsScheduledReportService) Start() { + s.StartWithContext(context.Background()) +} + +func (s *OpsScheduledReportService) StartWithContext(ctx context.Context) { + if s == nil { + return + } + if ctx == nil { + ctx = context.Background() + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return + } + if s.opsService == nil || s.emailService == nil { + return + } + + s.startOnce.Do(func() { + s.stopCtx, s.stop = context.WithCancel(ctx) + s.wg.Add(1) + go s.run() + }) +} + +func (s *OpsScheduledReportService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.stop != nil { + s.stop() + } + }) + s.wg.Wait() +} + +func (s *OpsScheduledReportService) run() { + defer s.wg.Done() + + ticker := time.NewTicker(opsScheduledReportTickInterval) + defer ticker.Stop() + + s.runOnce() + for { + select { + case <-ticker.C: + s.runOnce() + case <-s.stopCtx.Done(): + return + } + } +} + +func (s *OpsScheduledReportService) runOnce() { + if s == nil || s.opsService == nil || s.emailService == nil { + return + } + + startedAt := time.Now().UTC() + runAt := startedAt + + ctx, cancel := context.WithTimeout(s.stopCtx, 60*time.Second) + defer cancel() + + // Respect ops monitoring enabled switch. + if !s.opsService.IsMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + now := time.Now() + if s.loc != nil { + now = now.In(s.loc) + } + + reports := s.listScheduledReports(ctx, now) + if len(reports) == 0 { + return + } + + for _, report := range reports { + if report == nil || !report.Enabled { + continue + } + if report.NextRunAt.After(now) { + continue + } + + if err := s.runReport(ctx, report, now); err != nil { + s.recordHeartbeatError(runAt, time.Since(startedAt), err) + return + } + } + + s.recordHeartbeatSuccess(runAt, time.Since(startedAt)) +} + +type opsScheduledReport struct { + Name string + ReportType string + Schedule string + Enabled bool + + TimeRange time.Duration + + Recipients []string + + ErrorDigestMinCount int + AccountHealthErrorRateThreshold float64 + + LastRunAt *time.Time + NextRunAt time.Time +} + +func (s *OpsScheduledReportService) listScheduledReports(ctx context.Context, now time.Time) []*opsScheduledReport { + if s == nil || s.opsService == nil { + return nil + } + if ctx == nil { + ctx = context.Background() + } + + emailCfg, err := s.opsService.GetEmailNotificationConfig(ctx) + if err != nil || emailCfg == nil { + return nil + } + if !emailCfg.Report.Enabled { + return nil + } + + recipients := normalizeEmails(emailCfg.Report.Recipients) + + type reportDef struct { + enabled bool + name string + kind string + timeRange time.Duration + schedule string + } + + defs := []reportDef{ + {enabled: emailCfg.Report.DailySummaryEnabled, name: "日报", kind: "daily_summary", timeRange: 24 * time.Hour, schedule: emailCfg.Report.DailySummarySchedule}, + {enabled: emailCfg.Report.WeeklySummaryEnabled, name: "周报", kind: "weekly_summary", timeRange: 7 * 24 * time.Hour, schedule: emailCfg.Report.WeeklySummarySchedule}, + {enabled: emailCfg.Report.ErrorDigestEnabled, name: "错误摘要", kind: 
"error_digest", timeRange: 24 * time.Hour, schedule: emailCfg.Report.ErrorDigestSchedule}, + {enabled: emailCfg.Report.AccountHealthEnabled, name: "账号健康", kind: "account_health", timeRange: 24 * time.Hour, schedule: emailCfg.Report.AccountHealthSchedule}, + } + + out := make([]*opsScheduledReport, 0, len(defs)) + for _, d := range defs { + if !d.enabled { + continue + } + spec := strings.TrimSpace(d.schedule) + if spec == "" { + continue + } + sched, err := opsScheduledReportCronParser.Parse(spec) + if err != nil { + log.Printf("[OpsScheduledReport] invalid cron spec=%q for report=%s: %v", spec, d.kind, err) + continue + } + + lastRun := s.getLastRunAt(ctx, d.kind) + base := lastRun + if base.IsZero() { + // Allow a schedule matching the current minute to trigger right after startup. + base = now.Add(-1 * time.Minute) + } + next := sched.Next(base) + if next.IsZero() { + continue + } + + var lastRunPtr *time.Time + if !lastRun.IsZero() { + lastCopy := lastRun + lastRunPtr = &lastCopy + } + + out = append(out, &opsScheduledReport{ + Name: d.name, + ReportType: d.kind, + Schedule: spec, + Enabled: true, + + TimeRange: d.timeRange, + + Recipients: recipients, + + ErrorDigestMinCount: emailCfg.Report.ErrorDigestMinCount, + AccountHealthErrorRateThreshold: emailCfg.Report.AccountHealthErrorRateThreshold, + + LastRunAt: lastRunPtr, + NextRunAt: next, + }) + } + + return out +} + +func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsScheduledReport, now time.Time) error { + if s == nil || s.opsService == nil || s.emailService == nil || report == nil { + return nil + } + if ctx == nil { + ctx = context.Background() + } + + // Mark as "run" up-front so a broken SMTP config doesn't spam retries every minute. + s.setLastRunAt(ctx, report.ReportType, now) + + content, err := s.generateReportHTML(ctx, report, now) + if err != nil { + return err + } + if strings.TrimSpace(content) == "" { + // Skip sending when the report decides not to emit content (e.g., digest below min count). + return nil + } + + recipients := report.Recipients + if len(recipients) == 0 && s.userService != nil { + admin, err := s.userService.GetFirstAdmin(ctx) + if err == nil && admin != nil && strings.TrimSpace(admin.Email) != "" { + recipients = []string{strings.TrimSpace(admin.Email)} + } + } + if len(recipients) == 0 { + return nil + } + + subject := fmt.Sprintf("[Ops Report] %s", strings.TrimSpace(report.Name)) + + for _, to := range recipients { + addr := strings.TrimSpace(to) + if addr == "" { + continue + } + if err := s.emailService.SendEmail(ctx, addr, subject, content); err != nil { + // Ignore per-recipient failures; continue best-effort. + continue + } + } + return nil +} + +func (s *OpsScheduledReportService) generateReportHTML(ctx context.Context, report *opsScheduledReport, now time.Time) (string, error) { + if s == nil || s.opsService == nil || report == nil { + return "", fmt.Errorf("service not initialized") + } + if report.TimeRange <= 0 { + return "", fmt.Errorf("invalid time range") + } + + end := now.UTC() + start := end.Add(-report.TimeRange) + + switch strings.TrimSpace(report.ReportType) { + case "daily_summary", "weekly_summary": + overview, err := s.opsService.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: "", + GroupID: nil, + QueryMode: OpsQueryModeAuto, + }) + if err != nil { + // If pre-aggregation isn't ready but the report is requested, fall back to raw. 
+ if strings.TrimSpace(report.ReportType) == "daily_summary" || strings.TrimSpace(report.ReportType) == "weekly_summary" { + overview, err = s.opsService.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: "", + GroupID: nil, + QueryMode: OpsQueryModeRaw, + }) + } + if err != nil { + return "", err + } + } + return buildOpsSummaryEmailHTML(report.Name, start, end, overview), nil + case "error_digest": + // Lightweight digest: list recent errors (status>=400) and breakdown by type. + startTime := start + endTime := end + filter := &OpsErrorLogFilter{ + StartTime: &startTime, + EndTime: &endTime, + Page: 1, + PageSize: 100, + } + out, err := s.opsService.GetErrorLogs(ctx, filter) + if err != nil { + return "", err + } + if report.ErrorDigestMinCount > 0 && out != nil && out.Total < report.ErrorDigestMinCount { + return "", nil + } + return buildOpsErrorDigestEmailHTML(report.Name, start, end, out), nil + case "account_health": + // Best-effort: use account availability (not error rate yet). + avail, err := s.opsService.GetAccountAvailability(ctx, "", nil) + if err != nil { + return "", err + } + _ = report.AccountHealthErrorRateThreshold // reserved for future per-account error rate report + return buildOpsAccountHealthEmailHTML(report.Name, start, end, avail), nil + default: + return "", fmt.Errorf("unknown report type: %s", report.ReportType) + } +} + +func buildOpsSummaryEmailHTML(title string, start, end time.Time, overview *OpsDashboardOverview) string { + if overview == nil { + return fmt.Sprintf("
<h3>%s</h3><p>No data.</p>", htmlEscape(title))
+	}
+
+	latP50 := "-"
+	latP99 := "-"
+	if overview.Duration.P50 != nil {
+		latP50 = fmt.Sprintf("%dms", *overview.Duration.P50)
+	}
+	if overview.Duration.P99 != nil {
+		latP99 = fmt.Sprintf("%dms", *overview.Duration.P99)
+	}
+
+	ttftP50 := "-"
+	ttftP99 := "-"
+	if overview.TTFT.P50 != nil {
+		ttftP50 = fmt.Sprintf("%dms", *overview.TTFT.P50)
+	}
+	if overview.TTFT.P99 != nil {
+		ttftP99 = fmt.Sprintf("%dms", *overview.TTFT.P99)
+	}
+
+	return fmt.Sprintf(`
+<h3>%s</h3>
+<p>Period: %s ~ %s (UTC)</p>
+<ul>
+  <li>Total Requests: %d</li>
+  <li>Success: %d</li>
+  <li>Errors (SLA): %d</li>
+  <li>Business Limited: %d</li>
+  <li>SLA: %.2f%%</li>
+  <li>Error Rate: %.2f%%</li>
+  <li>Upstream Error Rate (excl 429/529): %.2f%%</li>
+  <li>Upstream Errors: excl429/529=%d, 429=%d, 529=%d</li>
+  <li>Latency: p50=%s, p99=%s</li>
+  <li>TTFT: p50=%s, p99=%s</li>
+  <li>Tokens: %d</li>
+  <li>QPS: current=%.1f, peak=%.1f, avg=%.1f</li>
+  <li>TPS: current=%.1f, peak=%.1f, avg=%.1f</li>
+</ul>
+`,
+		htmlEscape(strings.TrimSpace(title)),
+		htmlEscape(start.UTC().Format(time.RFC3339)),
+		htmlEscape(end.UTC().Format(time.RFC3339)),
+		overview.RequestCountTotal,
+		overview.SuccessCount,
+		overview.ErrorCountSLA,
+		overview.BusinessLimitedCount,
+		overview.SLA*100,
+		overview.ErrorRate*100,
+		overview.UpstreamErrorRate*100,
+		overview.UpstreamErrorCountExcl429529,
+		overview.Upstream429Count,
+		overview.Upstream529Count,
+		htmlEscape(latP50),
+		htmlEscape(latP99),
+		htmlEscape(ttftP50),
+		htmlEscape(ttftP99),
+		overview.TokenConsumed,
+		overview.QPS.Current,
+		overview.QPS.Peak,
+		overview.QPS.Avg,
+		overview.TPS.Current,
+		overview.TPS.Peak,
+		overview.TPS.Avg,
+	)
+}
+
+func buildOpsErrorDigestEmailHTML(title string, start, end time.Time, list *OpsErrorLogList) string {
+	total := 0
+	recent := []*OpsErrorLog{}
+	if list != nil {
+		total = list.Total
+		recent = list.Errors
+	}
+	if len(recent) > 10 {
+		recent = recent[:10]
+	}
+
+	rows := ""
+	for _, item := range recent {
+		if item == nil {
+			continue
+		}
+		rows += fmt.Sprintf(
+			"<tr><td>%s</td><td>%s</td><td>%d</td><td>%s</td></tr>",
+			htmlEscape(item.CreatedAt.UTC().Format(time.RFC3339)),
+			htmlEscape(item.Platform),
+			item.StatusCode,
+			htmlEscape(truncateString(item.Message, 180)),
+		)
+	}
+	if rows == "" {
+		rows = `<tr><td colspan="4">No recent errors.</td></tr>`
+	}
+
+	return fmt.Sprintf(`
+<h3>%s</h3>
+<p>Period: %s ~ %s (UTC)</p>
+<p>Total Errors: %d</p>
+<h4>Recent</h4>
+<table border="1" cellpadding="4" cellspacing="0">
+<tr><th>Time</th><th>Platform</th><th>Status</th><th>Message</th></tr>
+%s
+</table>
+`, + htmlEscape(strings.TrimSpace(title)), + htmlEscape(start.UTC().Format(time.RFC3339)), + htmlEscape(end.UTC().Format(time.RFC3339)), + total, + rows, + ) +} + +func buildOpsAccountHealthEmailHTML(title string, start, end time.Time, avail *OpsAccountAvailability) string { + total := 0 + available := 0 + rateLimited := 0 + hasError := 0 + + if avail != nil && avail.Accounts != nil { + for _, a := range avail.Accounts { + if a == nil { + continue + } + total++ + if a.IsAvailable { + available++ + } + if a.IsRateLimited { + rateLimited++ + } + if a.HasError { + hasError++ + } + } + } + + return fmt.Sprintf(` +
+<h3>%s</h3>
+<p>Period: %s ~ %s (UTC)</p>
+<ul>
+  <li>Total Accounts: %d</li>
+  <li>Available: %d</li>
+  <li>Rate Limited: %d</li>
+  <li>Error: %d</li>
+</ul>
+<p>Note: This report currently reflects account availability status only.</p>
+`, + htmlEscape(strings.TrimSpace(title)), + htmlEscape(start.UTC().Format(time.RFC3339)), + htmlEscape(end.UTC().Format(time.RFC3339)), + total, + available, + rateLimited, + hasError, + ) +} + +func (s *OpsScheduledReportService) tryAcquireLeaderLock(ctx context.Context) (func(), bool) { + if s == nil || !s.distributedLockOn { + return nil, true + } + if s.redisClient == nil { + s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsScheduledReport] redis not configured; running without distributed lock") + }) + return nil, true + } + if ctx == nil { + ctx = context.Background() + } + + key := opsScheduledReportLeaderLockKeyDefault + ttl := opsScheduledReportLeaderLockTTLDefault + if strings.TrimSpace(key) == "" { + key = "ops:scheduled_reports:leader" + } + if ttl <= 0 { + ttl = 5 * time.Minute + } + + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err != nil { + // Prefer fail-closed to avoid duplicate report sends when Redis is flaky. + log.Printf("[OpsScheduledReport] leader lock SetNX failed; skipping this cycle: %v", err) + return nil, false + } + if !ok { + return nil, false + } + return func() { + _, _ = opsScheduledReportReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result() + }, true +} + +func (s *OpsScheduledReportService) getLastRunAt(ctx context.Context, reportType string) time.Time { + if s == nil || s.redisClient == nil { + return time.Time{} + } + kind := strings.TrimSpace(reportType) + if kind == "" { + return time.Time{} + } + key := opsScheduledReportLastRunKeyPrefix + kind + + raw, err := s.redisClient.Get(ctx, key).Result() + if err != nil || strings.TrimSpace(raw) == "" { + return time.Time{} + } + sec, err := strconv.ParseInt(strings.TrimSpace(raw), 10, 64) + if err != nil || sec <= 0 { + return time.Time{} + } + last := time.Unix(sec, 0) + // Cron schedules are interpreted in the configured timezone (s.loc). Ensure the base time + // passed into cron.Next() uses the same location; otherwise the job will drift by timezone + // offset (e.g. Asia/Shanghai default would run 8h later after the first execution). 
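+	// Illustrative (hypothetical spec and base time):
+	//   loc, _ := time.LoadLocation("Asia/Shanghai")
+	//   sched, _ := opsScheduledReportCronParser.Parse("0 9 * * *")
+	//   sched.Next(last.In(loc)) // next 09:00 on the Shanghai wall clock
+	//   sched.Next(last.UTC())   // next 09:00 UTC, i.e. 17:00 in Shanghai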
+ if s.loc != nil { + return last.In(s.loc) + } + return last.UTC() +} + +func (s *OpsScheduledReportService) setLastRunAt(ctx context.Context, reportType string, t time.Time) { + if s == nil || s.redisClient == nil { + return + } + kind := strings.TrimSpace(reportType) + if kind == "" { + return + } + if t.IsZero() { + t = time.Now().UTC() + } + key := opsScheduledReportLastRunKeyPrefix + kind + _ = s.redisClient.Set(ctx, key, strconv.FormatInt(t.UTC().Unix(), 10), 14*24*time.Hour).Err() +} + +func (s *OpsScheduledReportService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) { + if s == nil || s.opsService == nil || s.opsService.opsRepo == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsService.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsScheduledReportJobName, + LastRunAt: &runAt, + LastSuccessAt: &now, + LastDurationMs: &durMs, + }) +} + +func (s *OpsScheduledReportService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) { + if s == nil || s.opsService == nil || s.opsService.opsRepo == nil || err == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + msg := truncateString(err.Error(), 2048) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsService.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsScheduledReportJobName, + LastRunAt: &runAt, + LastErrorAt: &now, + LastError: &msg, + LastDurationMs: &durMs, + }) +} + +func normalizeEmails(in []string) []string { + if len(in) == 0 { + return nil + } + seen := make(map[string]struct{}, len(in)) + out := make([]string, 0, len(in)) + for _, raw := range in { + addr := strings.ToLower(strings.TrimSpace(raw)) + if addr == "" { + continue + } + if _, ok := seen[addr]; ok { + continue + } + seen[addr] = struct{}{} + out = append(out, addr) + } + return out +} diff --git a/backend/internal/service/ops_service.go b/backend/internal/service/ops_service.go new file mode 100644 index 00000000..abb8ae12 --- /dev/null +++ b/backend/internal/service/ops_service.go @@ -0,0 +1,613 @@ +package service + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "log" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ErrOpsDisabled = infraerrors.NotFound("OPS_DISABLED", "Ops monitoring is disabled") + +const ( + opsMaxStoredRequestBodyBytes = 10 * 1024 + opsMaxStoredErrorBodyBytes = 20 * 1024 +) + +// OpsService provides ingestion and query APIs for the Ops monitoring module. +type OpsService struct { + opsRepo OpsRepository + settingRepo SettingRepository + cfg *config.Config + + accountRepo AccountRepository + + // getAccountAvailability is a unit-test hook for overriding account availability lookup. 
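+	// e.g. a same-package test can stub it (illustrative):
+	//   svc.getAccountAvailability = func(ctx context.Context, platform string, groupID *int64) (*OpsAccountAvailability, error) {
+	//       return &OpsAccountAvailability{}, nil
+	//   }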
+ getAccountAvailability func(ctx context.Context, platformFilter string, groupIDFilter *int64) (*OpsAccountAvailability, error) + + concurrencyService *ConcurrencyService + gatewayService *GatewayService + openAIGatewayService *OpenAIGatewayService + geminiCompatService *GeminiMessagesCompatService + antigravityGatewayService *AntigravityGatewayService +} + +func NewOpsService( + opsRepo OpsRepository, + settingRepo SettingRepository, + cfg *config.Config, + accountRepo AccountRepository, + concurrencyService *ConcurrencyService, + gatewayService *GatewayService, + openAIGatewayService *OpenAIGatewayService, + geminiCompatService *GeminiMessagesCompatService, + antigravityGatewayService *AntigravityGatewayService, +) *OpsService { + return &OpsService{ + opsRepo: opsRepo, + settingRepo: settingRepo, + cfg: cfg, + + accountRepo: accountRepo, + + concurrencyService: concurrencyService, + gatewayService: gatewayService, + openAIGatewayService: openAIGatewayService, + geminiCompatService: geminiCompatService, + antigravityGatewayService: antigravityGatewayService, + } +} + +func (s *OpsService) RequireMonitoringEnabled(ctx context.Context) error { + if s.IsMonitoringEnabled(ctx) { + return nil + } + return ErrOpsDisabled +} + +func (s *OpsService) IsMonitoringEnabled(ctx context.Context) bool { + // Hard switch: disable ops entirely. + if s.cfg != nil && !s.cfg.Ops.Enabled { + return false + } + if s.settingRepo == nil { + return true + } + value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled) + if err != nil { + // Default enabled when key is missing, and fail-open on transient errors + // (ops should never block gateway traffic). + if errors.Is(err, ErrSettingNotFound) { + return true + } + return true + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} + +func (s *OpsService) RecordError(ctx context.Context, entry *OpsInsertErrorLogInput, rawRequestBody []byte) error { + if entry == nil { + return nil + } + if !s.IsMonitoringEnabled(ctx) { + return nil + } + if s.opsRepo == nil { + return nil + } + + // Ensure timestamps are always populated. + if entry.CreatedAt.IsZero() { + entry.CreatedAt = time.Now() + } + + // Ensure required fields exist (DB has NOT NULL constraints). + entry.ErrorPhase = strings.TrimSpace(entry.ErrorPhase) + entry.ErrorType = strings.TrimSpace(entry.ErrorType) + if entry.ErrorPhase == "" { + entry.ErrorPhase = "internal" + } + if entry.ErrorType == "" { + entry.ErrorType = "api_error" + } + + // Sanitize + trim request body (errors only). + if len(rawRequestBody) > 0 { + sanitized, truncated, bytesLen := sanitizeAndTrimRequestBody(rawRequestBody, opsMaxStoredRequestBodyBytes) + if sanitized != "" { + entry.RequestBodyJSON = &sanitized + } + entry.RequestBodyTruncated = truncated + entry.RequestBodyBytes = &bytesLen + } + + // Sanitize + truncate error_body to avoid storing sensitive data. + if strings.TrimSpace(entry.ErrorBody) != "" { + sanitized, _ := sanitizeErrorBodyForStorage(entry.ErrorBody, opsMaxStoredErrorBodyBytes) + entry.ErrorBody = sanitized + } + + // Sanitize upstream error context if provided by gateway services. 
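+	// e.g. UpstreamStatusCode <= 0 is treated as "unknown" and cleared; the
+	// upstream message is redacted and then capped at 2048 bytes below.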
+ if entry.UpstreamStatusCode != nil && *entry.UpstreamStatusCode <= 0 { + entry.UpstreamStatusCode = nil + } + if entry.UpstreamErrorMessage != nil { + msg := strings.TrimSpace(*entry.UpstreamErrorMessage) + msg = sanitizeUpstreamErrorMessage(msg) + msg = truncateString(msg, 2048) + if strings.TrimSpace(msg) == "" { + entry.UpstreamErrorMessage = nil + } else { + entry.UpstreamErrorMessage = &msg + } + } + if entry.UpstreamErrorDetail != nil { + detail := strings.TrimSpace(*entry.UpstreamErrorDetail) + if detail == "" { + entry.UpstreamErrorDetail = nil + } else { + sanitized, _ := sanitizeErrorBodyForStorage(detail, opsMaxStoredErrorBodyBytes) + if strings.TrimSpace(sanitized) == "" { + entry.UpstreamErrorDetail = nil + } else { + entry.UpstreamErrorDetail = &sanitized + } + } + } + + // Sanitize + serialize upstream error events list. + if len(entry.UpstreamErrors) > 0 { + const maxEvents = 32 + events := entry.UpstreamErrors + if len(events) > maxEvents { + events = events[len(events)-maxEvents:] + } + + sanitized := make([]*OpsUpstreamErrorEvent, 0, len(events)) + for _, ev := range events { + if ev == nil { + continue + } + out := *ev + + out.Platform = strings.TrimSpace(out.Platform) + out.UpstreamRequestID = truncateString(strings.TrimSpace(out.UpstreamRequestID), 128) + out.Kind = truncateString(strings.TrimSpace(out.Kind), 64) + + if out.AccountID < 0 { + out.AccountID = 0 + } + if out.UpstreamStatusCode < 0 { + out.UpstreamStatusCode = 0 + } + if out.AtUnixMs < 0 { + out.AtUnixMs = 0 + } + + msg := sanitizeUpstreamErrorMessage(strings.TrimSpace(out.Message)) + msg = truncateString(msg, 2048) + out.Message = msg + + detail := strings.TrimSpace(out.Detail) + if detail != "" { + // Keep upstream detail small; request bodies are not stored here, only upstream error payloads. + sanitizedDetail, _ := sanitizeErrorBodyForStorage(detail, opsMaxStoredErrorBodyBytes) + out.Detail = sanitizedDetail + } else { + out.Detail = "" + } + + out.UpstreamRequestBody = strings.TrimSpace(out.UpstreamRequestBody) + if out.UpstreamRequestBody != "" { + // Reuse the same sanitization/trimming strategy as request body storage. + // Keep it small so it is safe to persist in ops_error_logs JSON. + sanitized, truncated, _ := sanitizeAndTrimRequestBody([]byte(out.UpstreamRequestBody), 10*1024) + if sanitized != "" { + out.UpstreamRequestBody = sanitized + if truncated { + out.Kind = strings.TrimSpace(out.Kind) + if out.Kind == "" { + out.Kind = "upstream" + } + out.Kind = out.Kind + ":request_body_truncated" + } + } else { + out.UpstreamRequestBody = "" + } + } + + // Drop fully-empty events (can happen if only status code was known). + if out.UpstreamStatusCode == 0 && out.Message == "" && out.Detail == "" { + continue + } + + evCopy := out + sanitized = append(sanitized, &evCopy) + } + + entry.UpstreamErrorsJSON = marshalOpsUpstreamErrors(sanitized) + entry.UpstreamErrors = nil + } + + if _, err := s.opsRepo.InsertErrorLog(ctx, entry); err != nil { + // Never bubble up to gateway; best-effort logging. 
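+		// The error is still returned for the caller's own bookkeeping; request
+		// handling paths are expected to ignore it rather than fail the request.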
+ log.Printf("[Ops] RecordError failed: %v", err) + return err + } + return nil +} + +func (s *OpsService) GetErrorLogs(ctx context.Context, filter *OpsErrorLogFilter) (*OpsErrorLogList, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return &OpsErrorLogList{Errors: []*OpsErrorLog{}, Total: 0, Page: 1, PageSize: 20}, nil + } + result, err := s.opsRepo.ListErrorLogs(ctx, filter) + if err != nil { + log.Printf("[Ops] GetErrorLogs failed: %v", err) + return nil, err + } + + return result, nil +} + +func (s *OpsService) GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLogDetail, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + detail, err := s.opsRepo.GetErrorLogByID(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + return nil, infraerrors.InternalServer("OPS_ERROR_LOAD_FAILED", "Failed to load ops error log").WithCause(err) + } + return detail, nil +} + +func (s *OpsService) ListRetryAttemptsByErrorID(ctx context.Context, errorID int64, limit int) ([]*OpsRetryAttempt, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if errorID <= 0 { + return nil, infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id") + } + items, err := s.opsRepo.ListRetryAttemptsByErrorID(ctx, errorID, limit) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []*OpsRetryAttempt{}, nil + } + return nil, infraerrors.InternalServer("OPS_RETRY_LIST_FAILED", "Failed to list retry attempts").WithCause(err) + } + return items, nil +} + +func (s *OpsService) UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if errorID <= 0 { + return infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id") + } + // Best-effort ensure the error exists + if _, err := s.opsRepo.GetErrorLogByID(ctx, errorID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + return infraerrors.InternalServer("OPS_ERROR_LOAD_FAILED", "Failed to load ops error log").WithCause(err) + } + return s.opsRepo.UpdateErrorResolution(ctx, errorID, resolved, resolvedByUserID, resolvedRetryID, nil) +} + +func sanitizeAndTrimRequestBody(raw []byte, maxBytes int) (jsonString string, truncated bool, bytesLen int) { + bytesLen = len(raw) + if len(raw) == 0 { + return "", false, 0 + } + + var decoded any + if err := json.Unmarshal(raw, &decoded); err != nil { + // If it's not valid JSON, don't store (retry would not be reliable anyway). + return "", false, bytesLen + } + + decoded = redactSensitiveJSON(decoded) + + encoded, err := json.Marshal(decoded) + if err != nil { + return "", false, bytesLen + } + if len(encoded) <= maxBytes { + return string(encoded), false, bytesLen + } + + // Trim conversation history to keep the most recent context. 
+ if root, ok := decoded.(map[string]any); ok { + if trimmed, ok := trimConversationArrays(root, maxBytes); ok { + encoded2, err2 := json.Marshal(trimmed) + if err2 == nil && len(encoded2) <= maxBytes { + return string(encoded2), true, bytesLen + } + // Fallthrough: keep shrinking. + decoded = trimmed + } + + essential := shrinkToEssentials(root) + encoded3, err3 := json.Marshal(essential) + if err3 == nil && len(encoded3) <= maxBytes { + return string(encoded3), true, bytesLen + } + } + + // Last resort: keep JSON shape but drop big fields. + // This avoids downstream code that expects certain top-level keys from crashing. + if root, ok := decoded.(map[string]any); ok { + placeholder := shallowCopyMap(root) + placeholder["request_body_truncated"] = true + + // Replace potentially huge arrays/strings, but keep the keys present. + for _, k := range []string{"messages", "contents", "input", "prompt"} { + if _, exists := placeholder[k]; exists { + placeholder[k] = []any{} + } + } + for _, k := range []string{"text"} { + if _, exists := placeholder[k]; exists { + placeholder[k] = "" + } + } + + encoded4, err4 := json.Marshal(placeholder) + if err4 == nil { + if len(encoded4) <= maxBytes { + return string(encoded4), true, bytesLen + } + } + } + + // Final fallback: minimal valid JSON. + encoded4, err4 := json.Marshal(map[string]any{"request_body_truncated": true}) + if err4 != nil { + return "", true, bytesLen + } + return string(encoded4), true, bytesLen +} + +func redactSensitiveJSON(v any) any { + switch t := v.(type) { + case map[string]any: + out := make(map[string]any, len(t)) + for k, vv := range t { + if isSensitiveKey(k) { + out[k] = "[REDACTED]" + continue + } + out[k] = redactSensitiveJSON(vv) + } + return out + case []any: + out := make([]any, 0, len(t)) + for _, vv := range t { + out = append(out, redactSensitiveJSON(vv)) + } + return out + default: + return v + } +} + +func isSensitiveKey(key string) bool { + k := strings.ToLower(strings.TrimSpace(key)) + if k == "" { + return false + } + + // Exact matches (common credential fields). + switch k { + case "authorization", + "proxy-authorization", + "x-api-key", + "api_key", + "apikey", + "access_token", + "refresh_token", + "id_token", + "session_token", + "token", + "password", + "passwd", + "passphrase", + "secret", + "client_secret", + "private_key", + "jwt", + "signature", + "accesskeyid", + "secretaccesskey": + return true + } + + // Suffix matches. + for _, suffix := range []string{ + "_secret", + "_token", + "_id_token", + "_session_token", + "_password", + "_passwd", + "_passphrase", + "_key", + "secret_key", + "private_key", + } { + if strings.HasSuffix(k, suffix) { + return true + } + } + + // Substring matches (conservative, but errs on the side of privacy). + for _, sub := range []string{ + "secret", + "token", + "password", + "passwd", + "passphrase", + "privatekey", + "private_key", + "apikey", + "api_key", + "accesskeyid", + "secretaccesskey", + "bearer", + "cookie", + "credential", + "session", + "jwt", + "signature", + } { + if strings.Contains(k, sub) { + return true + } + } + + return false +} + +func trimConversationArrays(root map[string]any, maxBytes int) (map[string]any, bool) { + // Supported: anthropic/openai: messages; gemini: contents. 
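+	// e.g. {"model":"m","messages":[m1,m2,m3,m4]} may be trimmed to
+	// {"model":"m","messages":[m3,m4]} once the encoded form fits under maxBytes.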
+ if out, ok := trimArrayField(root, "messages", maxBytes); ok { + return out, true + } + if out, ok := trimArrayField(root, "contents", maxBytes); ok { + return out, true + } + return root, false +} + +func trimArrayField(root map[string]any, field string, maxBytes int) (map[string]any, bool) { + raw, ok := root[field] + if !ok { + return nil, false + } + arr, ok := raw.([]any) + if !ok || len(arr) == 0 { + return nil, false + } + + // Keep at least the last message/content. Use binary search so we don't marshal O(n) times. + // We are dropping from the *front* of the array (oldest context first). + lo := 0 + hi := len(arr) - 1 // inclusive; hi ensures at least one item remains + + var best map[string]any + found := false + + for lo <= hi { + mid := (lo + hi) / 2 + candidateArr := arr[mid:] + if len(candidateArr) == 0 { + lo = mid + 1 + continue + } + + next := shallowCopyMap(root) + next[field] = candidateArr + encoded, err := json.Marshal(next) + if err != nil { + // If marshal fails, try dropping more. + lo = mid + 1 + continue + } + + if len(encoded) <= maxBytes { + best = next + found = true + // Try to keep more context by dropping fewer items. + hi = mid - 1 + continue + } + + // Need to drop more. + lo = mid + 1 + } + + if found { + return best, true + } + + // Nothing fit (even with only one element); return the smallest slice and let the + // caller fall back to shrinkToEssentials(). + next := shallowCopyMap(root) + next[field] = arr[len(arr)-1:] + return next, true +} + +func shrinkToEssentials(root map[string]any) map[string]any { + out := make(map[string]any) + for _, key := range []string{"model", "stream", "max_tokens", "temperature", "top_p", "top_k"} { + if v, ok := root[key]; ok { + out[key] = v + } + } + + // Keep only the last element of the conversation array. + if v, ok := root["messages"]; ok { + if arr, ok := v.([]any); ok && len(arr) > 0 { + out["messages"] = []any{arr[len(arr)-1]} + } + } + if v, ok := root["contents"]; ok { + if arr, ok := v.([]any); ok && len(arr) > 0 { + out["contents"] = []any{arr[len(arr)-1]} + } + } + return out +} + +func shallowCopyMap(m map[string]any) map[string]any { + out := make(map[string]any, len(m)) + for k, v := range m { + out[k] = v + } + return out +} + +func sanitizeErrorBodyForStorage(raw string, maxBytes int) (sanitized string, truncated bool) { + raw = strings.TrimSpace(raw) + if raw == "" { + return "", false + } + + // Prefer JSON-safe sanitization when possible. + if out, trunc, _ := sanitizeAndTrimRequestBody([]byte(raw), maxBytes); out != "" { + return out, trunc + } + + // Non-JSON: best-effort truncate. 
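+	// This path covers plain-text upstream bodies (e.g. HTML error pages).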
+ if maxBytes > 0 && len(raw) > maxBytes { + return truncateString(raw, maxBytes), true + } + return raw, false +} diff --git a/backend/internal/service/ops_settings.go b/backend/internal/service/ops_settings.go new file mode 100644 index 00000000..a6a4a0d7 --- /dev/null +++ b/backend/internal/service/ops_settings.go @@ -0,0 +1,562 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "strings" + "time" +) + +const ( + opsAlertEvaluatorLeaderLockKeyDefault = "ops:alert:evaluator:leader" + opsAlertEvaluatorLeaderLockTTLDefault = 30 * time.Second +) + +// ========================= +// Email notification config +// ========================= + +func (s *OpsService) GetEmailNotificationConfig(ctx context.Context) (*OpsEmailNotificationConfig, error) { + defaultCfg := defaultOpsEmailNotificationConfig() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsEmailNotificationConfig) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + // Initialize defaults on first read (best-effort). + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsEmailNotificationConfig, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsEmailNotificationConfig{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + // Corrupted JSON should not break ops UI; fall back to defaults. + return defaultCfg, nil + } + normalizeOpsEmailNotificationConfig(cfg) + return cfg, nil +} + +func (s *OpsService) UpdateEmailNotificationConfig(ctx context.Context, req *OpsEmailNotificationConfigUpdateRequest) (*OpsEmailNotificationConfig, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if req == nil { + return nil, errors.New("invalid request") + } + + cfg, err := s.GetEmailNotificationConfig(ctx) + if err != nil { + return nil, err + } + + if req.Alert != nil { + cfg.Alert.Enabled = req.Alert.Enabled + if req.Alert.Recipients != nil { + cfg.Alert.Recipients = req.Alert.Recipients + } + cfg.Alert.MinSeverity = strings.TrimSpace(req.Alert.MinSeverity) + cfg.Alert.RateLimitPerHour = req.Alert.RateLimitPerHour + cfg.Alert.BatchingWindowSeconds = req.Alert.BatchingWindowSeconds + cfg.Alert.IncludeResolvedAlerts = req.Alert.IncludeResolvedAlerts + } + + if req.Report != nil { + cfg.Report.Enabled = req.Report.Enabled + if req.Report.Recipients != nil { + cfg.Report.Recipients = req.Report.Recipients + } + cfg.Report.DailySummaryEnabled = req.Report.DailySummaryEnabled + cfg.Report.DailySummarySchedule = strings.TrimSpace(req.Report.DailySummarySchedule) + cfg.Report.WeeklySummaryEnabled = req.Report.WeeklySummaryEnabled + cfg.Report.WeeklySummarySchedule = strings.TrimSpace(req.Report.WeeklySummarySchedule) + cfg.Report.ErrorDigestEnabled = req.Report.ErrorDigestEnabled + cfg.Report.ErrorDigestSchedule = strings.TrimSpace(req.Report.ErrorDigestSchedule) + cfg.Report.ErrorDigestMinCount = req.Report.ErrorDigestMinCount + cfg.Report.AccountHealthEnabled = req.Report.AccountHealthEnabled + cfg.Report.AccountHealthSchedule = strings.TrimSpace(req.Report.AccountHealthSchedule) + cfg.Report.AccountHealthErrorRateThreshold = req.Report.AccountHealthErrorRateThreshold + } + + if err := validateOpsEmailNotificationConfig(cfg); err != nil { + return nil, err + } + + normalizeOpsEmailNotificationConfig(cfg) 
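+	// Persist the normalized form so the stored JSON stays canonical for later reads.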
+ raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsEmailNotificationConfig, string(raw)); err != nil { + return nil, err + } + return cfg, nil +} + +func defaultOpsEmailNotificationConfig() *OpsEmailNotificationConfig { + return &OpsEmailNotificationConfig{ + Alert: OpsEmailAlertConfig{ + Enabled: true, + Recipients: []string{}, + MinSeverity: "", + RateLimitPerHour: 0, + BatchingWindowSeconds: 0, + IncludeResolvedAlerts: false, + }, + Report: OpsEmailReportConfig{ + Enabled: false, + Recipients: []string{}, + DailySummaryEnabled: false, + DailySummarySchedule: "0 9 * * *", + WeeklySummaryEnabled: false, + WeeklySummarySchedule: "0 9 * * 1", + ErrorDigestEnabled: false, + ErrorDigestSchedule: "0 9 * * *", + ErrorDigestMinCount: 10, + AccountHealthEnabled: false, + AccountHealthSchedule: "0 9 * * *", + AccountHealthErrorRateThreshold: 10.0, + }, + } +} + +func normalizeOpsEmailNotificationConfig(cfg *OpsEmailNotificationConfig) { + if cfg == nil { + return + } + if cfg.Alert.Recipients == nil { + cfg.Alert.Recipients = []string{} + } + if cfg.Report.Recipients == nil { + cfg.Report.Recipients = []string{} + } + + cfg.Alert.MinSeverity = strings.TrimSpace(cfg.Alert.MinSeverity) + cfg.Report.DailySummarySchedule = strings.TrimSpace(cfg.Report.DailySummarySchedule) + cfg.Report.WeeklySummarySchedule = strings.TrimSpace(cfg.Report.WeeklySummarySchedule) + cfg.Report.ErrorDigestSchedule = strings.TrimSpace(cfg.Report.ErrorDigestSchedule) + cfg.Report.AccountHealthSchedule = strings.TrimSpace(cfg.Report.AccountHealthSchedule) + + // Fill missing schedules with defaults to avoid breaking cron logic if clients send empty strings. + if cfg.Report.DailySummarySchedule == "" { + cfg.Report.DailySummarySchedule = "0 9 * * *" + } + if cfg.Report.WeeklySummarySchedule == "" { + cfg.Report.WeeklySummarySchedule = "0 9 * * 1" + } + if cfg.Report.ErrorDigestSchedule == "" { + cfg.Report.ErrorDigestSchedule = "0 9 * * *" + } + if cfg.Report.AccountHealthSchedule == "" { + cfg.Report.AccountHealthSchedule = "0 9 * * *" + } +} + +func validateOpsEmailNotificationConfig(cfg *OpsEmailNotificationConfig) error { + if cfg == nil { + return errors.New("invalid config") + } + + if cfg.Alert.RateLimitPerHour < 0 { + return errors.New("alert.rate_limit_per_hour must be >= 0") + } + if cfg.Alert.BatchingWindowSeconds < 0 { + return errors.New("alert.batching_window_seconds must be >= 0") + } + switch strings.TrimSpace(cfg.Alert.MinSeverity) { + case "", "critical", "warning", "info": + default: + return errors.New("alert.min_severity must be one of: critical, warning, info, or empty") + } + + if cfg.Report.ErrorDigestMinCount < 0 { + return errors.New("report.error_digest_min_count must be >= 0") + } + if cfg.Report.AccountHealthErrorRateThreshold < 0 || cfg.Report.AccountHealthErrorRateThreshold > 100 { + return errors.New("report.account_health_error_rate_threshold must be between 0 and 100") + } + return nil +} + +// ========================= +// Alert runtime settings +// ========================= + +func defaultOpsAlertRuntimeSettings() *OpsAlertRuntimeSettings { + return &OpsAlertRuntimeSettings{ + EvaluationIntervalSeconds: 60, + DistributedLock: OpsDistributedLockSettings{ + Enabled: true, + Key: opsAlertEvaluatorLeaderLockKeyDefault, + TTLSeconds: int(opsAlertEvaluatorLeaderLockTTLDefault.Seconds()), + }, + Silencing: OpsAlertSilencingSettings{ + Enabled: false, + GlobalUntilRFC3339: "", + GlobalReason: "", + Entries: 
[]OpsAlertSilenceEntry{}, + }, + } +} + +func normalizeOpsDistributedLockSettings(s *OpsDistributedLockSettings, defaultKey string, defaultTTLSeconds int) { + if s == nil { + return + } + s.Key = strings.TrimSpace(s.Key) + if s.Key == "" { + s.Key = defaultKey + } + if s.TTLSeconds <= 0 { + s.TTLSeconds = defaultTTLSeconds + } +} + +func normalizeOpsAlertSilencingSettings(s *OpsAlertSilencingSettings) { + if s == nil { + return + } + s.GlobalUntilRFC3339 = strings.TrimSpace(s.GlobalUntilRFC3339) + s.GlobalReason = strings.TrimSpace(s.GlobalReason) + if s.Entries == nil { + s.Entries = []OpsAlertSilenceEntry{} + } + for i := range s.Entries { + s.Entries[i].UntilRFC3339 = strings.TrimSpace(s.Entries[i].UntilRFC3339) + s.Entries[i].Reason = strings.TrimSpace(s.Entries[i].Reason) + } +} + +func validateOpsDistributedLockSettings(s OpsDistributedLockSettings) error { + if strings.TrimSpace(s.Key) == "" { + return errors.New("distributed_lock.key is required") + } + if s.TTLSeconds <= 0 || s.TTLSeconds > int((24*time.Hour).Seconds()) { + return errors.New("distributed_lock.ttl_seconds must be between 1 and 86400") + } + return nil +} + +func validateOpsAlertSilencingSettings(s OpsAlertSilencingSettings) error { + parse := func(raw string) error { + if strings.TrimSpace(raw) == "" { + return nil + } + if _, err := time.Parse(time.RFC3339, raw); err != nil { + return errors.New("silencing time must be RFC3339") + } + return nil + } + + if err := parse(s.GlobalUntilRFC3339); err != nil { + return err + } + for _, entry := range s.Entries { + if strings.TrimSpace(entry.UntilRFC3339) == "" { + return errors.New("silencing.entries.until_rfc3339 is required") + } + if _, err := time.Parse(time.RFC3339, entry.UntilRFC3339); err != nil { + return errors.New("silencing.entries.until_rfc3339 must be RFC3339") + } + } + return nil +} + +func (s *OpsService) GetOpsAlertRuntimeSettings(ctx context.Context) (*OpsAlertRuntimeSettings, error) { + defaultCfg := defaultOpsAlertRuntimeSettings() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsAlertRuntimeSettings) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsAlertRuntimeSettings, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsAlertRuntimeSettings{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + if cfg.EvaluationIntervalSeconds <= 0 { + cfg.EvaluationIntervalSeconds = defaultCfg.EvaluationIntervalSeconds + } + normalizeOpsDistributedLockSettings(&cfg.DistributedLock, opsAlertEvaluatorLeaderLockKeyDefault, defaultCfg.DistributedLock.TTLSeconds) + normalizeOpsAlertSilencingSettings(&cfg.Silencing) + + return cfg, nil +} + +func (s *OpsService) UpdateOpsAlertRuntimeSettings(ctx context.Context, cfg *OpsAlertRuntimeSettings) (*OpsAlertRuntimeSettings, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + if cfg.EvaluationIntervalSeconds < 1 || cfg.EvaluationIntervalSeconds > int((24*time.Hour).Seconds()) { + return nil, errors.New("evaluation_interval_seconds must be between 1 and 86400") + } + if cfg.DistributedLock.Enabled { + if err := 
validateOpsDistributedLockSettings(cfg.DistributedLock); err != nil { + return nil, err + } + } + if cfg.Silencing.Enabled { + if err := validateOpsAlertSilencingSettings(cfg.Silencing); err != nil { + return nil, err + } + } + + defaultCfg := defaultOpsAlertRuntimeSettings() + normalizeOpsDistributedLockSettings(&cfg.DistributedLock, opsAlertEvaluatorLeaderLockKeyDefault, defaultCfg.DistributedLock.TTLSeconds) + normalizeOpsAlertSilencingSettings(&cfg.Silencing) + + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsAlertRuntimeSettings, string(raw)); err != nil { + return nil, err + } + + // Return a fresh copy (avoid callers holding pointers into internal slices that may be mutated). + updated := &OpsAlertRuntimeSettings{} + _ = json.Unmarshal(raw, updated) + return updated, nil +} + +// ========================= +// Advanced settings +// ========================= + +func defaultOpsAdvancedSettings() *OpsAdvancedSettings { + return &OpsAdvancedSettings{ + DataRetention: OpsDataRetentionSettings{ + CleanupEnabled: false, + CleanupSchedule: "0 2 * * *", + ErrorLogRetentionDays: 30, + MinuteMetricsRetentionDays: 30, + HourlyMetricsRetentionDays: 30, + }, + Aggregation: OpsAggregationSettings{ + AggregationEnabled: false, + }, + IgnoreCountTokensErrors: false, + IgnoreContextCanceled: true, // Default to true - client disconnects are not errors + IgnoreNoAvailableAccounts: false, // Default to false - this is a real routing issue + AutoRefreshEnabled: false, + AutoRefreshIntervalSec: 30, + } +} + +func normalizeOpsAdvancedSettings(cfg *OpsAdvancedSettings) { + if cfg == nil { + return + } + cfg.DataRetention.CleanupSchedule = strings.TrimSpace(cfg.DataRetention.CleanupSchedule) + if cfg.DataRetention.CleanupSchedule == "" { + cfg.DataRetention.CleanupSchedule = "0 2 * * *" + } + if cfg.DataRetention.ErrorLogRetentionDays <= 0 { + cfg.DataRetention.ErrorLogRetentionDays = 30 + } + if cfg.DataRetention.MinuteMetricsRetentionDays <= 0 { + cfg.DataRetention.MinuteMetricsRetentionDays = 30 + } + if cfg.DataRetention.HourlyMetricsRetentionDays <= 0 { + cfg.DataRetention.HourlyMetricsRetentionDays = 30 + } + // Normalize auto refresh interval (default 30 seconds) + if cfg.AutoRefreshIntervalSec <= 0 { + cfg.AutoRefreshIntervalSec = 30 + } +} + +func validateOpsAdvancedSettings(cfg *OpsAdvancedSettings) error { + if cfg == nil { + return errors.New("invalid config") + } + if cfg.DataRetention.ErrorLogRetentionDays < 1 || cfg.DataRetention.ErrorLogRetentionDays > 365 { + return errors.New("error_log_retention_days must be between 1 and 365") + } + if cfg.DataRetention.MinuteMetricsRetentionDays < 1 || cfg.DataRetention.MinuteMetricsRetentionDays > 365 { + return errors.New("minute_metrics_retention_days must be between 1 and 365") + } + if cfg.DataRetention.HourlyMetricsRetentionDays < 1 || cfg.DataRetention.HourlyMetricsRetentionDays > 365 { + return errors.New("hourly_metrics_retention_days must be between 1 and 365") + } + if cfg.AutoRefreshIntervalSec < 15 || cfg.AutoRefreshIntervalSec > 300 { + return errors.New("auto_refresh_interval_seconds must be between 15 and 300") + } + return nil +} + +func (s *OpsService) GetOpsAdvancedSettings(ctx context.Context) (*OpsAdvancedSettings, error) { + defaultCfg := defaultOpsAdvancedSettings() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsAdvancedSettings) + if 
err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsAdvancedSettings, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsAdvancedSettings{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + normalizeOpsAdvancedSettings(cfg) + return cfg, nil +} + +func (s *OpsService) UpdateOpsAdvancedSettings(ctx context.Context, cfg *OpsAdvancedSettings) (*OpsAdvancedSettings, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + if err := validateOpsAdvancedSettings(cfg); err != nil { + return nil, err + } + + normalizeOpsAdvancedSettings(cfg) + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsAdvancedSettings, string(raw)); err != nil { + return nil, err + } + + updated := &OpsAdvancedSettings{} + _ = json.Unmarshal(raw, updated) + return updated, nil +} + +// ========================= +// Metric thresholds +// ========================= + +const SettingKeyOpsMetricThresholds = "ops_metric_thresholds" + +func defaultOpsMetricThresholds() *OpsMetricThresholds { + slaMin := 99.5 + ttftMax := 500.0 + reqErrMax := 5.0 + upstreamErrMax := 5.0 + return &OpsMetricThresholds{ + SLAPercentMin: &slaMin, + TTFTp99MsMax: &ttftMax, + RequestErrorRatePercentMax: &reqErrMax, + UpstreamErrorRatePercentMax: &upstreamErrMax, + } +} + +func (s *OpsService) GetMetricThresholds(ctx context.Context) (*OpsMetricThresholds, error) { + defaultCfg := defaultOpsMetricThresholds() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMetricThresholds) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsMetricThresholds, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsMetricThresholds{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + return cfg, nil +} + +func (s *OpsService) UpdateMetricThresholds(ctx context.Context, cfg *OpsMetricThresholds) (*OpsMetricThresholds, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + // Validate thresholds + if cfg.SLAPercentMin != nil && (*cfg.SLAPercentMin < 0 || *cfg.SLAPercentMin > 100) { + return nil, errors.New("sla_percent_min must be between 0 and 100") + } + if cfg.TTFTp99MsMax != nil && *cfg.TTFTp99MsMax < 0 { + return nil, errors.New("ttft_p99_ms_max must be >= 0") + } + if cfg.RequestErrorRatePercentMax != nil && (*cfg.RequestErrorRatePercentMax < 0 || *cfg.RequestErrorRatePercentMax > 100) { + return nil, errors.New("request_error_rate_percent_max must be between 0 and 100") + } + if cfg.UpstreamErrorRatePercentMax != nil && (*cfg.UpstreamErrorRatePercentMax < 0 || *cfg.UpstreamErrorRatePercentMax > 100) { + return nil, errors.New("upstream_error_rate_percent_max must be between 0 and 100") + } + + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := 
s.settingRepo.Set(ctx, SettingKeyOpsMetricThresholds, string(raw)); err != nil {
+		return nil, err
+	}
+
+	updated := &OpsMetricThresholds{}
+	_ = json.Unmarshal(raw, updated)
+	return updated, nil
+}
diff --git a/backend/internal/service/ops_settings_models.go b/backend/internal/service/ops_settings_models.go
new file mode 100644
index 00000000..df06f578
--- /dev/null
+++ b/backend/internal/service/ops_settings_models.go
@@ -0,0 +1,100 @@
+package service
+
+// Ops settings models stored in DB `settings` table (JSON blobs).
+
+type OpsEmailNotificationConfig struct {
+	Alert  OpsEmailAlertConfig  `json:"alert"`
+	Report OpsEmailReportConfig `json:"report"`
+}
+
+type OpsEmailAlertConfig struct {
+	Enabled               bool     `json:"enabled"`
+	Recipients            []string `json:"recipients"`
+	MinSeverity           string   `json:"min_severity"`
+	RateLimitPerHour      int      `json:"rate_limit_per_hour"`
+	BatchingWindowSeconds int      `json:"batching_window_seconds"`
+	IncludeResolvedAlerts bool     `json:"include_resolved_alerts"`
+}
+
+type OpsEmailReportConfig struct {
+	Enabled                         bool     `json:"enabled"`
+	Recipients                      []string `json:"recipients"`
+	DailySummaryEnabled             bool     `json:"daily_summary_enabled"`
+	DailySummarySchedule            string   `json:"daily_summary_schedule"`
+	WeeklySummaryEnabled            bool     `json:"weekly_summary_enabled"`
+	WeeklySummarySchedule           string   `json:"weekly_summary_schedule"`
+	ErrorDigestEnabled              bool     `json:"error_digest_enabled"`
+	ErrorDigestSchedule             string   `json:"error_digest_schedule"`
+	ErrorDigestMinCount             int      `json:"error_digest_min_count"`
+	AccountHealthEnabled            bool     `json:"account_health_enabled"`
+	AccountHealthSchedule           string   `json:"account_health_schedule"`
+	AccountHealthErrorRateThreshold float64  `json:"account_health_error_rate_threshold"`
+}
+
+// OpsEmailNotificationConfigUpdateRequest allows partial updates, while the
+// frontend can still send the full config shape.
+type OpsEmailNotificationConfigUpdateRequest struct {
+	Alert  *OpsEmailAlertConfig  `json:"alert"`
+	Report *OpsEmailReportConfig `json:"report"`
+}
+
+type OpsDistributedLockSettings struct {
+	Enabled    bool   `json:"enabled"`
+	Key        string `json:"key"`
+	TTLSeconds int    `json:"ttl_seconds"`
+}
+
+type OpsAlertSilenceEntry struct {
+	RuleID     *int64   `json:"rule_id,omitempty"`
+	Severities []string `json:"severities,omitempty"`
+
+	UntilRFC3339 string `json:"until_rfc3339"`
+	Reason       string `json:"reason"`
+}
+
+type OpsAlertSilencingSettings struct {
+	Enabled bool `json:"enabled"`
+
+	GlobalUntilRFC3339 string `json:"global_until_rfc3339"`
+	GlobalReason       string `json:"global_reason"`
+
+	Entries []OpsAlertSilenceEntry `json:"entries,omitempty"`
+}
+
+type OpsMetricThresholds struct {
+	SLAPercentMin               *float64 `json:"sla_percent_min,omitempty"`                 // turns red when SLA falls below this value
+	TTFTp99MsMax                *float64 `json:"ttft_p99_ms_max,omitempty"`                 // turns red when TTFT P99 exceeds this value
+	RequestErrorRatePercentMax  *float64 `json:"request_error_rate_percent_max,omitempty"`  // turns red when the request error rate exceeds this value
+	UpstreamErrorRatePercentMax *float64 `json:"upstream_error_rate_percent_max,omitempty"` // turns red when the upstream error rate exceeds this value
+}
+
+type OpsAlertRuntimeSettings struct {
+	EvaluationIntervalSeconds int `json:"evaluation_interval_seconds"`
+
+	DistributedLock OpsDistributedLockSettings `json:"distributed_lock"`
+	Silencing       OpsAlertSilencingSettings  `json:"silencing"`
+	Thresholds      OpsMetricThresholds        `json:"thresholds"` // metric threshold configuration
+}
+
+// OpsAdvancedSettings stores advanced ops configuration (data retention, aggregation).
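+// A stored value looks roughly like this (abridged sketch, defaults shown):
+//
+//	{
+//	  "data_retention": {"cleanup_enabled": false, "cleanup_schedule": "0 2 * * *"},
+//	  "aggregation": {"aggregation_enabled": false},
+//	  "ignore_context_canceled": true,
+//	  "auto_refresh_interval_seconds": 30
+//	}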
+type OpsAdvancedSettings struct { + DataRetention OpsDataRetentionSettings `json:"data_retention"` + Aggregation OpsAggregationSettings `json:"aggregation"` + IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"` + IgnoreContextCanceled bool `json:"ignore_context_canceled"` + IgnoreNoAvailableAccounts bool `json:"ignore_no_available_accounts"` + AutoRefreshEnabled bool `json:"auto_refresh_enabled"` + AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"` +} + +type OpsDataRetentionSettings struct { + CleanupEnabled bool `json:"cleanup_enabled"` + CleanupSchedule string `json:"cleanup_schedule"` + ErrorLogRetentionDays int `json:"error_log_retention_days"` + MinuteMetricsRetentionDays int `json:"minute_metrics_retention_days"` + HourlyMetricsRetentionDays int `json:"hourly_metrics_retention_days"` +} + +type OpsAggregationSettings struct { + AggregationEnabled bool `json:"aggregation_enabled"` +} diff --git a/backend/internal/service/ops_trend_models.go b/backend/internal/service/ops_trend_models.go new file mode 100644 index 00000000..f6d07c14 --- /dev/null +++ b/backend/internal/service/ops_trend_models.go @@ -0,0 +1,65 @@ +package service + +import "time" + +type OpsThroughputTrendPoint struct { + BucketStart time.Time `json:"bucket_start"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` + QPS float64 `json:"qps"` + TPS float64 `json:"tps"` +} + +type OpsThroughputPlatformBreakdownItem struct { + Platform string `json:"platform"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` +} + +type OpsThroughputGroupBreakdownItem struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` +} + +type OpsThroughputTrendResponse struct { + Bucket string `json:"bucket"` + + Points []*OpsThroughputTrendPoint `json:"points"` + + // Optional drilldown helpers: + // - When no platform/group is selected: returns totals by platform. + // - When platform is selected but group is not: returns top groups in that platform. 
+ ByPlatform []*OpsThroughputPlatformBreakdownItem `json:"by_platform,omitempty"` + TopGroups []*OpsThroughputGroupBreakdownItem `json:"top_groups,omitempty"` +} + +type OpsErrorTrendPoint struct { + BucketStart time.Time `json:"bucket_start"` + + ErrorCountTotal int64 `json:"error_count_total"` + BusinessLimitedCount int64 `json:"business_limited_count"` + ErrorCountSLA int64 `json:"error_count_sla"` + + UpstreamErrorCountExcl429529 int64 `json:"upstream_error_count_excl_429_529"` + Upstream429Count int64 `json:"upstream_429_count"` + Upstream529Count int64 `json:"upstream_529_count"` +} + +type OpsErrorTrendResponse struct { + Bucket string `json:"bucket"` + Points []*OpsErrorTrendPoint `json:"points"` +} + +type OpsErrorDistributionItem struct { + StatusCode int `json:"status_code"` + Total int64 `json:"total"` + SLA int64 `json:"sla"` + BusinessLimited int64 `json:"business_limited"` +} + +type OpsErrorDistributionResponse struct { + Total int64 `json:"total"` + Items []*OpsErrorDistributionItem `json:"items"` +} diff --git a/backend/internal/service/ops_trends.go b/backend/internal/service/ops_trends.go new file mode 100644 index 00000000..ec55c6ce --- /dev/null +++ b/backend/internal/service/ops_trends.go @@ -0,0 +1,26 @@ +package service + +import ( + "context" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) GetThroughputTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsThroughputTrendResponse, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + return s.opsRepo.GetThroughputTrend(ctx, filter, bucketSeconds) +} diff --git a/backend/internal/service/ops_upstream_context.go b/backend/internal/service/ops_upstream_context.go new file mode 100644 index 00000000..96bcc9fe --- /dev/null +++ b/backend/internal/service/ops_upstream_context.go @@ -0,0 +1,131 @@ +package service + +import ( + "encoding/json" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +// Gin context keys used by Ops error logger for capturing upstream error details. +// These keys are set by gateway services and consumed by handler/ops_error_logger.go. +const ( + OpsUpstreamStatusCodeKey = "ops_upstream_status_code" + OpsUpstreamErrorMessageKey = "ops_upstream_error_message" + OpsUpstreamErrorDetailKey = "ops_upstream_error_detail" + OpsUpstreamErrorsKey = "ops_upstream_errors" + + // Best-effort capture of the current upstream request body so ops can + // retry the specific upstream attempt (not just the client request). + // This value is sanitized+trimmed before being persisted. 
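+	// A gateway service would typically populate it via the Gin context, e.g.
+	// (hypothetical call site):
+	//
+	//	c.Set(OpsUpstreamRequestBodyKey, string(upstreamBodyBytes))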
+ OpsUpstreamRequestBodyKey = "ops_upstream_request_body" +) + +func setOpsUpstreamError(c *gin.Context, upstreamStatusCode int, upstreamMessage, upstreamDetail string) { + if c == nil { + return + } + if upstreamStatusCode > 0 { + c.Set(OpsUpstreamStatusCodeKey, upstreamStatusCode) + } + if msg := strings.TrimSpace(upstreamMessage); msg != "" { + c.Set(OpsUpstreamErrorMessageKey, msg) + } + if detail := strings.TrimSpace(upstreamDetail); detail != "" { + c.Set(OpsUpstreamErrorDetailKey, detail) + } +} + +// OpsUpstreamErrorEvent describes one upstream error attempt during a single gateway request. +// It is stored in ops_error_logs.upstream_errors as a JSON array. +type OpsUpstreamErrorEvent struct { + AtUnixMs int64 `json:"at_unix_ms,omitempty"` + + // Context + Platform string `json:"platform,omitempty"` + AccountID int64 `json:"account_id,omitempty"` + AccountName string `json:"account_name,omitempty"` + + // Outcome + UpstreamStatusCode int `json:"upstream_status_code,omitempty"` + UpstreamRequestID string `json:"upstream_request_id,omitempty"` + + // Best-effort upstream request capture (sanitized+trimmed). + // Required for retrying a specific upstream attempt. + UpstreamRequestBody string `json:"upstream_request_body,omitempty"` + + // Best-effort upstream response capture (sanitized+trimmed). + UpstreamResponseBody string `json:"upstream_response_body,omitempty"` + + // Kind: http_error | request_error | retry_exhausted | failover + Kind string `json:"kind,omitempty"` + + Message string `json:"message,omitempty"` + Detail string `json:"detail,omitempty"` +} + +func appendOpsUpstreamError(c *gin.Context, ev OpsUpstreamErrorEvent) { + if c == nil { + return + } + if ev.AtUnixMs <= 0 { + ev.AtUnixMs = time.Now().UnixMilli() + } + ev.Platform = strings.TrimSpace(ev.Platform) + ev.UpstreamRequestID = strings.TrimSpace(ev.UpstreamRequestID) + ev.UpstreamRequestBody = strings.TrimSpace(ev.UpstreamRequestBody) + ev.UpstreamResponseBody = strings.TrimSpace(ev.UpstreamResponseBody) + ev.Kind = strings.TrimSpace(ev.Kind) + ev.Message = strings.TrimSpace(ev.Message) + ev.Detail = strings.TrimSpace(ev.Detail) + if ev.Message != "" { + ev.Message = sanitizeUpstreamErrorMessage(ev.Message) + } + + // If the caller didn't explicitly pass upstream request body but the gateway + // stored it on the context, attach it so ops can retry this specific attempt. + if ev.UpstreamRequestBody == "" { + if v, ok := c.Get(OpsUpstreamRequestBodyKey); ok { + if s, ok := v.(string); ok { + ev.UpstreamRequestBody = strings.TrimSpace(s) + } + } + } + + var existing []*OpsUpstreamErrorEvent + if v, ok := c.Get(OpsUpstreamErrorsKey); ok { + if arr, ok := v.([]*OpsUpstreamErrorEvent); ok { + existing = arr + } + } + + evCopy := ev + existing = append(existing, &evCopy) + c.Set(OpsUpstreamErrorsKey, existing) +} + +func marshalOpsUpstreamErrors(events []*OpsUpstreamErrorEvent) *string { + if len(events) == 0 { + return nil + } + // Ensure we always store a valid JSON value. 
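+	// A nil pointer (rather than the literal string "null") signals "no upstream errors".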
+ raw, err := json.Marshal(events) + if err != nil || len(raw) == 0 { + return nil + } + s := string(raw) + return &s +} + +func ParseOpsUpstreamErrors(raw string) ([]*OpsUpstreamErrorEvent, error) { + raw = strings.TrimSpace(raw) + if raw == "" { + return []*OpsUpstreamErrorEvent{}, nil + } + var out []*OpsUpstreamErrorEvent + if err := json.Unmarshal([]byte(raw), &out); err != nil { + return nil, err + } + return out, nil +} diff --git a/backend/internal/service/ops_window_stats.go b/backend/internal/service/ops_window_stats.go new file mode 100644 index 00000000..71021d15 --- /dev/null +++ b/backend/internal/service/ops_window_stats.go @@ -0,0 +1,24 @@ +package service + +import ( + "context" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// GetWindowStats returns lightweight request/token counts for the provided window. +// It is intended for realtime sampling (e.g. WebSocket QPS push) without computing percentiles/peaks. +func (s *OpsService) GetWindowStats(ctx context.Context, startTime, endTime time.Time) (*OpsWindowStats, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + filter := &OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + } + return s.opsRepo.GetWindowStats(ctx, filter) +} diff --git a/backend/internal/service/pricing_service.go b/backend/internal/service/pricing_service.go new file mode 100644 index 00000000..392fb65c --- /dev/null +++ b/backend/internal/service/pricing_service.go @@ -0,0 +1,745 @@ +package service + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" +) + +var ( + openAIModelDatePattern = regexp.MustCompile(`-\d{8}$`) + openAIModelBasePattern = regexp.MustCompile(`^(gpt-\d+(?:\.\d+)?)(?:-|$)`) +) + +// LiteLLMModelPricing LiteLLM价格数据结构 +// 只保留我们需要的字段,使用指针来处理可能缺失的值 +type LiteLLMModelPricing struct { + InputCostPerToken float64 `json:"input_cost_per_token"` + OutputCostPerToken float64 `json:"output_cost_per_token"` + CacheCreationInputTokenCost float64 `json:"cache_creation_input_token_cost"` + CacheReadInputTokenCost float64 `json:"cache_read_input_token_cost"` + LiteLLMProvider string `json:"litellm_provider"` + Mode string `json:"mode"` + SupportsPromptCaching bool `json:"supports_prompt_caching"` + OutputCostPerImage float64 `json:"output_cost_per_image"` // 图片生成模型每张图片价格 +} + +// PricingRemoteClient 远程价格数据获取接口 +type PricingRemoteClient interface { + FetchPricingJSON(ctx context.Context, url string) ([]byte, error) + FetchHashText(ctx context.Context, url string) (string, error) +} + +// LiteLLMRawEntry 用于解析原始JSON数据 +type LiteLLMRawEntry struct { + InputCostPerToken *float64 `json:"input_cost_per_token"` + OutputCostPerToken *float64 `json:"output_cost_per_token"` + CacheCreationInputTokenCost *float64 `json:"cache_creation_input_token_cost"` + CacheReadInputTokenCost *float64 `json:"cache_read_input_token_cost"` + LiteLLMProvider string `json:"litellm_provider"` + Mode string `json:"mode"` + SupportsPromptCaching bool `json:"supports_prompt_caching"` + OutputCostPerImage *float64 `json:"output_cost_per_image"` +} + +// PricingService 动态价格服务 +type PricingService struct { + 
cfg *config.Config + remoteClient PricingRemoteClient + mu sync.RWMutex + pricingData map[string]*LiteLLMModelPricing + lastUpdated time.Time + localHash string + + // 停止信号 + stopCh chan struct{} + wg sync.WaitGroup +} + +// NewPricingService 创建价格服务 +func NewPricingService(cfg *config.Config, remoteClient PricingRemoteClient) *PricingService { + s := &PricingService{ + cfg: cfg, + remoteClient: remoteClient, + pricingData: make(map[string]*LiteLLMModelPricing), + stopCh: make(chan struct{}), + } + return s +} + +// Initialize 初始化价格服务 +func (s *PricingService) Initialize() error { + // 确保数据目录存在 + if err := os.MkdirAll(s.cfg.Pricing.DataDir, 0755); err != nil { + log.Printf("[Pricing] Failed to create data directory: %v", err) + } + + // 首次加载价格数据 + if err := s.checkAndUpdatePricing(); err != nil { + log.Printf("[Pricing] Initial load failed, using fallback: %v", err) + if err := s.useFallbackPricing(); err != nil { + return fmt.Errorf("failed to load pricing data: %w", err) + } + } + + // 启动定时更新 + s.startUpdateScheduler() + + log.Printf("[Pricing] Service initialized with %d models", len(s.pricingData)) + return nil +} + +// Stop 停止价格服务 +func (s *PricingService) Stop() { + close(s.stopCh) + s.wg.Wait() + log.Println("[Pricing] Service stopped") +} + +// startUpdateScheduler 启动定时更新调度器 +func (s *PricingService) startUpdateScheduler() { + // 定期检查哈希更新 + hashInterval := time.Duration(s.cfg.Pricing.HashCheckIntervalMinutes) * time.Minute + if hashInterval < time.Minute { + hashInterval = 10 * time.Minute + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + ticker := time.NewTicker(hashInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := s.syncWithRemote(); err != nil { + log.Printf("[Pricing] Sync failed: %v", err) + } + case <-s.stopCh: + return + } + } + }() + + log.Printf("[Pricing] Update scheduler started (check every %v)", hashInterval) +} + +// checkAndUpdatePricing 检查并更新价格数据 +func (s *PricingService) checkAndUpdatePricing() error { + pricingFile := s.getPricingFilePath() + + // 检查本地文件是否存在 + if _, err := os.Stat(pricingFile); os.IsNotExist(err) { + log.Println("[Pricing] Local pricing file not found, downloading...") + return s.downloadPricingData() + } + + // 检查文件是否过期 + info, err := os.Stat(pricingFile) + if err != nil { + return s.downloadPricingData() + } + + fileAge := time.Since(info.ModTime()) + maxAge := time.Duration(s.cfg.Pricing.UpdateIntervalHours) * time.Hour + + if fileAge > maxAge { + log.Printf("[Pricing] Local file is %v old, updating...", fileAge.Round(time.Hour)) + if err := s.downloadPricingData(); err != nil { + log.Printf("[Pricing] Download failed, using existing file: %v", err) + } + } + + // 加载本地文件 + return s.loadPricingData(pricingFile) +} + +// syncWithRemote 与远程同步(基于哈希校验) +func (s *PricingService) syncWithRemote() error { + pricingFile := s.getPricingFilePath() + + // 计算本地文件哈希 + localHash, err := s.computeFileHash(pricingFile) + if err != nil { + log.Printf("[Pricing] Failed to compute local hash: %v", err) + return s.downloadPricingData() + } + + // 如果配置了哈希URL,从远程获取哈希进行比对 + if s.cfg.Pricing.HashURL != "" { + remoteHash, err := s.fetchRemoteHash() + if err != nil { + log.Printf("[Pricing] Failed to fetch remote hash: %v", err) + return nil // 哈希获取失败不影响正常使用 + } + + if remoteHash != localHash { + log.Println("[Pricing] Remote hash differs, downloading new version...") + return s.downloadPricingData() + } + log.Println("[Pricing] Hash check passed, no update needed") + return nil + } + + // 没有哈希URL时,基于时间检查 + info, err := 
os.Stat(pricingFile) + if err != nil { + return s.downloadPricingData() + } + + fileAge := time.Since(info.ModTime()) + maxAge := time.Duration(s.cfg.Pricing.UpdateIntervalHours) * time.Hour + + if fileAge > maxAge { + log.Printf("[Pricing] File is %v old, downloading...", fileAge.Round(time.Hour)) + return s.downloadPricingData() + } + + return nil +} + +// downloadPricingData 从远程下载价格数据 +func (s *PricingService) downloadPricingData() error { + remoteURL, err := s.validatePricingURL(s.cfg.Pricing.RemoteURL) + if err != nil { + return err + } + log.Printf("[Pricing] Downloading from %s", remoteURL) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + var expectedHash string + if strings.TrimSpace(s.cfg.Pricing.HashURL) != "" { + expectedHash, err = s.fetchRemoteHash() + if err != nil { + return fmt.Errorf("fetch remote hash: %w", err) + } + } + + body, err := s.remoteClient.FetchPricingJSON(ctx, remoteURL) + if err != nil { + return fmt.Errorf("download failed: %w", err) + } + + if expectedHash != "" { + actualHash := sha256.Sum256(body) + if !strings.EqualFold(expectedHash, hex.EncodeToString(actualHash[:])) { + return fmt.Errorf("pricing hash mismatch") + } + } + + // 解析JSON数据(使用灵活的解析方式) + data, err := s.parsePricingData(body) + if err != nil { + return fmt.Errorf("parse pricing data: %w", err) + } + + // 保存到本地文件 + pricingFile := s.getPricingFilePath() + if err := os.WriteFile(pricingFile, body, 0644); err != nil { + log.Printf("[Pricing] Failed to save file: %v", err) + } + + // 保存哈希 + hash := sha256.Sum256(body) + hashStr := hex.EncodeToString(hash[:]) + hashFile := s.getHashFilePath() + if err := os.WriteFile(hashFile, []byte(hashStr+"\n"), 0644); err != nil { + log.Printf("[Pricing] Failed to save hash: %v", err) + } + + // 更新内存数据 + s.mu.Lock() + s.pricingData = data + s.lastUpdated = time.Now() + s.localHash = hashStr + s.mu.Unlock() + + log.Printf("[Pricing] Downloaded %d models successfully", len(data)) + return nil +} + +// parsePricingData 解析价格数据(处理各种格式) +func (s *PricingService) parsePricingData(body []byte) (map[string]*LiteLLMModelPricing, error) { + // 首先解析为 map[string]json.RawMessage + var rawData map[string]json.RawMessage + if err := json.Unmarshal(body, &rawData); err != nil { + return nil, fmt.Errorf("parse raw JSON: %w", err) + } + + result := make(map[string]*LiteLLMModelPricing) + skipped := 0 + + for modelName, rawEntry := range rawData { + // 跳过 sample_spec 等文档条目 + if modelName == "sample_spec" { + continue + } + + // 尝试解析每个条目 + var entry LiteLLMRawEntry + if err := json.Unmarshal(rawEntry, &entry); err != nil { + skipped++ + continue + } + + // 只保留有有效价格的条目 + if entry.InputCostPerToken == nil && entry.OutputCostPerToken == nil { + continue + } + + pricing := &LiteLLMModelPricing{ + LiteLLMProvider: entry.LiteLLMProvider, + Mode: entry.Mode, + SupportsPromptCaching: entry.SupportsPromptCaching, + } + + if entry.InputCostPerToken != nil { + pricing.InputCostPerToken = *entry.InputCostPerToken + } + if entry.OutputCostPerToken != nil { + pricing.OutputCostPerToken = *entry.OutputCostPerToken + } + if entry.CacheCreationInputTokenCost != nil { + pricing.CacheCreationInputTokenCost = *entry.CacheCreationInputTokenCost + } + if entry.CacheReadInputTokenCost != nil { + pricing.CacheReadInputTokenCost = *entry.CacheReadInputTokenCost + } + if entry.OutputCostPerImage != nil { + pricing.OutputCostPerImage = *entry.OutputCostPerImage + } + + result[modelName] = pricing + } + + if skipped > 0 { + log.Printf("[Pricing] Skipped %d 
invalid entries", skipped) + } + + if len(result) == 0 { + return nil, fmt.Errorf("no valid pricing entries found") + } + + return result, nil +} + +// loadPricingData 从本地文件加载价格数据 +func (s *PricingService) loadPricingData(filePath string) error { + data, err := os.ReadFile(filePath) + if err != nil { + return fmt.Errorf("read file failed: %w", err) + } + + // 使用灵活的解析方式 + pricingData, err := s.parsePricingData(data) + if err != nil { + return fmt.Errorf("parse pricing data: %w", err) + } + + // 计算哈希 + hash := sha256.Sum256(data) + hashStr := hex.EncodeToString(hash[:]) + + s.mu.Lock() + s.pricingData = pricingData + s.localHash = hashStr + + info, _ := os.Stat(filePath) + if info != nil { + s.lastUpdated = info.ModTime() + } else { + s.lastUpdated = time.Now() + } + s.mu.Unlock() + + log.Printf("[Pricing] Loaded %d models from %s", len(pricingData), filePath) + return nil +} + +// useFallbackPricing 使用回退价格文件 +func (s *PricingService) useFallbackPricing() error { + fallbackFile := s.cfg.Pricing.FallbackFile + + if _, err := os.Stat(fallbackFile); os.IsNotExist(err) { + return fmt.Errorf("fallback file not found: %s", fallbackFile) + } + + log.Printf("[Pricing] Using fallback file: %s", fallbackFile) + + // 复制到数据目录 + data, err := os.ReadFile(fallbackFile) + if err != nil { + return fmt.Errorf("read fallback failed: %w", err) + } + + pricingFile := s.getPricingFilePath() + if err := os.WriteFile(pricingFile, data, 0644); err != nil { + log.Printf("[Pricing] Failed to copy fallback: %v", err) + } + + return s.loadPricingData(fallbackFile) +} + +// fetchRemoteHash 从远程获取哈希值 +func (s *PricingService) fetchRemoteHash() (string, error) { + hashURL, err := s.validatePricingURL(s.cfg.Pricing.HashURL) + if err != nil { + return "", err + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + hash, err := s.remoteClient.FetchHashText(ctx, hashURL) + if err != nil { + return "", err + } + return strings.TrimSpace(hash), nil +} + +func (s *PricingService) validatePricingURL(raw string) (string, error) { + if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { + normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) + if err != nil { + return "", fmt.Errorf("invalid pricing url: %w", err) + } + return normalized, nil + } + normalized, err := urlvalidator.ValidateHTTPSURL(raw, urlvalidator.ValidationOptions{ + AllowedHosts: s.cfg.Security.URLAllowlist.PricingHosts, + RequireAllowlist: true, + AllowPrivate: s.cfg.Security.URLAllowlist.AllowPrivateHosts, + }) + if err != nil { + return "", fmt.Errorf("invalid pricing url: %w", err) + } + return normalized, nil +} + +// computeFileHash 计算文件哈希 +func (s *PricingService) computeFileHash(filePath string) (string, error) { + data, err := os.ReadFile(filePath) + if err != nil { + return "", err + } + hash := sha256.Sum256(data) + return hex.EncodeToString(hash[:]), nil +} + +// GetModelPricing 获取模型价格(带模糊匹配) +func (s *PricingService) GetModelPricing(modelName string) *LiteLLMModelPricing { + s.mu.RLock() + defer s.mu.RUnlock() + + if modelName == "" { + return nil + } + + // 标准化模型名称(同时兼容 "models/xxx"、VertexAI 资源名等前缀) + modelLower := strings.ToLower(strings.TrimSpace(modelName)) + lookupCandidates := s.buildModelLookupCandidates(modelLower) + + // 1. 精确匹配 + for _, candidate := range lookupCandidates { + if candidate == "" { + continue + } + if pricing, ok := s.pricingData[candidate]; ok { + return pricing + } + } + + // 2. 
处理常见的模型名称变体 + // claude-opus-4-5-20251101 -> claude-opus-4.5-20251101 + for _, candidate := range lookupCandidates { + normalized := strings.ReplaceAll(candidate, "-4-5-", "-4.5-") + if pricing, ok := s.pricingData[normalized]; ok { + return pricing + } + } + + // 3. 尝试模糊匹配(去掉版本号后缀) + // claude-opus-4-5-20251101 -> claude-opus-4.5 + baseName := s.extractBaseName(lookupCandidates[0]) + for key, pricing := range s.pricingData { + keyBase := s.extractBaseName(strings.ToLower(key)) + if keyBase == baseName { + return pricing + } + } + + // 4. 基于模型系列匹配(Claude) + if pricing := s.matchByModelFamily(lookupCandidates[0]); pricing != nil { + return pricing + } + + // 5. OpenAI 模型回退策略 + if strings.HasPrefix(lookupCandidates[0], "gpt-") { + return s.matchOpenAIModel(lookupCandidates[0]) + } + + return nil +} + +func (s *PricingService) buildModelLookupCandidates(modelLower string) []string { + // Prefer canonical model name first (this also improves billing compatibility with "models/xxx"). + candidates := []string{ + normalizeModelNameForPricing(modelLower), + modelLower, + } + candidates = append(candidates, + strings.TrimPrefix(modelLower, "models/"), + lastSegment(modelLower), + lastSegment(strings.TrimPrefix(modelLower, "models/")), + ) + + seen := make(map[string]struct{}, len(candidates)) + out := make([]string, 0, len(candidates)) + for _, c := range candidates { + c = strings.TrimSpace(c) + if c == "" { + continue + } + if _, ok := seen[c]; ok { + continue + } + seen[c] = struct{}{} + out = append(out, c) + } + if len(out) == 0 { + return []string{modelLower} + } + return out +} + +func normalizeModelNameForPricing(model string) string { + // Common Gemini/VertexAI forms: + // - models/gemini-2.0-flash-exp + // - publishers/google/models/gemini-1.5-pro + // - projects/.../locations/.../publishers/google/models/gemini-1.5-pro + model = strings.TrimSpace(model) + model = strings.TrimLeft(model, "/") + model = strings.TrimPrefix(model, "models/") + model = strings.TrimPrefix(model, "publishers/google/models/") + + if idx := strings.LastIndex(model, "/publishers/google/models/"); idx != -1 { + model = model[idx+len("/publishers/google/models/"):] + } + if idx := strings.LastIndex(model, "/models/"); idx != -1 { + model = model[idx+len("/models/"):] + } + + model = strings.TrimLeft(model, "/") + return model +} + +func lastSegment(model string) string { + if idx := strings.LastIndex(model, "/"); idx != -1 { + return model[idx+1:] + } + return model +} + +// extractBaseName 提取基础模型名称(去掉日期版本号) +func (s *PricingService) extractBaseName(model string) string { + // 移除日期后缀 (如 -20251101, -20241022) + parts := strings.Split(model, "-") + result := make([]string, 0, len(parts)) + for _, part := range parts { + // 跳过看起来像日期的部分(8位数字) + if len(part) == 8 && isNumeric(part) { + continue + } + // 跳过版本号(如 v1:0) + if strings.Contains(part, ":") { + continue + } + result = append(result, part) + } + return strings.Join(result, "-") +} + +// matchByModelFamily 基于模型系列匹配 +func (s *PricingService) matchByModelFamily(model string) *LiteLLMModelPricing { + // Claude模型系列匹配规则 + familyPatterns := map[string][]string{ + "opus-4.5": {"claude-opus-4.5", "claude-opus-4-5"}, + "opus-4": {"claude-opus-4", "claude-3-opus"}, + "sonnet-4.5": {"claude-sonnet-4.5", "claude-sonnet-4-5"}, + "sonnet-4": {"claude-sonnet-4", "claude-3-5-sonnet"}, + "sonnet-3.5": {"claude-3-5-sonnet", "claude-3.5-sonnet"}, + "sonnet-3": {"claude-3-sonnet"}, + "haiku-3.5": {"claude-3-5-haiku", "claude-3.5-haiku"}, + "haiku-3": {"claude-3-haiku"}, + } + + // 
+// extractBaseName extracts the base model name (dropping date versions)
+func (s *PricingService) extractBaseName(model string) string {
+    // Remove date suffixes (e.g. -20251101, -20241022)
+    parts := strings.Split(model, "-")
+    result := make([]string, 0, len(parts))
+    for _, part := range parts {
+        // Skip parts that look like dates (8 digits)
+        if len(part) == 8 && isNumeric(part) {
+            continue
+        }
+        // Skip version markers (e.g. v1:0)
+        if strings.Contains(part, ":") {
+            continue
+        }
+        result = append(result, part)
+    }
+    return strings.Join(result, "-")
+}
+
+// matchByModelFamily matches by model family
+func (s *PricingService) matchByModelFamily(model string) *LiteLLMModelPricing {
+    // Claude model family matching rules
+    familyPatterns := map[string][]string{
+        "opus-4.5": {"claude-opus-4.5", "claude-opus-4-5"},
+        "opus-4": {"claude-opus-4", "claude-3-opus"},
+        "sonnet-4.5": {"claude-sonnet-4.5", "claude-sonnet-4-5"},
+        "sonnet-4": {"claude-sonnet-4", "claude-3-5-sonnet"},
+        "sonnet-3.5": {"claude-3-5-sonnet", "claude-3.5-sonnet"},
+        "sonnet-3": {"claude-3-sonnet"},
+        "haiku-3.5": {"claude-3-5-haiku", "claude-3.5-haiku"},
+        "haiku-3": {"claude-3-haiku"},
+    }
+
+    // Determine which family the model belongs to
+    var matchedFamily string
+    for family, patterns := range familyPatterns {
+        for _, pattern := range patterns {
+            if strings.Contains(model, pattern) || strings.Contains(model, strings.ReplaceAll(pattern, "-", "")) {
+                matchedFamily = family
+                break
+            }
+        }
+        if matchedFamily != "" {
+            break
+        }
+    }
+
+    if matchedFamily == "" {
+        // Simple family heuristics
+        if strings.Contains(model, "opus") {
+            if strings.Contains(model, "4.5") || strings.Contains(model, "4-5") {
+                matchedFamily = "opus-4.5"
+            } else {
+                matchedFamily = "opus-4"
+            }
+        } else if strings.Contains(model, "sonnet") {
+            if strings.Contains(model, "4.5") || strings.Contains(model, "4-5") {
+                matchedFamily = "sonnet-4.5"
+            } else if strings.Contains(model, "3-5") || strings.Contains(model, "3.5") {
+                matchedFamily = "sonnet-3.5"
+            } else {
+                matchedFamily = "sonnet-4"
+            }
+        } else if strings.Contains(model, "haiku") {
+            if strings.Contains(model, "3-5") || strings.Contains(model, "3.5") {
+                matchedFamily = "haiku-3.5"
+            } else {
+                matchedFamily = "haiku-3"
+            }
+        }
+    }
+
+    if matchedFamily == "" {
+        return nil
+    }
+
+    // Look up a model of that family in the pricing data
+    patterns := familyPatterns[matchedFamily]
+    for _, pattern := range patterns {
+        for key, pricing := range s.pricingData {
+            keyLower := strings.ToLower(key)
+            if strings.Contains(keyLower, pattern) {
+                log.Printf("[Pricing] Fuzzy matched %s -> %s", model, key)
+                return pricing
+            }
+        }
+    }
+
+    return nil
+}
+
+// matchOpenAIModel is the fallback matching strategy for OpenAI models.
+// Fallback order:
+// 1. gpt-5.2-codex -> gpt-5.2 (strip suffixes such as -codex, -mini, -max)
+// 2. gpt-5.2-20251222 -> gpt-5.2 (strip the date version)
+// 3. finally fall back to DefaultTestModel (gpt-5.1-codex)
+func (s *PricingService) matchOpenAIModel(model string) *LiteLLMModelPricing {
+    // Fallback variants to try
+    variants := s.generateOpenAIModelVariants(model, openAIModelDatePattern)
+
+    for _, variant := range variants {
+        if pricing, ok := s.pricingData[variant]; ok {
+            log.Printf("[Pricing] OpenAI fallback matched %s -> %s", model, variant)
+            return pricing
+        }
+    }
+
+    // Final fallback to DefaultTestModel
+    defaultModel := strings.ToLower(openai.DefaultTestModel)
+    if pricing, ok := s.pricingData[defaultModel]; ok {
+        log.Printf("[Pricing] OpenAI fallback to default model %s -> %s", model, defaultModel)
+        return pricing
+    }
+
+    return nil
+}
+
+// generateOpenAIModelVariants builds the list of fallback variants for an OpenAI model
+func (s *PricingService) generateOpenAIModelVariants(model string, datePattern *regexp.Regexp) []string {
+    seen := make(map[string]bool)
+    var variants []string
+
+    addVariant := func(v string) {
+        if v != model && !seen[v] {
+            seen[v] = true
+            variants = append(variants, v)
+        }
+    }
+
+    // 1. Strip the date version: gpt-5.2-20251222 -> gpt-5.2
+    withoutDate := datePattern.ReplaceAllString(model, "")
+    if withoutDate != model {
+        addVariant(withoutDate)
+    }
+
+    // 2. Extract the base version: gpt-5.2-codex -> gpt-5.2
+    // Only matches purely numeric versions (gpt-X or gpt-X.Y), not letter-suffixed names like gpt-4o
+    if matches := openAIModelBasePattern.FindStringSubmatch(model); len(matches) > 1 {
+        addVariant(matches[1])
+    }
+
+    // 3. Strip the date first, then extract the base version
+    if withoutDate != model {
+        if matches := openAIModelBasePattern.FindStringSubmatch(withoutDate); len(matches) > 1 {
+            addVariant(matches[1])
+        }
+    }
+
+    return variants
+}
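The package-level `openAIModelDatePattern` and `openAIModelBasePattern` regexes are defined elsewhere in this file and are not shown in this hunk, so the patterns in the sketch below are plausible stand-ins rather than the patch's actual definitions. With that caveat, the fallback chain can be exercised like this:

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical stand-ins for the patterns the service defines elsewhere:
// a trailing 8-digit date suffix, and a bare "gpt-X[.Y]" base version that
// must be followed by "-" or end-of-string (so "gpt-4o" does not match).
var (
	datePattern = regexp.MustCompile(`-\d{8}$`)
	basePattern = regexp.MustCompile(`^(gpt-\d+(?:\.\d+)?)(?:$|-)`)
)

func variants(model string) []string {
	seen := map[string]bool{model: true}
	var out []string
	add := func(v string) {
		if !seen[v] {
			seen[v] = true
			out = append(out, v)
		}
	}
	withoutDate := datePattern.ReplaceAllString(model, "")
	add(withoutDate) // no-op when there was no date suffix
	if m := basePattern.FindStringSubmatch(model); len(m) > 1 {
		add(m[1])
	}
	if m := basePattern.FindStringSubmatch(withoutDate); len(m) > 1 {
		add(m[1])
	}
	return out
}

func main() {
	fmt.Println(variants("gpt-5.2-codex"))    // [gpt-5.2]
	fmt.Println(variants("gpt-5.2-20251222")) // [gpt-5.2]
}
```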
+// GetStatus reports the service status
+func (s *PricingService) GetStatus() map[string]any {
+    s.mu.RLock()
+    defer s.mu.RUnlock()
+
+    return map[string]any{
+        "model_count": len(s.pricingData),
+        "last_updated": s.lastUpdated,
+        "local_hash": s.localHash[:min(8, len(s.localHash))],
+    }
+}
+
+// ForceUpdate forces a refresh
+func (s *PricingService) ForceUpdate() error {
+    return s.downloadPricingData()
+}
+
+// getPricingFilePath returns the pricing file path
+func (s *PricingService) getPricingFilePath() string {
+    return filepath.Join(s.cfg.Pricing.DataDir, "model_pricing.json")
+}
+
+// getHashFilePath returns the hash file path
+func (s *PricingService) getHashFilePath() string {
+    return filepath.Join(s.cfg.Pricing.DataDir, "model_pricing.sha256")
+}
+
+// isNumeric reports whether the string is all digits
+func isNumeric(s string) bool {
+    for _, c := range s {
+        if c < '0' || c > '9' {
+            return false
+        }
+    }
+    return true
+}
diff --git a/backend/internal/service/promo_code.go b/backend/internal/service/promo_code.go
new file mode 100644
index 00000000..94e733a8
--- /dev/null
+++ b/backend/internal/service/promo_code.go
@@ -0,0 +1,73 @@
+package service
+
+import (
+    "time"
+)
+
+// PromoCode is a registration promo code
+type PromoCode struct {
+    ID int64
+    Code string
+    BonusAmount float64
+    MaxUses int
+    UsedCount int
+    Status string
+    ExpiresAt *time.Time
+    Notes string
+    CreatedAt time.Time
+    UpdatedAt time.Time
+
+    // Associations
+    UsageRecords []PromoCodeUsage
+}
+
+// PromoCodeUsage records a single redemption of a promo code
+type PromoCodeUsage struct {
+    ID int64
+    PromoCodeID int64
+    UserID int64
+    BonusAmount float64
+    UsedAt time.Time
+
+    // Associations
+    PromoCode *PromoCode
+    User *User
+}
+
+// CanUse reports whether the promo code can still be used
+func (p *PromoCode) CanUse() bool {
+    if p.Status != PromoCodeStatusActive {
+        return false
+    }
+    if p.ExpiresAt != nil && time.Now().After(*p.ExpiresAt) {
+        return false
+    }
+    if p.MaxUses > 0 && p.UsedCount >= p.MaxUses {
+        return false
+    }
+    return true
+}
+
+// IsExpired reports whether the promo code has expired
+func (p *PromoCode) IsExpired() bool {
+    return p.ExpiresAt != nil && time.Now().After(*p.ExpiresAt)
+}
+
+// CreatePromoCodeInput is the input for creating a promo code
+type CreatePromoCodeInput struct {
+    Code string
+    BonusAmount float64
+    MaxUses int
+    ExpiresAt *time.Time
+    Notes string
+}
+
+// UpdatePromoCodeInput is the input for updating a promo code
+type UpdatePromoCodeInput struct {
+    Code *string
+    BonusAmount *float64
+    MaxUses *int
+    Status *string
+    ExpiresAt *time.Time
+    Notes *string
+}
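CanUse composes three independent gates: an active status, an optional expiry, and an optional use cap (MaxUses <= 0 means unlimited). A self-contained mirror of that logic, using a plain string where the service's PromoCodeStatusActive constant would be:

```go
package main

import (
	"fmt"
	"time"
)

// promo is a simplified stand-in for the PromoCode type above.
type promo struct {
	status    string
	expiresAt *time.Time
	maxUses   int
	usedCount int
}

func (p promo) canUse() bool {
	if p.status != "active" {
		return false
	}
	if p.expiresAt != nil && time.Now().After(*p.expiresAt) {
		return false
	}
	if p.maxUses > 0 && p.usedCount >= p.maxUses {
		return false
	}
	return true
}

func main() {
	past := time.Now().Add(-time.Hour)
	fmt.Println(promo{status: "active"}.canUse())                           // true: no expiry, unlimited uses
	fmt.Println(promo{status: "active", expiresAt: &past}.canUse())         // false: expired
	fmt.Println(promo{status: "active", maxUses: 1, usedCount: 1}.canUse()) // false: exhausted
	fmt.Println(promo{status: "disabled"}.canUse())                         // false: disabled
}
```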
diff --git a/backend/internal/service/promo_code_repository.go b/backend/internal/service/promo_code_repository.go
new file mode 100644
index 00000000..f55f9a6b
--- /dev/null
+++ b/backend/internal/service/promo_code_repository.go
@@ -0,0 +1,30 @@
+package service
+
+import (
+    "context"
+
+    "github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+// PromoCodeRepository is the promo code repository interface
+type PromoCodeRepository interface {
+    // Basic CRUD
+    Create(ctx context.Context, code *PromoCode) error
+    GetByID(ctx context.Context, id int64) (*PromoCode, error)
+    GetByCode(ctx context.Context, code string) (*PromoCode, error)
+    GetByCodeForUpdate(ctx context.Context, code string) (*PromoCode, error) // row-locked read, used for concurrency control
+    Update(ctx context.Context, code *PromoCode) error
+    Delete(ctx context.Context, id int64) error
+
+    // List queries
+    List(ctx context.Context, params pagination.PaginationParams) ([]PromoCode, *pagination.PaginationResult, error)
+    ListWithFilters(ctx context.Context, params pagination.PaginationParams, status, search string) ([]PromoCode, *pagination.PaginationResult, error)
+
+    // Usage records
+    CreateUsage(ctx context.Context, usage *PromoCodeUsage) error
+    GetUsageByPromoCodeAndUser(ctx context.Context, promoCodeID, userID int64) (*PromoCodeUsage, error)
+    ListUsagesByPromoCode(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]PromoCodeUsage, *pagination.PaginationResult, error)
+
+    // Counter operations
+    IncrementUsedCount(ctx context.Context, id int64) error
+}
diff --git a/backend/internal/service/promo_service.go b/backend/internal/service/promo_service.go
new file mode 100644
index 00000000..5ff63bdc
--- /dev/null
+++ b/backend/internal/service/promo_service.go
@@ -0,0 +1,268 @@
+package service
+
+import (
+    "context"
+    "crypto/rand"
+    "encoding/hex"
+    "fmt"
+    "strings"
+    "time"
+
+    dbent "github.com/Wei-Shaw/sub2api/ent"
+    infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+    "github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+var (
+    ErrPromoCodeNotFound = infraerrors.NotFound("PROMO_CODE_NOT_FOUND", "promo code not found")
+    ErrPromoCodeExpired = infraerrors.BadRequest("PROMO_CODE_EXPIRED", "promo code has expired")
+    ErrPromoCodeDisabled = infraerrors.BadRequest("PROMO_CODE_DISABLED", "promo code is disabled")
+    ErrPromoCodeMaxUsed = infraerrors.BadRequest("PROMO_CODE_MAX_USED", "promo code has reached maximum uses")
+    ErrPromoCodeAlreadyUsed = infraerrors.Conflict("PROMO_CODE_ALREADY_USED", "you have already used this promo code")
+    ErrPromoCodeInvalid = infraerrors.BadRequest("PROMO_CODE_INVALID", "invalid promo code")
+)
+
+// PromoService is the promo code service
+type PromoService struct {
+    promoRepo PromoCodeRepository
+    userRepo UserRepository
+    billingCacheService *BillingCacheService
+    entClient *dbent.Client
+    authCacheInvalidator APIKeyAuthCacheInvalidator
+}
+
+// NewPromoService creates a promo code service instance
+func NewPromoService(
+    promoRepo PromoCodeRepository,
+    userRepo UserRepository,
+    billingCacheService *BillingCacheService,
+    entClient *dbent.Client,
+    authCacheInvalidator APIKeyAuthCacheInvalidator,
+) *PromoService {
+    return &PromoService{
+        promoRepo: promoRepo,
+        userRepo: userRepo,
+        billingCacheService: billingCacheService,
+        entClient: entClient,
+        authCacheInvalidator: authCacheInvalidator,
+    }
+}
+
+// ValidatePromoCode validates a promo code (called before registration).
+// Returns nil, nil for an empty code (not treated as an error).
+func (s *PromoService) ValidatePromoCode(ctx context.Context, code string) (*PromoCode, error) {
+    code = strings.TrimSpace(code)
+    if code == "" {
+        return nil, nil // an empty code is not an error; return directly
+    }
+
+    promoCode, err := s.promoRepo.GetByCode(ctx, code)
+    if err != nil {
+        // Preserve the original error type; do not blanket-map it to NotFound
+        return nil, err
+    }
+
+    if err := s.validatePromoCodeStatus(promoCode); err != nil {
+        return nil, err
+    }
+
+    return promoCode, nil
+}
+
+// validatePromoCodeStatus validates the promo code's state
+func (s *PromoService) validatePromoCodeStatus(promoCode *PromoCode) error {
+    if !promoCode.CanUse() {
+        if promoCode.IsExpired() {
+            return ErrPromoCodeExpired
+        }
+        if promoCode.Status == PromoCodeStatusDisabled {
+            return ErrPromoCodeDisabled
+        }
+        if promoCode.MaxUses > 0 && promoCode.UsedCount >= promoCode.MaxUses {
+            return ErrPromoCodeMaxUsed
+        }
+        return ErrPromoCodeInvalid
+    }
+    return nil
+}
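ApplyPromoCode, which follows, relies on GetByCodeForUpdate taking a row lock so concurrent redemptions of the same code serialize. A minimal sketch of the same begin / defer-rollback / lock / commit shape, written against database/sql with a hypothetical promo_codes table (the service itself drives this through ent repositories, so everything below is illustrative):

```go
package promosketch

import (
	"context"
	"database/sql"
	"errors"
)

// redeem sketches the transaction-plus-row-lock pattern: SELECT ... FOR
// UPDATE blocks other redeemers of the same code until this tx finishes.
func redeem(ctx context.Context, db *sql.DB, code string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() { _ = tx.Rollback() }() // no-op after a successful Commit

	var id, used, max int64
	err = tx.QueryRowContext(ctx,
		`SELECT id, used_count, max_uses FROM promo_codes WHERE code = $1 FOR UPDATE`,
		code).Scan(&id, &used, &max)
	if err != nil {
		return err
	}
	if max > 0 && used >= max {
		return errors.New("promo code exhausted")
	}
	// Balance credit and usage-record inserts would go here, inside the tx.
	if _, err := tx.ExecContext(ctx,
		`UPDATE promo_codes SET used_count = used_count + 1 WHERE id = $1`, id); err != nil {
		return err
	}
	return tx.Commit()
}
```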
+// ApplyPromoCode applies a promo code (called after successful registration).
+// Uses a transaction and a row lock for concurrency safety.
+func (s *PromoService) ApplyPromoCode(ctx context.Context, userID int64, code string) error {
+    code = strings.TrimSpace(code)
+    if code == "" {
+        return nil
+    }
+
+    // Begin the transaction
+    tx, err := s.entClient.Tx(ctx)
+    if err != nil {
+        return fmt.Errorf("begin transaction: %w", err)
+    }
+    defer func() { _ = tx.Rollback() }()
+
+    txCtx := dbent.NewTxContext(ctx, tx)
+
+    // Fetch and lock the promo code row inside the transaction (FOR UPDATE)
+    promoCode, err := s.promoRepo.GetByCodeForUpdate(txCtx, code)
+    if err != nil {
+        return err
+    }
+
+    // Validate the promo code state inside the transaction
+    if err := s.validatePromoCodeStatus(promoCode); err != nil {
+        return err
+    }
+
+    // Check inside the transaction whether this user has already used the code
+    existing, err := s.promoRepo.GetUsageByPromoCodeAndUser(txCtx, promoCode.ID, userID)
+    if err != nil {
+        return fmt.Errorf("check existing usage: %w", err)
+    }
+    if existing != nil {
+        return ErrPromoCodeAlreadyUsed
+    }
+
+    // Credit the user's balance
+    if err := s.userRepo.UpdateBalance(txCtx, userID, promoCode.BonusAmount); err != nil {
+        return fmt.Errorf("update user balance: %w", err)
+    }
+
+    // Create the usage record
+    usage := &PromoCodeUsage{
+        PromoCodeID: promoCode.ID,
+        UserID: userID,
+        BonusAmount: promoCode.BonusAmount,
+        UsedAt: time.Now(),
+    }
+    if err := s.promoRepo.CreateUsage(txCtx, usage); err != nil {
+        return fmt.Errorf("create usage record: %w", err)
+    }
+
+    // Increment the use count
+    if err := s.promoRepo.IncrementUsedCount(txCtx, promoCode.ID); err != nil {
+        return fmt.Errorf("increment used count: %w", err)
+    }
+
+    if err := tx.Commit(); err != nil {
+        return fmt.Errorf("commit transaction: %w", err)
+    }
+
+    s.invalidatePromoCaches(ctx, userID, promoCode.BonusAmount)
+
+    // Invalidate the balance cache
+    if s.billingCacheService != nil {
+        go func() {
+            cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+            defer cancel()
+            _ = s.billingCacheService.InvalidateUserBalance(cacheCtx, userID)
+        }()
+    }
+
+    return nil
+}
+
+func (s *PromoService) invalidatePromoCaches(ctx context.Context, userID int64, bonusAmount float64) {
+    if bonusAmount == 0 || s.authCacheInvalidator == nil {
+        return
+    }
+    s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+}
+
+// GenerateRandomCode generates a random promo code
+func (s *PromoService) GenerateRandomCode() (string, error) {
+    bytes := make([]byte, 8)
+    if _, err := rand.Read(bytes); err != nil {
+        return "", fmt.Errorf("generate random bytes: %w", err)
+    }
+    return strings.ToUpper(hex.EncodeToString(bytes)), nil
+}
+
+// Create creates a promo code
+func (s *PromoService) Create(ctx context.Context, input *CreatePromoCodeInput) (*PromoCode, error) {
+    code := strings.TrimSpace(input.Code)
+    if code == "" {
+        // Auto-generate one
+        var err error
+        code, err = s.GenerateRandomCode()
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    promoCode := &PromoCode{
+        Code: strings.ToUpper(code),
+        BonusAmount: input.BonusAmount,
+        MaxUses: input.MaxUses,
+        UsedCount: 0,
+        Status: PromoCodeStatusActive,
+        ExpiresAt: input.ExpiresAt,
+        Notes: input.Notes,
+    }
+
+    if err := s.promoRepo.Create(ctx, promoCode); err != nil {
+        return nil, fmt.Errorf("create promo code: %w", err)
+    }
+
+    return promoCode, nil
+}
+
+// GetByID fetches a promo code by ID
+func (s *PromoService) GetByID(ctx context.Context, id int64) (*PromoCode, error) {
+    code, err := s.promoRepo.GetByID(ctx, id)
+    if err != nil {
+        return nil, err
+    }
+    return code, nil
+}
+
+// Update updates a promo code
+func (s *PromoService) Update(ctx context.Context, id int64, input *UpdatePromoCodeInput) (*PromoCode, error) {
+    promoCode, err := s.promoRepo.GetByID(ctx, id)
+    if err != nil {
+        return nil, err
+    }
+
+    if input.Code != nil {
+        promoCode.Code = strings.ToUpper(strings.TrimSpace(*input.Code))
+    }
+    if input.BonusAmount != nil {
+        promoCode.BonusAmount = *input.BonusAmount
+    }
+    if input.MaxUses != nil {
+        promoCode.MaxUses = *input.MaxUses
+    }
+    if input.Status != nil {
+        promoCode.Status = *input.Status
+    }
+    if input.ExpiresAt != nil {
+        promoCode.ExpiresAt = input.ExpiresAt
+    }
+    if input.Notes != nil {
+        promoCode.Notes = *input.Notes
+    }
+
+    if err := s.promoRepo.Update(ctx, promoCode); err != nil {
+        return nil, fmt.Errorf("update promo code: %w", err)
+    }
+
+    return promoCode, nil
+}
+// Delete deletes a promo code
+func (s *PromoService) Delete(ctx context.Context, id int64) error {
+    if err := s.promoRepo.Delete(ctx, id); err != nil {
+        return fmt.Errorf("delete promo code: %w", err)
+    }
+    return nil
+}
+
+// List lists promo codes
+func (s *PromoService) List(ctx context.Context, params pagination.PaginationParams, status, search string) ([]PromoCode, *pagination.PaginationResult, error) {
+    return s.promoRepo.ListWithFilters(ctx, params, status, search)
+}
+
+// ListUsages lists usage records
+func (s *PromoService) ListUsages(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]PromoCodeUsage, *pagination.PaginationResult, error) {
+    return s.promoRepo.ListUsagesByPromoCode(ctx, promoCodeID, params)
+}
diff --git a/backend/internal/service/prompts/codex_cli_instructions.md b/backend/internal/service/prompts/codex_cli_instructions.md
new file mode 100644
index 00000000..4886c7ef
--- /dev/null
+++ b/backend/internal/service/prompts/codex_cli_instructions.md
@@ -0,0 +1,275 @@
+You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.
+
+Your capabilities:
+
+- Receive user prompts and other context provided by the harness, such as files in the workspace.
+- Communicate with the user by streaming thinking & responses, and by making & updating plans.
+- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. More on this in the "Sandbox and approvals" section.
+
+Within this context, Codex refers to the open-source agentic coding interface (not the old Codex language model built by OpenAI).
+
+# How you work
+
+## Personality
+
+Your default personality and tone are concise, direct, and friendly. You communicate efficiently, always keeping the user clearly informed about ongoing actions without unnecessary detail. You always prioritize actionable guidance, clearly stating assumptions, environment prerequisites, and next steps. Unless explicitly asked, you avoid excessively verbose explanations about your work.
+
+# AGENTS.md spec
+- Repos often contain AGENTS.md files. These files can appear anywhere within the repository.
+- These files are a way for humans to give you (the agent) instructions or tips for working within the container.
+- Some examples might be: coding conventions, info about how code is organized, or instructions for how to run or test code.
+- Instructions in AGENTS.md files:
+  - The scope of an AGENTS.md file is the entire directory tree rooted at the folder that contains it.
+  - For every file you touch in the final patch, you must obey instructions in any AGENTS.md file whose scope includes that file.
+  - Instructions about code style, structure, naming, etc. apply only to code within the AGENTS.md file's scope, unless the file states otherwise.
+  - More-deeply-nested AGENTS.md files take precedence in the case of conflicting instructions.
+  - Direct system/developer/user instructions (as part of a prompt) take precedence over AGENTS.md instructions.
+- The contents of the AGENTS.md file at the root of the repo and any directories from the CWD up to the root are included with the developer message and don't need to be re-read. When working in a subdirectory of CWD, or a directory outside the CWD, check for any AGENTS.md files that may be applicable.
+
+## Responsiveness
+
+### Preamble messages
+
+Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples:
+
+- **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each.
+- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates).
+- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions.
+- **Keep your tone light, friendly and curious**: add small touches of personality so preambles feel collaborative and engaging.
+- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action.
+
+**Examples:**
+
+- “I’ve explored the repo; now checking the API route definitions.”
+- “Next, I’ll patch the config and update the related tests.”
+- “I’m about to scaffold the CLI commands and helper functions.”
+- “Ok cool, so I’ve wrapped my head around the repo. Now digging into the API routes.”
+- “Config’s looking tidy. Next up is patching helpers to keep things in sync.”
+- “Finished poking at the DB gateway. I will now chase down error handling.”
+- “Alright, build pipeline order is interesting. Checking how it reports failures.”
+- “Spotted a clever caching util; now hunting where it gets used.”
+
+## Planning
+
+You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go.
+
+Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately.
+
+Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step.
+
+Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so.
+
+Use a plan when:
+
+- The task is non-trivial and will require multiple actions over a long time horizon.
+- There are logical phases or dependencies where sequencing matters. +- The work has ambiguity that benefits from outlining high-level goals. +- You want intermediate checkpoints for feedback and validation. +- When the user asked you to do more than one thing in a single prompt +- The user has asked you to use the plan tool (aka "TODOs") +- You generate additional steps while working, and plan to do them before yielding to the user + +### Examples + +**High-quality plans** + +Example 1: + +1. Add CLI entry with file args +2. Parse Markdown via CommonMark library +3. Apply semantic HTML template +4. Handle code blocks, images, links +5. Add error handling for invalid files + +Example 2: + +1. Define CSS variables for colors +2. Add toggle with localStorage state +3. Refactor components to use variables +4. Verify all views for readability +5. Add smooth theme-change transition + +Example 3: + +1. Set up Node.js + WebSocket server +2. Add join/leave broadcast events +3. Implement messaging with timestamps +4. Add usernames + mention highlighting +5. Persist messages in lightweight DB +6. Add typing indicators + unread count + +**Low-quality plans** + +Example 1: + +1. Create CLI tool +2. Add Markdown parser +3. Convert to HTML + +Example 2: + +1. Add dark mode toggle +2. Save preference +3. Make styles look good + +Example 3: + +1. Create single-file HTML game +2. Run quick sanity check +3. Summarize usage instructions + +If you need to write a plan, only write high quality plans, not low quality ones. + +## Task execution + +You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer. + +You MUST adhere to the following criteria when solving queries: + +- Working on the repo(s) in the current environment is allowed, even if they are proprietary. +- Analyzing code for vulnerabilities is allowed. +- Showing user code and tool call details is allowed. +- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]} + +If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. AGENTS.md) may override these guidelines: + +- Fix the problem at the root cause rather than applying surface-level patches, when possible. +- Avoid unneeded complexity in your solution. +- Do not attempt to fix unrelated bugs or broken tests. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) +- Update documentation as necessary. +- Keep changes consistent with the style of the existing codebase. Changes should be minimal and focused on the task. +- Use `git log` and `git blame` to search the history of the codebase if additional context is required. +- NEVER add copyright or license headers unless specifically requested. +- Do not waste tokens by re-reading files after calling `apply_patch` on them. The tool call will fail if it didn't work. The same goes for making folders, deleting folders, etc. 
+- Do not `git commit` your changes or create new git branches unless explicitly requested. +- Do not add inline comments within code unless explicitly requested. +- Do not use one-letter variable names unless explicitly requested. +- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor. + +## Validating your work + +If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete. + +When testing, your philosophy should be to start as specific as possible to the code you changed so that you can catch issues efficiently, then make your way to broader tests as you build confidence. If there's no test for the code you changed, and if the adjacent patterns in the codebases show that there's a logical place for you to add a test, you may do so. However, do not add tests to codebases with no tests. + +Similarly, once you're confident in correctness, you can suggest or use formatting commands to ensure that your code is well formatted. If there are issues you can iterate up to 3 times to get formatting right, but if you still can't manage it's better to save the user time and present them a correct solution where you call out the formatting in your final message. If the codebase does not have a formatter configured, do not add one. + +For all of testing, running, building, and formatting, do not attempt to fix unrelated bugs. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) + +Be mindful of whether to run validation commands proactively. In the absence of behavioral guidance: + +- When running in non-interactive approval modes like **never** or **on-failure**, proactively run tests, lint and do whatever you need to ensure you've completed the task. +- When working in interactive approval modes like **untrusted**, or **on-request**, hold off on running tests or lint commands until the user is ready for you to finalize your output, because these commands take time to run and slow down iteration. Instead suggest what you want to do next, and let the user confirm first. +- When working on test-related tasks, such as adding tests, fixing tests, or reproducing a bug to verify behavior, you may proactively run tests regardless of approval mode. Use your judgement to decide whether this is a test-related task. + +## Ambition vs. precision + +For tasks that have no prior context (i.e. the user is starting something brand new), you should feel free to be ambitious and demonstrate creativity with your implementation. + +If you're operating in an existing codebase, you should make sure you do exactly what the user asks with surgical precision. Treat the surrounding codebase with respect, and don't overstep (i.e. changing filenames or variables unnecessarily). You should balance being sufficiently ambitious and proactive when completing tasks of this nature. + +You should use judicious initiative to decide on the right level of detail and complexity to deliver based on the user's needs. This means showing good judgment that you're capable of doing the right extras without gold-plating. This might be demonstrated by high-value, creative touches when scope of the task is vague; while being surgical and targeted when scope is tightly specified. 
+
+## Sharing progress updates
+
+For especially longer tasks that you work on (i.e. requiring many tool calls, or a plan with multiple steps), you should provide progress updates back to the user at reasonable intervals. These updates should be structured as a concise sentence or two (no more than 8-10 words long) recapping progress so far in plain language: this update demonstrates your understanding of what needs to be done, progress so far (i.e. files explored, subtasks complete), and where you're going next.
+
+Before doing large chunks of work that may incur latency as experienced by the user (i.e. writing a new file), you should send a concise message to the user with an update indicating what you're about to do to ensure they know what you're spending time on. Don't start editing or writing large files before informing the user what you are doing and why.
+
+The messages you send before tool calls should describe what is immediately about to be done next in very concise language. If there was previous work done, this preamble message should also include a note about the work done so far to bring the user along.
+
+## Presenting your work and final message
+
+Your final message should read naturally, like an update from a concise teammate. For casual conversation, brainstorming tasks, or quick questions from the user, respond in a friendly, conversational tone. You should ask questions, suggest ideas, and adapt to the user’s style. If you've finished a large amount of work, when describing what you've done to the user, you should follow the final answer formatting guidelines to communicate substantive changes. You don't need to add structured formatting for one-word answers, greetings, or purely conversational exchanges.
+
+You can skip heavy formatting for single, simple actions or confirmations. In these cases, respond in plain sentences with any relevant next step or quick option. Reserve multi-section structured responses for results that need grouping or explanation.
+
+The user is working on the same computer as you, and has access to your work. As such there's no need to show the full contents of large files you have already written unless the user explicitly asks for them. Similarly, if you've created or modified files using `apply_patch`, there's no need to tell users to "save the file" or "copy the code into a file"—just reference the file path.
+
+If there's something that you think you could help with as a logical next step, concisely ask the user if they want you to do so. Good examples of this are running tests, committing changes, or building out the next logical component. If there’s something that you couldn't do (even with approval) but that the user might want to do (such as verifying changes by running the app), include those instructions succinctly.
+
+Brevity is very important as a default. You should be very concise (i.e. no more than 10 lines), but can relax this requirement for tasks where additional detail and comprehensiveness is important for the user's understanding.
+
+### Final answer structure and style guidelines
+
+You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value.
+
+**Section Headers**
+
+- Use only when they improve clarity — they are not mandatory for every answer.
+- Choose descriptive names that fit the content
+- Keep headers short (1–3 words) and in `**Title Case**`.
Always start headers with `**` and end with `**` +- Leave no blank line before the first bullet under a header. +- Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer. + +**Bullets** + +- Use `-` followed by a space for every bullet. +- Merge related points when possible; avoid a bullet for every trivial detail. +- Keep bullets to one line unless breaking for clarity is unavoidable. +- Group into short lists (4–6 bullets) ordered by importance. +- Use consistent keyword phrasing and formatting across sections. + +**Monospace** + +- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``). +- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command. +- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``). + +**File References** +When referencing files in your response, make sure to include the relevant start line and always follow the below rules: + * Use inline code to make file paths clickable. + * Each reference should have a stand alone path. Even if it's the same file. + * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. + * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1). + * Do not use URIs like file://, vscode://, or https://. + * Do not provide range of lines + * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 + +**Structure** + +- Place related bullets together; don’t mix unrelated concepts in the same section. +- Order sections from general → specific → supporting info. +- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it. +- Match structure to complexity: + - Multi-part or detailed results → use clear headers and grouped bullets. + - Simple results → minimal headers, possibly just a short list or paragraph. + +**Tone** + +- Keep the voice collaborative and natural, like a coding partner handing off work. +- Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition +- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”). +- Keep descriptions self-contained; don’t refer to “above” or “below”. +- Use parallel structure in lists for consistency. + +**Don’t** + +- Don’t use literal words “bold” or “monospace” in the content. +- Don’t nest bullets or create deep hierarchies. +- Don’t output ANSI escape codes directly — the CLI renderer applies them. +- Don’t cram unrelated keywords into a single bullet; split for clarity. +- Don’t let keyword lists run long — wrap or reformat for scanability. + +Generally, ensure your final answers adapt their shape and depth to the request. For example, answers to code explanations should have a precise, structured explanation with code references that answer the question directly. For tasks with a simple implementation, lead with the outcome and supplement only with what’s needed for clarity. Larger changes can be presented as a logical walkthrough of your approach, grouping related steps, explaining rationale where it adds value, and highlighting next actions to accelerate the user. Your answers should provide the right level of detail while being easily scannable. 
+ +For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting. + +# Tool Guidelines + +## Shell commands + +When using the shell, you must adhere to the following guidelines: + +- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) +- Do not use python scripts to attempt to output larger chunks of a file. + +## `update_plan` + +A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task. + +To create a new plan, call `update_plan` with a short list of 1‑sentence steps (no more than 5-7 words each) with a `status` for each step (`pending`, `in_progress`, or `completed`). + +When steps have been completed, use `update_plan` to mark each finished step as `completed` and the next step you are working on as `in_progress`. There should always be exactly one `in_progress` step until everything is done. You can mark multiple items as complete in a single `update_plan` call. + +If all steps are complete, ensure you call `update_plan` to mark all steps as `completed`. diff --git a/backend/internal/service/prompts/codex_opencode_bridge.txt b/backend/internal/service/prompts/codex_opencode_bridge.txt new file mode 100644 index 00000000..093aa0f2 --- /dev/null +++ b/backend/internal/service/prompts/codex_opencode_bridge.txt @@ -0,0 +1,122 @@ +# Codex Running in OpenCode + +You are running Codex through OpenCode, an open-source terminal coding assistant. OpenCode provides different tools but follows Codex operating principles. + +## CRITICAL: Tool Replacements + + +❌ APPLY_PATCH DOES NOT EXIST → ✅ USE "edit" INSTEAD +- NEVER use: apply_patch, applyPatch +- ALWAYS use: edit tool for ALL file modifications +- Before modifying files: Verify you're using "edit", NOT "apply_patch" + + + +❌ UPDATE_PLAN DOES NOT EXIST → ✅ USE "todowrite" INSTEAD +- NEVER use: update_plan, updatePlan, read_plan, readPlan +- ALWAYS use: todowrite for task/plan updates, todoread to read plans +- Before plan operations: Verify you're using "todowrite", NOT "update_plan" + + +## Available OpenCode Tools + +**File Operations:** +- `write` - Create new files + - Overwriting existing files requires a prior Read in this session; default to ASCII unless the file already uses Unicode. +- `edit` - Modify existing files (REPLACES apply_patch) + - Requires a prior Read in this session; preserve exact indentation; ensure `oldString` uniquely matches or use `replaceAll`; edit fails if ambiguous or missing. +- `read` - Read file contents + +**Search/Discovery:** +- `grep` - Search file contents (tool, not bash grep); use `include` to filter patterns; set `path` only when not searching workspace root; for cross-file match counts use bash with `rg`. +- `glob` - Find files by pattern; defaults to workspace cwd unless `path` is set. +- `list` - List directories (requires absolute paths) + +**Execution:** +- `bash` - Run shell commands + - No workdir parameter; do not include it in tool calls. + - Always include a short description for the command. + - Do not use cd; use absolute paths in commands. + - Quote paths containing spaces with double quotes. + - Chain multiple commands with ';' or '&&'; avoid newlines. 
+ - Use Grep/Glob tools for searches; only use bash with `rg` when you need counts or advanced features. + - Do not use `ls`/`cat` in bash; use `list`/`read` tools instead. + - For deletions (rm), verify by listing parent dir with `list`. + +**Network:** +- `webfetch` - Fetch web content + - Use fully-formed URLs (http/https; http auto-upgrades to https). + - Always set `format` to one of: text | markdown | html; prefer markdown unless otherwise required. + - Read-only; short cache window. + +**Task Management:** +- `todowrite` - Manage tasks/plans (REPLACES update_plan) +- `todoread` - Read current plan + +## Substitution Rules + +Base instruction says: You MUST use instead: +apply_patch → edit +update_plan → todowrite +read_plan → todoread + +**Path Usage:** Use per-tool conventions to avoid conflicts: +- Tool calls: `read`, `edit`, `write`, `list` require absolute paths. +- Searches: `grep`/`glob` default to the workspace cwd; prefer relative include patterns; set `path` only when a different root is needed. +- Presentation: In assistant messages, show workspace-relative paths; use absolute paths only inside tool calls. +- Tool schema overrides general path preferences—do not convert required absolute paths to relative. + +## Verification Checklist + +Before file/plan modifications: +1. Am I using "edit" NOT "apply_patch"? +2. Am I using "todowrite" NOT "update_plan"? +3. Is this tool in the approved list above? +4. Am I following each tool's path requirements? + +If ANY answer is NO → STOP and correct before proceeding. + +## OpenCode Working Style + +**Communication:** +- Send brief preambles (8-12 words) before tool calls, building on prior context +- Provide progress updates during longer tasks + +**Execution:** +- Keep working autonomously until query is fully resolved before yielding +- Don't return to user with partial solutions + +**Code Approach:** +- New projects: Be ambitious and creative +- Existing codebases: Surgical precision - modify only what's requested unless explicitly instructed to do otherwise + +**Testing:** +- If tests exist: Start specific to your changes, then broader validation + +## Advanced Tools + +**Task Tool (Sub-Agents):** +- Use the Task tool (functions.task) to launch sub-agents +- Check the Task tool description for current agent types and their capabilities +- Useful for complex analysis, specialized workflows, or tasks requiring isolated context +- The agent list is dynamically generated - refer to tool schema for available agents + +**Parallelization:** +- When multiple independent tool calls are needed, use multi_tool_use.parallel to run them concurrently. +- Reserve sequential calls for ordered or data-dependent steps. + +**MCP Tools:** +- Model Context Protocol servers provide additional capabilities +- MCP tools are prefixed: `mcp____` +- Check your available tools for MCP integrations +- Use when the tool's functionality matches your task needs + +## What Remains from Codex + +Sandbox policies, approval mechanisms, final answer formatting, git commit protocols, and file reference formats all follow Codex instructions. In approval policy "never", never request escalations. + +## Approvals & Safety +- Assume workspace-write filesystem, network enabled, approval on-failure unless explicitly stated otherwise. +- When a command fails due to sandboxing or permissions, retry with escalated permissions if allowed by policy, including a one-line justification. 
+- Treat destructive commands (e.g., `rm`, `git reset --hard`) as requiring explicit user request or approval. +- When uncertain, prefer non-destructive verification first (e.g., confirm file existence with `list`, then delete with `bash`). \ No newline at end of file diff --git a/backend/internal/service/prompts/tool_remap_message.txt b/backend/internal/service/prompts/tool_remap_message.txt new file mode 100644 index 00000000..4ff986e1 --- /dev/null +++ b/backend/internal/service/prompts/tool_remap_message.txt @@ -0,0 +1,63 @@ + + +YOU ARE IN A DIFFERENT ENVIRONMENT. These instructions override ALL previous tool references. + + + + +❌ APPLY_PATCH DOES NOT EXIST → ✅ USE "edit" INSTEAD +- NEVER use: apply_patch, applyPatch +- ALWAYS use: edit tool for ALL file modifications +- Before modifying files: Verify you're using "edit", NOT "apply_patch" + + + +❌ UPDATE_PLAN DOES NOT EXIST → ✅ USE "todowrite" INSTEAD +- NEVER use: update_plan, updatePlan +- ALWAYS use: todowrite for ALL task/plan operations +- Use todoread to read current plan +- Before plan operations: Verify you're using "todowrite", NOT "update_plan" + + + + +File Operations: + • write - Create new files + • edit - Modify existing files (REPLACES apply_patch) + • patch - Apply diff patches + • read - Read file contents + +Search/Discovery: + • grep - Search file contents + • glob - Find files by pattern + • list - List directories (use relative paths) + +Execution: + • bash - Run shell commands + +Network: + • webfetch - Fetch web content + +Task Management: + • todowrite - Manage tasks/plans (REPLACES update_plan) + • todoread - Read current plan + + + +Base instruction says: You MUST use instead: +apply_patch → edit +update_plan → todowrite +read_plan → todoread +absolute paths → relative paths + + + +Before file/plan modifications: +1. Am I using "edit" NOT "apply_patch"? +2. Am I using "todowrite" NOT "update_plan"? +3. Is this tool in the approved list above? +4. Am I using relative paths? + +If ANY answer is NO → STOP and correct before proceeding. 
+ + \ No newline at end of file diff --git a/backend/internal/service/proxy.go b/backend/internal/service/proxy.go new file mode 100644 index 00000000..7eb7728f --- /dev/null +++ b/backend/internal/service/proxy.go @@ -0,0 +1,51 @@ +package service + +import ( + "fmt" + "time" +) + +type Proxy struct { + ID int64 + Name string + Protocol string + Host string + Port int + Username string + Password string + Status string + CreatedAt time.Time + UpdatedAt time.Time +} + +func (p *Proxy) IsActive() bool { + return p.Status == StatusActive +} + +func (p *Proxy) URL() string { + if p.Username != "" && p.Password != "" { + return fmt.Sprintf("%s://%s:%s@%s:%d", p.Protocol, p.Username, p.Password, p.Host, p.Port) + } + return fmt.Sprintf("%s://%s:%d", p.Protocol, p.Host, p.Port) +} + +type ProxyWithAccountCount struct { + Proxy + AccountCount int64 + LatencyMs *int64 + LatencyStatus string + LatencyMessage string + IPAddress string + Country string + CountryCode string + Region string + City string +} + +type ProxyAccountSummary struct { + ID int64 + Name string + Platform string + Type string + Notes *string +} diff --git a/backend/internal/service/proxy_latency_cache.go b/backend/internal/service/proxy_latency_cache.go new file mode 100644 index 00000000..4a1cc77b --- /dev/null +++ b/backend/internal/service/proxy_latency_cache.go @@ -0,0 +1,23 @@ +package service + +import ( + "context" + "time" +) + +type ProxyLatencyInfo struct { + Success bool `json:"success"` + LatencyMs *int64 `json:"latency_ms,omitempty"` + Message string `json:"message,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` + Region string `json:"region,omitempty"` + City string `json:"city,omitempty"` + UpdatedAt time.Time `json:"updated_at"` +} + +type ProxyLatencyCache interface { + GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*ProxyLatencyInfo, error) + SetProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) error +} diff --git a/backend/internal/service/proxy_service.go b/backend/internal/service/proxy_service.go new file mode 100644 index 00000000..a5d897f6 --- /dev/null +++ b/backend/internal/service/proxy_service.go @@ -0,0 +1,193 @@ +package service + +import ( + "context" + "fmt" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +var ( + ErrProxyNotFound = infraerrors.NotFound("PROXY_NOT_FOUND", "proxy not found") + ErrProxyInUse = infraerrors.Conflict("PROXY_IN_USE", "proxy is in use by accounts") +) + +type ProxyRepository interface { + Create(ctx context.Context, proxy *Proxy) error + GetByID(ctx context.Context, id int64) (*Proxy, error) + Update(ctx context.Context, proxy *Proxy) error + Delete(ctx context.Context, id int64) error + + List(ctx context.Context, params pagination.PaginationParams) ([]Proxy, *pagination.PaginationResult, error) + ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]Proxy, *pagination.PaginationResult, error) + ListWithFiltersAndAccountCount(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]ProxyWithAccountCount, *pagination.PaginationResult, error) + ListActive(ctx context.Context) ([]Proxy, error) + ListActiveWithAccountCount(ctx context.Context) ([]ProxyWithAccountCount, error) + + ExistsByHostPortAuth(ctx context.Context, host string, port int, username, 
password string) (bool, error)
+    CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error)
+    ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error)
+}
+
+// CreateProxyRequest is the request to create a proxy
+type CreateProxyRequest struct {
+    Name string `json:"name"`
+    Protocol string `json:"protocol"`
+    Host string `json:"host"`
+    Port int `json:"port"`
+    Username string `json:"username"`
+    Password string `json:"password"`
+}
+
+// UpdateProxyRequest is the request to update a proxy
+type UpdateProxyRequest struct {
+    Name *string `json:"name"`
+    Protocol *string `json:"protocol"`
+    Host *string `json:"host"`
+    Port *int `json:"port"`
+    Username *string `json:"username"`
+    Password *string `json:"password"`
+    Status *string `json:"status"`
+}
+
+// ProxyService manages proxies
+type ProxyService struct {
+    proxyRepo ProxyRepository
+}
+
+// NewProxyService creates a proxy service instance
+func NewProxyService(proxyRepo ProxyRepository) *ProxyService {
+    return &ProxyService{
+        proxyRepo: proxyRepo,
+    }
+}
+
+// Create creates a proxy
+func (s *ProxyService) Create(ctx context.Context, req CreateProxyRequest) (*Proxy, error) {
+    // Build the proxy record
+    proxy := &Proxy{
+        Name: req.Name,
+        Protocol: req.Protocol,
+        Host: req.Host,
+        Port: req.Port,
+        Username: req.Username,
+        Password: req.Password,
+        Status: StatusActive,
+    }
+
+    if err := s.proxyRepo.Create(ctx, proxy); err != nil {
+        return nil, fmt.Errorf("create proxy: %w", err)
+    }
+
+    return proxy, nil
+}
+
+// GetByID fetches a proxy by ID
+func (s *ProxyService) GetByID(ctx context.Context, id int64) (*Proxy, error) {
+    proxy, err := s.proxyRepo.GetByID(ctx, id)
+    if err != nil {
+        return nil, fmt.Errorf("get proxy: %w", err)
+    }
+    return proxy, nil
+}
+
+// List lists proxies
+func (s *ProxyService) List(ctx context.Context, params pagination.PaginationParams) ([]Proxy, *pagination.PaginationResult, error) {
+    proxies, pagination, err := s.proxyRepo.List(ctx, params)
+    if err != nil {
+        return nil, nil, fmt.Errorf("list proxies: %w", err)
+    }
+    return proxies, pagination, nil
+}
+
+// ListActive lists active proxies
+func (s *ProxyService) ListActive(ctx context.Context) ([]Proxy, error) {
+    proxies, err := s.proxyRepo.ListActive(ctx)
+    if err != nil {
+        return nil, fmt.Errorf("list active proxies: %w", err)
+    }
+    return proxies, nil
+}
+
+// Update updates a proxy
+func (s *ProxyService) Update(ctx context.Context, id int64, req UpdateProxyRequest) (*Proxy, error) {
+    proxy, err := s.proxyRepo.GetByID(ctx, id)
+    if err != nil {
+        return nil, fmt.Errorf("get proxy: %w", err)
+    }
+
+    // Apply field updates
+    if req.Name != nil {
+        proxy.Name = *req.Name
+    }
+
+    if req.Protocol != nil {
+        proxy.Protocol = *req.Protocol
+    }
+
+    if req.Host != nil {
+        proxy.Host = *req.Host
+    }
+
+    if req.Port != nil {
+        proxy.Port = *req.Port
+    }
+
+    if req.Username != nil {
+        proxy.Username = *req.Username
+    }
+
+    if req.Password != nil {
+        proxy.Password = *req.Password
+    }
+
+    if req.Status != nil {
+        proxy.Status = *req.Status
+    }
+
+    if err := s.proxyRepo.Update(ctx, proxy); err != nil {
+        return nil, fmt.Errorf("update proxy: %w", err)
+    }
+
+    return proxy, nil
+}
+
+// Delete deletes a proxy
+func (s *ProxyService) Delete(ctx context.Context, id int64) error {
+    // Ensure the proxy exists
+    _, err := s.proxyRepo.GetByID(ctx, id)
+    if err != nil {
+        return fmt.Errorf("get proxy: %w", err)
+    }
+
+    if err := s.proxyRepo.Delete(ctx, id); err != nil {
+        return fmt.Errorf("delete proxy: %w", err)
+    }
+
+    return nil
+}
+
+// TestConnection tests proxy connectivity (concrete test logic still to be implemented)
+func (s *ProxyService) TestConnection(ctx context.Context, id int64) error {
+    proxy, err := s.proxyRepo.GetByID(ctx, id)
+    if err != nil {
+        return fmt.Errorf("get proxy: %w", err)
+    }
+
+    // TODO: implement the actual proxy connectivity test
+    // e.g. send a test request through the proxy
+    _ = proxy
+
+    return nil
+}
+
+// GetURL returns the proxy URL
+func (s *ProxyService) GetURL(ctx context.Context, id int64) (string, error) {
+    proxy, err := s.proxyRepo.GetByID(ctx, id)
+    if err != nil {
+        return "", fmt.Errorf("get proxy: %w", err)
+    }
+
+    return proxy.URL(), nil
+}
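The TestConnection TODO above leaves the actual probe unimplemented. One plausible shape routes a cheap request through the proxy via net/http's ProxyURL transport. This is illustrative only: the probe URL is a placeholder, and a SOCKS5 proxy would need golang.org/x/net/proxy rather than this transport:

```go
package proxysketch

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"time"
)

// testProxy issues a small request through the proxy and treats any
// transport error as a failed connectivity test.
func testProxy(ctx context.Context, rawURL string) error {
	u, err := url.Parse(rawURL) // e.g. "http://user:pass@host:port" from Proxy.URL()
	if err != nil {
		return fmt.Errorf("parse proxy url: %w", err)
	}
	client := &http.Client{
		Transport: &http.Transport{Proxy: http.ProxyURL(u)},
		Timeout:   10 * time.Second,
	}
	// Placeholder probe target; any reliable, lightweight endpoint works.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://api.ipify.org", nil)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("proxy request failed: %w", err)
	}
	defer resp.Body.Close()
	return nil
}
```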
diff --git a/backend/internal/service/quota_fetcher.go b/backend/internal/service/quota_fetcher.go
new file mode 100644
index 00000000..40d8572c
--- /dev/null
+++ b/backend/internal/service/quota_fetcher.go
@@ -0,0 +1,19 @@
+package service
+
+import (
+    "context"
+)
+
+// QuotaFetcher is the quota-fetching interface; each platform implements it
+type QuotaFetcher interface {
+    // CanFetch reports whether quota can be fetched for this account
+    CanFetch(account *Account) bool
+    // FetchQuota fetches the account's quota information
+    FetchQuota(ctx context.Context, account *Account, proxyURL string) (*QuotaResult, error)
+}
+
+// QuotaResult is the result of a quota fetch
+type QuotaResult struct {
+    UsageInfo *UsageInfo // normalized usage info
+    Raw map[string]any // raw response; may be stored in account.Extra
+}
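To make the contract concrete, here is a hypothetical QuotaFetcher implementation for a made-up "example" platform, sketched as if it lived in this same service package. Every platform-specific detail below is invented for illustration:

```go
// exampleQuotaFetcher sketches a platform-specific implementation:
// CanFetch gates by platform, and FetchQuota would call the platform's
// quota endpoint through proxyURL and map the payload into UsageInfo.
type exampleQuotaFetcher struct{}

func (f *exampleQuotaFetcher) CanFetch(account *Account) bool {
	return account != nil && account.Platform == "example"
}

func (f *exampleQuotaFetcher) FetchQuota(ctx context.Context, account *Account, proxyURL string) (*QuotaResult, error) {
	// A real implementation would issue an authenticated HTTP request
	// here (honoring ctx and proxyURL) and decode the response body.
	raw := map[string]any{"remaining_requests": 42}
	return &QuotaResult{Raw: raw}, nil
}
```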
diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go
new file mode 100644
index 00000000..ca479486
--- /dev/null
+++ b/backend/internal/service/ratelimit_service.go
@@ -0,0 +1,725 @@
+package service
+
+import (
+    "context"
+    "encoding/json"
+    "log/slog"
+    "net/http"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/Wei-Shaw/sub2api/internal/config"
+)
+
+// RateLimitService manages rate-limit and overload state
+type RateLimitService struct {
+    accountRepo AccountRepository
+    usageRepo UsageLogRepository
+    cfg *config.Config
+    geminiQuotaService *GeminiQuotaService
+    tempUnschedCache TempUnschedCache
+    timeoutCounterCache TimeoutCounterCache
+    settingService *SettingService
+    tokenCacheInvalidator TokenCacheInvalidator
+    usageCacheMu sync.RWMutex
+    usageCache map[int64]*geminiUsageCacheEntry
+}
+
+type geminiUsageCacheEntry struct {
+    windowStart time.Time
+    cachedAt time.Time
+    totals GeminiUsageTotals
+}
+
+const geminiPrecheckCacheTTL = time.Minute
+
+// NewRateLimitService creates a RateLimitService instance
+func NewRateLimitService(accountRepo AccountRepository, usageRepo UsageLogRepository, cfg *config.Config, geminiQuotaService *GeminiQuotaService, tempUnschedCache TempUnschedCache) *RateLimitService {
+    return &RateLimitService{
+        accountRepo: accountRepo,
+        usageRepo: usageRepo,
+        cfg: cfg,
+        geminiQuotaService: geminiQuotaService,
+        tempUnschedCache: tempUnschedCache,
+        usageCache: make(map[int64]*geminiUsageCacheEntry),
+    }
+}
+
+// SetTimeoutCounterCache sets the timeout counter cache (optional dependency)
+func (s *RateLimitService) SetTimeoutCounterCache(cache TimeoutCounterCache) {
+    s.timeoutCounterCache = cache
+}
+
+// SetSettingService sets the system settings service (optional dependency)
+func (s *RateLimitService) SetSettingService(settingService *SettingService) {
+    s.settingService = settingService
+}
+
+// SetTokenCacheInvalidator sets the token cache invalidator (optional dependency)
+func (s *RateLimitService) SetTokenCacheInvalidator(invalidator TokenCacheInvalidator) {
+    s.tokenCacheInvalidator = invalidator
+}
+
+// HandleUpstreamError handles an upstream error response and marks account state.
+// Returns whether the account should be removed from scheduling.
+func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, responseBody []byte) (shouldDisable bool) {
+    // For apikey-type accounts, consult the custom error code configuration.
+    // When it is enabled and the code is not in the list, do nothing
+    // (no descheduling, no rate-limit or overload marking).
+    customErrorCodesEnabled := account.IsCustomErrorCodesEnabled()
+    if !account.ShouldHandleErrorCode(statusCode) {
+        slog.Info("account_error_code_skipped", "account_id", account.ID, "status_code", statusCode)
+        return false
+    }
+
+    tempMatched := false
+    if statusCode != 401 {
+        tempMatched = s.tryTempUnschedulable(ctx, account, statusCode, responseBody)
+    }
+    upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(responseBody))
+    upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
+    if upstreamMsg != "" {
+        upstreamMsg = truncateForLog([]byte(upstreamMsg), 512)
+    }
+
+    switch statusCode {
+    case 401:
+        if account.Type == AccountTypeOAuth &&
+            (account.Platform == PlatformAntigravity || account.Platform == PlatformGemini) {
+            if s.tokenCacheInvalidator != nil {
+                if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
+                    slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
+                }
+            }
+        }
+        msg := "Authentication failed (401): invalid or expired credentials"
+        if upstreamMsg != "" {
+            msg = "Authentication failed (401): " + upstreamMsg
+        }
+        s.handleAuthError(ctx, account, msg)
+        shouldDisable = true
+    case 402:
+        // Payment required: insufficient balance or a billing issue; stop scheduling
+        msg := "Payment required (402): insufficient balance or billing issue"
+        if upstreamMsg != "" {
+            msg = "Payment required (402): " + upstreamMsg
+        }
+        s.handleAuthError(ctx, account, msg)
+        shouldDisable = true
+    case 403:
+        // Forbidden: stop scheduling and record the error
+        msg := "Access forbidden (403): account may be suspended or lack permissions"
+        if upstreamMsg != "" {
+            msg = "Access forbidden (403): " + upstreamMsg
+        }
+        s.handleAuthError(ctx, account, msg)
+        shouldDisable = true
+    case 429:
+        s.handle429(ctx, account, headers)
+        shouldDisable = false
+    case 529:
+        s.handle529(ctx, account)
+        shouldDisable = false
+    default:
+        // With custom error codes enabled, every listed code stops scheduling
+        if customErrorCodesEnabled {
+            msg := "Custom error code triggered"
+            if upstreamMsg != "" {
+                msg = upstreamMsg
+            }
+            s.handleCustomErrorCode(ctx, account, statusCode, msg)
+            shouldDisable = true
+        } else if statusCode >= 500 {
+            // Without custom error codes, just log 5xx errors
+            slog.Warn("account_upstream_error", "account_id", account.ID, "status_code", statusCode)
+            shouldDisable = false
+        }
+    }
+
+    if tempMatched {
+        return true
+    }
+    return shouldDisable
+}
+
+// PreCheckUsage proactively checks local quota before dispatching a request.
+// Returns false when the account should be skipped.
+func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, requestedModel string) (bool, error) { + if account == nil || account.Platform != PlatformGemini { + return true, nil + } + if s.usageRepo == nil || s.geminiQuotaService == nil { + return true, nil + } + + quota, ok := s.geminiQuotaService.QuotaForAccount(ctx, account) + if !ok { + return true, nil + } + + now := time.Now() + modelClass := geminiModelClassFromName(requestedModel) + + // 1) Daily quota precheck (RPD; resets at PST midnight) + { + var limit int64 + if quota.SharedRPD > 0 { + limit = quota.SharedRPD + } else { + switch modelClass { + case geminiModelFlash: + limit = quota.FlashRPD + default: + limit = quota.ProRPD + } + } + + if limit > 0 { + start := geminiDailyWindowStart(now) + totals, ok := s.getGeminiUsageTotals(account.ID, start, now) + if !ok { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + if err != nil { + return true, err + } + totals = geminiAggregateUsage(stats) + s.setGeminiUsageTotals(account.ID, start, now, totals) + } + + var used int64 + if quota.SharedRPD > 0 { + used = totals.ProRequests + totals.FlashRequests + } else { + switch modelClass { + case geminiModelFlash: + used = totals.FlashRequests + default: + used = totals.ProRequests + } + } + + if used >= limit { + resetAt := geminiDailyResetTime(now) + // NOTE: + // - This is a local precheck to reduce upstream 429s. + // - Do NOT mark the account as rate-limited here; rate_limit_reset_at should reflect real upstream 429s. + slog.Info("gemini_precheck_daily_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt) + return false, nil + } + } + } + + // 2) Minute quota precheck (RPM; fixed window current minute) + { + var limit int64 + if quota.SharedRPM > 0 { + limit = quota.SharedRPM + } else { + switch modelClass { + case geminiModelFlash: + limit = quota.FlashRPM + default: + limit = quota.ProRPM + } + } + + if limit > 0 { + start := now.Truncate(time.Minute) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) + if err != nil { + return true, err + } + totals := geminiAggregateUsage(stats) + + var used int64 + if quota.SharedRPM > 0 { + used = totals.ProRequests + totals.FlashRequests + } else { + switch modelClass { + case geminiModelFlash: + used = totals.FlashRequests + default: + used = totals.ProRequests + } + } + + if used >= limit { + resetAt := start.Add(time.Minute) + // Do not persist "rate limited" status from local precheck. See note above. 
+ slog.Info("gemini_precheck_minute_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt) + return false, nil + } + } + } + + return true, nil +} + +func (s *RateLimitService) getGeminiUsageTotals(accountID int64, windowStart, now time.Time) (GeminiUsageTotals, bool) { + s.usageCacheMu.RLock() + defer s.usageCacheMu.RUnlock() + + if s.usageCache == nil { + return GeminiUsageTotals{}, false + } + + entry, ok := s.usageCache[accountID] + if !ok || entry == nil { + return GeminiUsageTotals{}, false + } + if !entry.windowStart.Equal(windowStart) { + return GeminiUsageTotals{}, false + } + if now.Sub(entry.cachedAt) >= geminiPrecheckCacheTTL { + return GeminiUsageTotals{}, false + } + return entry.totals, true +} + +func (s *RateLimitService) setGeminiUsageTotals(accountID int64, windowStart, now time.Time, totals GeminiUsageTotals) { + s.usageCacheMu.Lock() + defer s.usageCacheMu.Unlock() + if s.usageCache == nil { + s.usageCache = make(map[int64]*geminiUsageCacheEntry) + } + s.usageCache[accountID] = &geminiUsageCacheEntry{ + windowStart: windowStart, + cachedAt: now, + totals: totals, + } +} + +// GeminiCooldown returns the fallback cooldown duration for Gemini 429s based on tier. +func (s *RateLimitService) GeminiCooldown(ctx context.Context, account *Account) time.Duration { + if account == nil { + return 5 * time.Minute + } + if s.geminiQuotaService == nil { + return 5 * time.Minute + } + return s.geminiQuotaService.CooldownForAccount(ctx, account) +} + +// handleAuthError 处理认证类错误(401/403),停止账号调度 +func (s *RateLimitService) handleAuthError(ctx context.Context, account *Account, errorMsg string) { + if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil { + slog.Warn("account_set_error_failed", "account_id", account.ID, "error", err) + return + } + slog.Warn("account_disabled_auth_error", "account_id", account.ID, "error", errorMsg) +} + +// handleCustomErrorCode 处理自定义错误码,停止账号调度 +func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *Account, statusCode int, errorMsg string) { + msg := "Custom error code " + strconv.Itoa(statusCode) + ": " + errorMsg + if err := s.accountRepo.SetError(ctx, account.ID, msg); err != nil { + slog.Warn("account_set_error_failed", "account_id", account.ID, "status_code", statusCode, "error", err) + return + } + slog.Warn("account_disabled_custom_error", "account_id", account.ID, "status_code", statusCode, "error", errorMsg) +} + +// handle429 处理429限流错误 +// 解析响应头获取重置时间,标记账号为限流状态 +func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header) { + // 解析重置时间戳 + resetTimestamp := headers.Get("anthropic-ratelimit-unified-reset") + if resetTimestamp == "" { + // 没有重置时间,使用默认5分钟 + resetAt := time.Now().Add(5 * time.Minute) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + } + return + } + + // 解析Unix时间戳 + ts, err := strconv.ParseInt(resetTimestamp, 10, 64) + if err != nil { + slog.Warn("rate_limit_reset_parse_failed", "reset_timestamp", resetTimestamp, "error", err) + resetAt := time.Now().Add(5 * time.Minute) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) + } + return + } + + resetAt := time.Unix(ts, 0) + + // 标记限流状态 + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { + slog.Warn("rate_limit_set_failed", 
"account_id", account.ID, "error", err) + return + } + + // 根据重置时间反推5h窗口 + windowEnd := resetAt + windowStart := resetAt.Add(-5 * time.Hour) + if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, &windowStart, &windowEnd, "rejected"); err != nil { + slog.Warn("rate_limit_update_session_window_failed", "account_id", account.ID, "error", err) + } + + slog.Info("account_rate_limited", "account_id", account.ID, "reset_at", resetAt) +} + +// handle529 处理529过载错误 +// 根据配置设置过载冷却时间 +func (s *RateLimitService) handle529(ctx context.Context, account *Account) { + cooldownMinutes := s.cfg.RateLimit.OverloadCooldownMinutes + if cooldownMinutes <= 0 { + cooldownMinutes = 10 // 默认10分钟 + } + + until := time.Now().Add(time.Duration(cooldownMinutes) * time.Minute) + if err := s.accountRepo.SetOverloaded(ctx, account.ID, until); err != nil { + slog.Warn("overload_set_failed", "account_id", account.ID, "error", err) + return + } + + slog.Info("account_overloaded", "account_id", account.ID, "until", until) +} + +// UpdateSessionWindow 从成功响应更新5h窗口状态 +func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Account, headers http.Header) { + status := headers.Get("anthropic-ratelimit-unified-5h-status") + if status == "" { + return + } + + // 检查是否需要初始化时间窗口 + // 对于 Setup Token 账号,首次成功请求时需要预测时间窗口 + var windowStart, windowEnd *time.Time + needInitWindow := account.SessionWindowEnd == nil || time.Now().After(*account.SessionWindowEnd) + + if needInitWindow && (status == "allowed" || status == "allowed_warning") { + // 预测时间窗口:从当前时间的整点开始,+5小时为结束 + // 例如:现在是 14:30,窗口为 14:00 ~ 19:00 + now := time.Now() + start := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location()) + end := start.Add(5 * time.Hour) + windowStart = &start + windowEnd = &end + slog.Info("account_session_window_initialized", "account_id", account.ID, "window_start", start, "window_end", end, "status", status) + } + + if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, windowStart, windowEnd, status); err != nil { + slog.Warn("session_window_update_failed", "account_id", account.ID, "error", err) + } + + // 如果状态为allowed且之前有限流,说明窗口已重置,清除限流状态 + if status == "allowed" && account.IsRateLimited() { + if err := s.ClearRateLimit(ctx, account.ID); err != nil { + slog.Warn("rate_limit_clear_failed", "account_id", account.ID, "error", err) + } + } +} + +// ClearRateLimit 清除账号的限流状态 +func (s *RateLimitService) ClearRateLimit(ctx context.Context, accountID int64) error { + if err := s.accountRepo.ClearRateLimit(ctx, accountID); err != nil { + return err + } + return s.accountRepo.ClearAntigravityQuotaScopes(ctx, accountID) +} + +func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID int64) error { + if err := s.accountRepo.ClearTempUnschedulable(ctx, accountID); err != nil { + return err + } + if s.tempUnschedCache != nil { + if err := s.tempUnschedCache.DeleteTempUnsched(ctx, accountID); err != nil { + slog.Warn("temp_unsched_cache_delete_failed", "account_id", accountID, "error", err) + } + } + return nil +} + +func (s *RateLimitService) GetTempUnschedStatus(ctx context.Context, accountID int64) (*TempUnschedState, error) { + now := time.Now().Unix() + if s.tempUnschedCache != nil { + state, err := s.tempUnschedCache.GetTempUnsched(ctx, accountID) + if err != nil { + return nil, err + } + if state != nil && state.UntilUnix > now { + return state, nil + } + } + + account, err := s.accountRepo.GetByID(ctx, accountID) + if err != nil { + return nil, err + } + if 
account.TempUnschedulableUntil == nil { + return nil, nil + } + if account.TempUnschedulableUntil.Unix() <= now { + return nil, nil + } + + state := &TempUnschedState{ + UntilUnix: account.TempUnschedulableUntil.Unix(), + } + + if account.TempUnschedulableReason != "" { + var parsed TempUnschedState + if err := json.Unmarshal([]byte(account.TempUnschedulableReason), &parsed); err == nil { + if parsed.UntilUnix == 0 { + parsed.UntilUnix = state.UntilUnix + } + state = &parsed + } else { + state.ErrorMessage = account.TempUnschedulableReason + } + } + + if s.tempUnschedCache != nil { + if err := s.tempUnschedCache.SetTempUnsched(ctx, accountID, state); err != nil { + slog.Warn("temp_unsched_cache_set_failed", "account_id", accountID, "error", err) + } + } + + return state, nil +} + +func (s *RateLimitService) HandleTempUnschedulable(ctx context.Context, account *Account, statusCode int, responseBody []byte) bool { + if account == nil { + return false + } + if !account.ShouldHandleErrorCode(statusCode) { + return false + } + return s.tryTempUnschedulable(ctx, account, statusCode, responseBody) +} + +const tempUnschedBodyMaxBytes = 64 << 10 +const tempUnschedMessageMaxBytes = 2048 + +func (s *RateLimitService) tryTempUnschedulable(ctx context.Context, account *Account, statusCode int, responseBody []byte) bool { + if account == nil { + return false + } + if !account.IsTempUnschedulableEnabled() { + return false + } + rules := account.GetTempUnschedulableRules() + if len(rules) == 0 { + return false + } + if statusCode <= 0 || len(responseBody) == 0 { + return false + } + + body := responseBody + if len(body) > tempUnschedBodyMaxBytes { + body = body[:tempUnschedBodyMaxBytes] + } + bodyLower := strings.ToLower(string(body)) + + for idx, rule := range rules { + if rule.ErrorCode != statusCode || len(rule.Keywords) == 0 { + continue + } + matchedKeyword := matchTempUnschedKeyword(bodyLower, rule.Keywords) + if matchedKeyword == "" { + continue + } + + if s.triggerTempUnschedulable(ctx, account, rule, idx, statusCode, matchedKeyword, responseBody) { + return true + } + } + + return false +} + +func matchTempUnschedKeyword(bodyLower string, keywords []string) string { + if bodyLower == "" { + return "" + } + for _, keyword := range keywords { + k := strings.TrimSpace(keyword) + if k == "" { + continue + } + if strings.Contains(bodyLower, strings.ToLower(k)) { + return k + } + } + return "" +} + +func (s *RateLimitService) triggerTempUnschedulable(ctx context.Context, account *Account, rule TempUnschedulableRule, ruleIndex int, statusCode int, matchedKeyword string, responseBody []byte) bool { + if account == nil { + return false + } + if rule.DurationMinutes <= 0 { + return false + } + + now := time.Now() + until := now.Add(time.Duration(rule.DurationMinutes) * time.Minute) + + state := &TempUnschedState{ + UntilUnix: until.Unix(), + TriggeredAtUnix: now.Unix(), + StatusCode: statusCode, + MatchedKeyword: matchedKeyword, + RuleIndex: ruleIndex, + ErrorMessage: truncateTempUnschedMessage(responseBody, tempUnschedMessageMaxBytes), + } + + reason := "" + if raw, err := json.Marshal(state); err == nil { + reason = string(raw) + } + if reason == "" { + reason = strings.TrimSpace(state.ErrorMessage) + } + + if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil { + slog.Warn("temp_unsched_set_failed", "account_id", account.ID, "error", err) + return false + } + + if s.tempUnschedCache != nil { + if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != 
nil {
+			slog.Warn("temp_unsched_cache_set_failed", "account_id", account.ID, "error", err)
+		}
+	}
+
+	slog.Info("account_temp_unschedulable", "account_id", account.ID, "until", until, "rule_index", ruleIndex, "status_code", statusCode)
+	return true
+}
+
+func truncateTempUnschedMessage(body []byte, maxBytes int) string {
+	if maxBytes <= 0 || len(body) == 0 {
+		return ""
+	}
+	if len(body) > maxBytes {
+		body = body[:maxBytes]
+	}
+	return strings.TrimSpace(string(body))
+}
+
+// HandleStreamTimeout handles stream-data interval timeouts.
+// Depending on system settings it marks the account temporarily unschedulable or errored.
+// It returns whether scheduling of this account should stop.
+func (s *RateLimitService) HandleStreamTimeout(ctx context.Context, account *Account, model string) bool {
+	if account == nil {
+		return false
+	}
+
+	// Load the system settings.
+	if s.settingService == nil {
+		slog.Warn("stream_timeout_setting_service_missing", "account_id", account.ID)
+		return false
+	}
+
+	settings, err := s.settingService.GetStreamTimeoutSettings(ctx)
+	if err != nil {
+		slog.Warn("stream_timeout_get_settings_failed", "account_id", account.ID, "error", err)
+		return false
+	}
+
+	if !settings.Enabled {
+		return false
+	}
+
+	if settings.Action == StreamTimeoutActionNone {
+		return false
+	}
+
+	// Bump the timeout counter.
+	var count int64 = 1
+	if s.timeoutCounterCache != nil {
+		count, err = s.timeoutCounterCache.IncrementTimeoutCount(ctx, account.ID, settings.ThresholdWindowMinutes)
+		if err != nil {
+			slog.Warn("stream_timeout_increment_count_failed", "account_id", account.ID, "error", err)
+			// Keep going, treating this as the first timeout (count=1).
+			count = 1
+		}
+	}
+
+	slog.Info("stream_timeout_count", "account_id", account.ID, "count", count, "threshold", settings.ThresholdCount, "window_minutes", settings.ThresholdWindowMinutes, "model", model)
+
+	// Check whether the threshold has been reached.
+	if count < int64(settings.ThresholdCount) {
+		return false
+	}
+
+	// Threshold reached; apply the configured action.
+	switch settings.Action {
+	case StreamTimeoutActionTempUnsched:
+		return s.triggerStreamTimeoutTempUnsched(ctx, account, settings, model)
+	case StreamTimeoutActionError:
+		return s.triggerStreamTimeoutError(ctx, account, model)
+	default:
+		return false
+	}
+}
+
+// triggerStreamTimeoutTempUnsched marks the account temporarily unschedulable after stream timeouts.
+func (s *RateLimitService) triggerStreamTimeoutTempUnsched(ctx context.Context, account *Account, settings *StreamTimeoutSettings, model string) bool {
+	now := time.Now()
+	until := now.Add(time.Duration(settings.TempUnschedMinutes) * time.Minute)
+
+	state := &TempUnschedState{
+		UntilUnix:       until.Unix(),
+		TriggeredAtUnix: now.Unix(),
+		StatusCode:      0, // timeouts carry no HTTP status code
+		MatchedKeyword:  "stream_timeout",
+		RuleIndex:       -1, // marks a system-level rule
+		ErrorMessage:    "Stream data interval timeout for model: " + model,
+	}
+
+	reason := ""
+	if raw, err := json.Marshal(state); err == nil {
+		reason = string(raw)
+	}
+	if reason == "" {
+		reason = state.ErrorMessage
+	}
+
+	if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil {
+		slog.Warn("stream_timeout_set_temp_unsched_failed", "account_id", account.ID, "error", err)
+		return false
+	}
+
+	if s.tempUnschedCache != nil {
+		if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil {
+			slog.Warn("stream_timeout_set_temp_unsched_cache_failed", "account_id", account.ID, "error", err)
+		}
+	}
+
+	// Reset the timeout counter.
+	if s.timeoutCounterCache != nil {
+		if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil {
+			slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err)
+		}
+	}
+
+	slog.Info("stream_timeout_temp_unschedulable", "account_id", account.ID, "until", until, "model", model)
+	return true
+}
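+
+// Editor's note (illustrative): the reason persisted above is the JSON-encoded
+// TempUnschedState. Assuming snake_case JSON tags on that struct (it is
+// defined elsewhere in this package), a stream-timeout entry would look
+// roughly like:
+//
+//	{"until_unix":1767225600,"triggered_at_unix":1767224700,"status_code":0,
+//	"matched_keyword":"stream_timeout","rule_index":-1,
+//	"error_message":"Stream data interval timeout for model: ..."}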
+
+// triggerStreamTimeoutError puts the account into an error state after repeated stream timeouts.
+func (s *RateLimitService) triggerStreamTimeoutError(ctx context.Context, account *Account, model string) bool {
+	errorMsg := "Stream data interval timeout (repeated failures) for model: " + model
+
+	if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil {
+		slog.Warn("stream_timeout_set_error_failed", "account_id", account.ID, "error", err)
+		return false
+	}
+
+	// Reset the timeout counter.
+	if s.timeoutCounterCache != nil {
+		if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil {
+			slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err)
+		}
+	}
+
+	slog.Warn("stream_timeout_account_error", "account_id", account.ID, "model", model)
+	return true
+}
diff --git a/backend/internal/service/ratelimit_service_401_test.go b/backend/internal/service/ratelimit_service_401_test.go
new file mode 100644
index 00000000..36357a4b
--- /dev/null
+++ b/backend/internal/service/ratelimit_service_401_test.go
@@ -0,0 +1,121 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+type rateLimitAccountRepoStub struct {
+	mockAccountRepoForGemini
+	setErrorCalls int
+	tempCalls     int
+	lastErrorMsg  string
+}
+
+func (r *rateLimitAccountRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error {
+	r.setErrorCalls++
+	r.lastErrorMsg = errorMsg
+	return nil
+}
+
+func (r *rateLimitAccountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+	r.tempCalls++
+	return nil
+}
+
+type tokenCacheInvalidatorRecorder struct {
+	accounts []*Account
+	err      error
+}
+
+func (r *tokenCacheInvalidatorRecorder) InvalidateToken(ctx context.Context, account *Account) error {
+	r.accounts = append(r.accounts, account)
+	return r.err
+}
+
+func TestRateLimitService_HandleUpstreamError_OAuth401MarksError(t *testing.T) {
+	tests := []struct {
+		name     string
+		platform string
+	}{
+		{name: "gemini", platform: PlatformGemini},
+		{name: "antigravity", platform: PlatformAntigravity},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			repo := &rateLimitAccountRepoStub{}
+			invalidator := &tokenCacheInvalidatorRecorder{}
+			service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+			service.SetTokenCacheInvalidator(invalidator)
+			account := &Account{
+				ID:       100,
+				Platform: tt.platform,
+				Type:     AccountTypeOAuth,
+				Credentials: map[string]any{
+					"temp_unschedulable_enabled": true,
+					"temp_unschedulable_rules": []any{
+						map[string]any{
+							"error_code":       401,
+							"keywords":         []any{"unauthorized"},
+							"duration_minutes": 30,
+							"description":      "custom rule",
+						},
+					},
+				},
+			}
+
+			shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+			require.True(t, shouldDisable)
+			require.Equal(t, 1, repo.setErrorCalls)
+			require.Equal(t, 0, repo.tempCalls)
+			require.Contains(t, repo.lastErrorMsg, "Authentication failed (401)")
+			require.Len(t, invalidator.accounts, 1)
+		})
+	}
+}
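+
+// Editor's note: the Credentials map in the test above doubles as documentation
+// for the per-account temp-unschedulable rule shape (keys as consumed by
+// GetTempUnschedulableRules):
+//
+//	{
+//	  "temp_unschedulable_enabled": true,
+//	  "temp_unschedulable_rules": [
+//	    {"error_code": 401, "keywords": ["unauthorized"], "duration_minutes": 30}
+//	  ]
+//	}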
+
+func TestRateLimitService_HandleUpstreamError_OAuth401InvalidatorError(t *testing.T) {
+	repo := &rateLimitAccountRepoStub{}
+	invalidator := &tokenCacheInvalidatorRecorder{err: errors.New("boom")}
+	service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+	service.SetTokenCacheInvalidator(invalidator)
+	account := &Account{
+		ID:       101,
+		Platform: PlatformGemini,
+		Type:     AccountTypeOAuth,
+	}
+
+	shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+	require.True(t, shouldDisable)
+	require.Equal(t, 1, repo.setErrorCalls)
+	require.Len(t, invalidator.accounts, 1)
+}
+
+func TestRateLimitService_HandleUpstreamError_NonOAuth401(t *testing.T) {
+	repo := &rateLimitAccountRepoStub{}
+	invalidator := &tokenCacheInvalidatorRecorder{}
+	service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+	service.SetTokenCacheInvalidator(invalidator)
+	account := &Account{
+		ID:       102,
+		Platform: PlatformOpenAI,
+		Type:     AccountTypeAPIKey,
+	}
+
+	shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+	require.True(t, shouldDisable)
+	require.Equal(t, 1, repo.setErrorCalls)
+	require.Empty(t, invalidator.accounts)
+}
diff --git a/backend/internal/service/redeem_code.go b/backend/internal/service/redeem_code.go
new file mode 100644
index 00000000..a66b53ba
--- /dev/null
+++ b/backend/internal/service/redeem_code.go
@@ -0,0 +1,41 @@
+package service
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"time"
+)
+
+type RedeemCode struct {
+	ID        int64
+	Code      string
+	Type      string
+	Value     float64
+	Status    string
+	UsedBy    *int64
+	UsedAt    *time.Time
+	Notes     string
+	CreatedAt time.Time
+
+	GroupID      *int64
+	ValidityDays int
+
+	User  *User
+	Group *Group
+}
+
+func (r *RedeemCode) IsUsed() bool {
+	return r.Status == StatusUsed
+}
+
+func (r *RedeemCode) CanUse() bool {
+	return r.Status == StatusUnused
+}
+
+func GenerateRedeemCode() (string, error) {
+	b := make([]byte, 16)
+	if _, err := rand.Read(b); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(b), nil
+}
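+
+// Editor's note (illustrative): GenerateRedeemCode yields 32 lowercase hex
+// characters from 16 random bytes, e.g. "3f2a9c0d4b1e8f6a7c5d2e9b0a4f8c1d"
+// (value invented for illustration), whereas RedeemService.GenerateRandomCode
+// below formats the same entropy as four uppercase 8-character groups.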
diff --git a/backend/internal/service/redeem_service.go b/backend/internal/service/redeem_service.go
new file mode 100644
index 00000000..ff52dc47
--- /dev/null
+++ b/backend/internal/service/redeem_service.go
@@ -0,0 +1,438 @@
+package service
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+var (
+	ErrRedeemCodeNotFound  = infraerrors.NotFound("REDEEM_CODE_NOT_FOUND", "redeem code not found")
+	ErrRedeemCodeUsed      = infraerrors.Conflict("REDEEM_CODE_USED", "redeem code already used")
+	ErrInsufficientBalance = infraerrors.BadRequest("INSUFFICIENT_BALANCE", "insufficient balance")
+	ErrRedeemRateLimited   = infraerrors.TooManyRequests("REDEEM_RATE_LIMITED", "too many failed attempts, please try again later")
+	ErrRedeemCodeLocked    = infraerrors.Conflict("REDEEM_CODE_LOCKED", "redeem code is being processed, please try again")
+)
+
+const (
+	redeemMaxErrorsPerHour  = 20
+	redeemRateLimitDuration = time.Hour
+	redeemLockDuration      = 10 * time.Second // lock TTL; guards against deadlocks
+)
+
+// RedeemCache defines cache operations for redeem service
+type RedeemCache interface {
+	GetRedeemAttemptCount(ctx context.Context, userID int64) (int, error)
+	IncrementRedeemAttemptCount(ctx context.Context, userID int64) error
+
+	AcquireRedeemLock(ctx context.Context, code string, ttl time.Duration) (bool, error)
+	ReleaseRedeemLock(ctx context.Context, code string) error
+}
+
+type RedeemCodeRepository interface {
+	Create(ctx context.Context, code *RedeemCode) error
+	CreateBatch(ctx context.Context, codes []RedeemCode) error
+	GetByID(ctx context.Context, id int64) (*RedeemCode, error)
+	GetByCode(ctx context.Context, code string) (*RedeemCode, error)
+	Update(ctx context.Context, code *RedeemCode) error
+	Delete(ctx context.Context, id int64) error
+	Use(ctx context.Context, id, userID int64) error
+
+	List(ctx context.Context, params pagination.PaginationParams) ([]RedeemCode, *pagination.PaginationResult, error)
+	ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]RedeemCode, *pagination.PaginationResult, error)
+	ListByUser(ctx context.Context, userID int64, limit int) ([]RedeemCode, error)
+}
+
+// GenerateCodesRequest is the request for batch-generating redeem codes.
+type GenerateCodesRequest struct {
+	Count int     `json:"count"`
+	Value float64 `json:"value"`
+	Type  string  `json:"type"`
+}
+
+// RedeemCodeResponse is the API response shape for a redeem code.
+type RedeemCodeResponse struct {
+	Code      string    `json:"code"`
+	Value     float64   `json:"value"`
+	Status    string    `json:"status"`
+	CreatedAt time.Time `json:"created_at"`
+}
+
+// RedeemService implements redeem-code business logic.
+type RedeemService struct {
+	redeemRepo           RedeemCodeRepository
+	userRepo             UserRepository
+	subscriptionService  *SubscriptionService
+	cache                RedeemCache
+	billingCacheService  *BillingCacheService
+	entClient            *dbent.Client
+	authCacheInvalidator APIKeyAuthCacheInvalidator
+}
+
+// NewRedeemService creates a redeem-code service instance.
+func NewRedeemService(
+	redeemRepo RedeemCodeRepository,
+	userRepo UserRepository,
+	subscriptionService *SubscriptionService,
+	cache RedeemCache,
+	billingCacheService *BillingCacheService,
+	entClient *dbent.Client,
+	authCacheInvalidator APIKeyAuthCacheInvalidator,
+) *RedeemService {
+	return &RedeemService{
+		redeemRepo:           redeemRepo,
+		userRepo:             userRepo,
+		subscriptionService:  subscriptionService,
+		cache:                cache,
+		billingCacheService:  billingCacheService,
+		entClient:            entClient,
+		authCacheInvalidator: authCacheInvalidator,
+	}
+}
+
+// GenerateRandomCode generates a random redeem code.
+func (s *RedeemService) GenerateRandomCode() (string, error) {
+	// Generate 16 bytes of random data.
+	bytes := make([]byte, 16)
+	if _, err := rand.Read(bytes); err != nil {
+		return "", fmt.Errorf("generate random bytes: %w", err)
+	}
+
+	// Hex-encode to a 32-character string.
+	code := hex.EncodeToString(bytes)
+
+	// Format as four uppercase 8-character groups: XXXXXXXX-XXXXXXXX-XXXXXXXX-XXXXXXXX.
+	parts := []string{
+		strings.ToUpper(code[0:8]),
+		strings.ToUpper(code[8:16]),
+		strings.ToUpper(code[16:24]),
+		strings.ToUpper(code[24:32]),
+	}
+
+	return strings.Join(parts, "-"), nil
+}
+
+// GenerateCodes batch-generates redeem codes.
+func (s *RedeemService) GenerateCodes(ctx context.Context, req GenerateCodesRequest) ([]RedeemCode, error) {
+	if req.Count <= 0 {
+		return nil, errors.New("count must be greater than 0")
+	}
+
+	if req.Value <= 0 {
+		return nil, errors.New("value must be greater than 0")
+	}
+
+	if req.Count > 1000 {
+		return nil, errors.New("cannot generate more than 1000 codes at once")
+	}
+
+	codeType := req.Type
+	if codeType == "" {
+		codeType = RedeemTypeBalance
+	}
+
+	codes := make([]RedeemCode, 0, req.Count)
+	for i := 0; i < req.Count; i++ {
+		code, err := s.GenerateRandomCode()
+		if err != nil {
+			return nil, fmt.Errorf("generate code: %w", err)
+		}
+
+		codes = append(codes, RedeemCode{
+			Code:   code,
+			Type:   codeType,
+			Value:  req.Value,
+			Status: StatusUnused,
+		})
+	}
+
+	// Insert the whole batch in one repository call.
+	if err := s.redeemRepo.CreateBatch(ctx, codes); err != nil {
+		return nil, fmt.Errorf("create batch codes: %w", err)
+	}
+
+	return codes, nil
+}
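+
+// Editor's sketch (illustrative; the admin-handler wiring is assumed): an
+// endpoint minting 100 balance codes worth 10 credits each would call:
+//
+//	codes, err := redeemSvc.GenerateCodes(ctx, GenerateCodesRequest{
+//		Count: 100,
+//		Value: 10,
+//		Type:  RedeemTypeBalance,
+//	})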
+
+// checkRedeemRateLimit checks whether the user exceeded the hourly redeem-failure limit.
+func (s *RedeemService) checkRedeemRateLimit(ctx context.Context, userID int64) error {
+	if s.cache == nil {
+		return nil
+	}
+
+	count, err := s.cache.GetRedeemAttemptCount(ctx, userID)
+	if err != nil {
+		// Do not block the user when Redis errors out.
+		return nil
+	}
+
+	if count >= redeemMaxErrorsPerHour {
+		return ErrRedeemRateLimited
+	}
+
+	return nil
+}
+
+// incrementRedeemErrorCount bumps the user's redeem-failure counter.
+func (s *RedeemService) incrementRedeemErrorCount(ctx context.Context, userID int64) {
+	if s.cache == nil {
+		return
+	}
+
+	_ = s.cache.IncrementRedeemAttemptCount(ctx, userID)
+}
+
+// acquireRedeemLock tries to take the distributed lock for a redeem code.
+// It returns true on success and false when the lock is already held.
+func (s *RedeemService) acquireRedeemLock(ctx context.Context, code string) bool {
+	if s.cache == nil {
+		return true // degrade to no locking when Redis is absent
+	}
+
+	ok, err := s.cache.AcquireRedeemLock(ctx, code, redeemLockDuration)
+	if err != nil {
+		// Do not block on Redis errors; rely on the database-level status check.
+		return true
+	}
+	return ok
+}
+
+// releaseRedeemLock releases the distributed lock for a redeem code.
+func (s *RedeemService) releaseRedeemLock(ctx context.Context, code string) {
+	if s.cache == nil {
+		return
+	}
+
+	_ = s.cache.ReleaseRedeemLock(ctx, code)
+}
+
+// Redeem consumes a redeem code on behalf of a user.
+func (s *RedeemService) Redeem(ctx context.Context, userID int64, code string) (*RedeemCode, error) {
+	// Enforce the failure rate limit.
+	if err := s.checkRedeemRateLimit(ctx, userID); err != nil {
+		return nil, err
+	}
+
+	// Take the distributed lock to prevent concurrent use of the same code.
+	if !s.acquireRedeemLock(ctx, code) {
+		return nil, ErrRedeemCodeLocked
+	}
+	defer s.releaseRedeemLock(ctx, code)
+
+	// Look up the redeem code.
+	redeemCode, err := s.redeemRepo.GetByCode(ctx, code)
+	if err != nil {
+		if errors.Is(err, ErrRedeemCodeNotFound) {
+			s.incrementRedeemErrorCount(ctx, userID)
+			return nil, ErrRedeemCodeNotFound
+		}
+		return nil, fmt.Errorf("get redeem code: %w", err)
+	}
+
+	// Check the code's status.
+	if !redeemCode.CanUse() {
+		s.incrementRedeemErrorCount(ctx, userID)
+		return nil, ErrRedeemCodeUsed
+	}
+
+	// Validate type-specific preconditions.
+	if redeemCode.Type == RedeemTypeSubscription && redeemCode.GroupID == nil {
+		return nil, infraerrors.BadRequest("REDEEM_CODE_INVALID", "invalid subscription redeem code: missing group_id")
+	}
+
+	// Load the user.
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+	_ = user // referenced to avoid an unused-variable error
+
+	// Use a database transaction so marking the code used and granting the
+	// benefit happen atomically.
+	tx, err := s.entClient.Tx(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("begin transaction: %w", err)
+	}
+	defer func() { _ = tx.Rollback() }()
+
+	// Put the transaction into the context so repository methods share it.
+	txCtx := dbent.NewTxContext(ctx, tx)
+
+	// Critical: mark the code as used first so concurrent requests stay safe.
+	// The database-side optimistic guard (WHERE status = 'unused') makes this atomic.
+	if err := s.redeemRepo.Use(txCtx, redeemCode.ID, userID); err != nil {
+		if errors.Is(err, ErrRedeemCodeNotFound) || errors.Is(err, ErrRedeemCodeUsed) {
+			return nil, ErrRedeemCodeUsed
+		}
+		return nil, fmt.Errorf("mark code as used: %w", err)
+	}
+
+	// Apply the redemption; the code is locked, so these operations are safe.
+	switch redeemCode.Type {
+	case RedeemTypeBalance:
+		// Credit the user's balance.
+		if err := s.userRepo.UpdateBalance(txCtx, userID, redeemCode.Value); err != nil {
+			return nil, fmt.Errorf("update user balance: %w", err)
+		}
+
+	case RedeemTypeConcurrency:
+		// Raise the user's concurrency limit.
+		if err := s.userRepo.UpdateConcurrency(txCtx, userID, int(redeemCode.Value)); err != nil {
+			return nil, fmt.Errorf("update user concurrency: %w", err)
+		}
+
+	case RedeemTypeSubscription:
+		validityDays := redeemCode.ValidityDays
+		if validityDays <= 0 {
+			validityDays = 30
+		}
+		_, _, err := s.subscriptionService.AssignOrExtendSubscription(txCtx, &AssignSubscriptionInput{
+			UserID:       userID,
+			GroupID:      *redeemCode.GroupID,
+			ValidityDays: validityDays,
+			AssignedBy:   0, // assigned by the system
+			Notes:        fmt.Sprintf("Redeemed via code %s", redeemCode.Code),
+		})
+		if err != nil {
+			return nil, fmt.Errorf("assign or extend subscription: %w", err)
+		}
+
+	default:
+		return nil, fmt.Errorf("unsupported redeem type: %s", redeemCode.Type)
+	}
+
+	// Commit the transaction.
+	if err := tx.Commit(); err != nil {
+		return nil, fmt.Errorf("commit transaction: %w", err)
+	}
+
+	// Invalidate caches only after a successful commit.
+	s.invalidateRedeemCaches(ctx, userID, redeemCode)
+
+	// Re-read the updated code.
+	redeemCode, err = s.redeemRepo.GetByID(ctx, redeemCode.ID)
+	if err != nil {
+		return nil, fmt.Errorf("get updated redeem code: %w", err)
+	}
+
+	return redeemCode, nil
+}
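+
+// Editor's note: the concurrency guard above hinges on the repository's Use
+// method performing a conditional update; conceptually (illustrative SQL, the
+// real statement is generated by ent):
+//
+//	UPDATE redeem_codes
+//	SET status = 'used', used_by = $1, used_at = now()
+//	WHERE id = $2 AND status = 'unused';
+//
+// Zero affected rows means another request already consumed the code.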
+
+// invalidateRedeemCaches invalidates caches affected by a redemption.
+func (s *RedeemService) invalidateRedeemCaches(ctx context.Context, userID int64, redeemCode *RedeemCode) {
+	switch redeemCode.Type {
+	case RedeemTypeBalance:
+		if s.authCacheInvalidator != nil {
+			s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+		}
+		if s.billingCacheService == nil {
+			return
+		}
+		go func() {
+			cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_ = s.billingCacheService.InvalidateUserBalance(cacheCtx, userID)
+		}()
+	case RedeemTypeConcurrency:
+		if s.authCacheInvalidator != nil {
+			s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+		}
+		if s.billingCacheService == nil {
+			return
+		}
+	case RedeemTypeSubscription:
+		if s.authCacheInvalidator != nil {
+			s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+		}
+		if s.billingCacheService == nil {
+			return
+		}
+		if redeemCode.GroupID != nil {
+			groupID := *redeemCode.GroupID
+			go func() {
+				cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+				defer cancel()
+				_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+			}()
+		}
+	}
+}
+
+// GetByID fetches a redeem code by ID.
+func (s *RedeemService) GetByID(ctx context.Context, id int64) (*RedeemCode, error) {
+	code, err := s.redeemRepo.GetByID(ctx, id)
+	if err != nil {
+		return nil, fmt.Errorf("get redeem code: %w", err)
+	}
+	return code, nil
+}
+
+// GetByCode fetches a redeem code by its code string.
+func (s *RedeemService) GetByCode(ctx context.Context, code string) (*RedeemCode, error) {
+	redeemCode, err := s.redeemRepo.GetByCode(ctx, code)
+	if err != nil {
+		return nil, fmt.Errorf("get redeem code: %w", err)
+	}
+	return redeemCode, nil
+}
+
+// List returns redeem codes with pagination (admin only).
+func (s *RedeemService) List(ctx context.Context, params pagination.PaginationParams) ([]RedeemCode, *pagination.PaginationResult, error) {
+	codes, pagination, err := s.redeemRepo.List(ctx, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list redeem codes: %w", err)
+	}
+	return codes, pagination, nil
+}
+
+// Delete removes a redeem code (admin only).
+func (s *RedeemService) Delete(ctx context.Context, id int64) error {
+	// Make sure the code exists.
+	code, err := s.redeemRepo.GetByID(ctx, id)
+	if err != nil {
+		return fmt.Errorf("get redeem code: %w", err)
+	}
+
+	// Used codes must not be deleted.
+	if code.IsUsed() {
+		return infraerrors.Conflict("REDEEM_CODE_DELETE_USED", "cannot delete used redeem code")
+	}
+
+	if err := s.redeemRepo.Delete(ctx, id); err != nil {
+		return fmt.Errorf("delete redeem code: %w", err)
+	}
+
+	return nil
+}
+
+// GetStats returns redeem-code statistics.
+func (s *RedeemService) GetStats(ctx context.Context) (map[string]any, error) {
+	// TODO: implement the statistics logic:
+	// count unused and used codes, sum the total face value, etc.
+
+	stats := map[string]any{
+		"total_codes":  0,
+		"unused_codes": 0,
+		"used_codes":   0,
+		"total_value":  0.0,
+	}
+
+	return stats, nil
+}
fmt.Errorf("get user redeem history: %w", err) + } + return codes, nil +} diff --git a/backend/internal/service/scheduler_cache.go b/backend/internal/service/scheduler_cache.go new file mode 100644 index 00000000..f36135e0 --- /dev/null +++ b/backend/internal/service/scheduler_cache.go @@ -0,0 +1,68 @@ +package service + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + SchedulerModeSingle = "single" + SchedulerModeMixed = "mixed" + SchedulerModeForced = "forced" +) + +type SchedulerBucket struct { + GroupID int64 + Platform string + Mode string +} + +func (b SchedulerBucket) String() string { + return fmt.Sprintf("%d:%s:%s", b.GroupID, b.Platform, b.Mode) +} + +func ParseSchedulerBucket(raw string) (SchedulerBucket, bool) { + parts := strings.Split(raw, ":") + if len(parts) != 3 { + return SchedulerBucket{}, false + } + groupID, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return SchedulerBucket{}, false + } + if parts[1] == "" || parts[2] == "" { + return SchedulerBucket{}, false + } + return SchedulerBucket{ + GroupID: groupID, + Platform: parts[1], + Mode: parts[2], + }, true +} + +// SchedulerCache 负责调度快照与账号快照的缓存读写。 +type SchedulerCache interface { + // GetSnapshot 读取快照并返回命中与否(ready + active + 数据完整)。 + GetSnapshot(ctx context.Context, bucket SchedulerBucket) ([]*Account, bool, error) + // SetSnapshot 写入快照并切换激活版本。 + SetSnapshot(ctx context.Context, bucket SchedulerBucket, accounts []Account) error + // GetAccount 获取单账号快照。 + GetAccount(ctx context.Context, accountID int64) (*Account, error) + // SetAccount 写入单账号快照(包含不可调度状态)。 + SetAccount(ctx context.Context, account *Account) error + // DeleteAccount 删除单账号快照。 + DeleteAccount(ctx context.Context, accountID int64) error + // UpdateLastUsed 批量更新账号的最后使用时间。 + UpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error + // TryLockBucket 尝试获取分桶重建锁。 + TryLockBucket(ctx context.Context, bucket SchedulerBucket, ttl time.Duration) (bool, error) + // ListBuckets 返回已注册的分桶集合。 + ListBuckets(ctx context.Context) ([]SchedulerBucket, error) + // GetOutboxWatermark 读取 outbox 水位。 + GetOutboxWatermark(ctx context.Context) (int64, error) + // SetOutboxWatermark 保存 outbox 水位。 + SetOutboxWatermark(ctx context.Context, id int64) error +} diff --git a/backend/internal/service/scheduler_events.go b/backend/internal/service/scheduler_events.go new file mode 100644 index 00000000..5a3e72ce --- /dev/null +++ b/backend/internal/service/scheduler_events.go @@ -0,0 +1,10 @@ +package service + +const ( + SchedulerOutboxEventAccountChanged = "account_changed" + SchedulerOutboxEventAccountGroupsChanged = "account_groups_changed" + SchedulerOutboxEventAccountBulkChanged = "account_bulk_changed" + SchedulerOutboxEventAccountLastUsed = "account_last_used" + SchedulerOutboxEventGroupChanged = "group_changed" + SchedulerOutboxEventFullRebuild = "full_rebuild" +) diff --git a/backend/internal/service/scheduler_outbox.go b/backend/internal/service/scheduler_outbox.go new file mode 100644 index 00000000..32bfcfaa --- /dev/null +++ b/backend/internal/service/scheduler_outbox.go @@ -0,0 +1,21 @@ +package service + +import ( + "context" + "time" +) + +type SchedulerOutboxEvent struct { + ID int64 + EventType string + AccountID *int64 + GroupID *int64 + Payload map[string]any + CreatedAt time.Time +} + +// SchedulerOutboxRepository 提供调度 outbox 的读取接口。 +type SchedulerOutboxRepository interface { + ListAfter(ctx context.Context, afterID int64, limit int) ([]SchedulerOutboxEvent, error) + MaxID(ctx context.Context) (int64, error) 
+} diff --git a/backend/internal/service/scheduler_snapshot_service.go b/backend/internal/service/scheduler_snapshot_service.go new file mode 100644 index 00000000..b3714ed1 --- /dev/null +++ b/backend/internal/service/scheduler_snapshot_service.go @@ -0,0 +1,786 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "log" + "strconv" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +var ( + ErrSchedulerCacheNotReady = errors.New("scheduler cache not ready") + ErrSchedulerFallbackLimited = errors.New("scheduler db fallback limited") +) + +const outboxEventTimeout = 2 * time.Minute + +type SchedulerSnapshotService struct { + cache SchedulerCache + outboxRepo SchedulerOutboxRepository + accountRepo AccountRepository + groupRepo GroupRepository + cfg *config.Config + stopCh chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + fallbackLimit *fallbackLimiter + lagMu sync.Mutex + lagFailures int +} + +func NewSchedulerSnapshotService( + cache SchedulerCache, + outboxRepo SchedulerOutboxRepository, + accountRepo AccountRepository, + groupRepo GroupRepository, + cfg *config.Config, +) *SchedulerSnapshotService { + maxQPS := 0 + if cfg != nil { + maxQPS = cfg.Gateway.Scheduling.DbFallbackMaxQPS + } + return &SchedulerSnapshotService{ + cache: cache, + outboxRepo: outboxRepo, + accountRepo: accountRepo, + groupRepo: groupRepo, + cfg: cfg, + stopCh: make(chan struct{}), + fallbackLimit: newFallbackLimiter(maxQPS), + } +} + +func (s *SchedulerSnapshotService) Start() { + if s == nil || s.cache == nil { + return + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runInitialRebuild() + }() + + interval := s.outboxPollInterval() + if s.outboxRepo != nil && interval > 0 { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runOutboxWorker(interval) + }() + } + + fullInterval := s.fullRebuildInterval() + if fullInterval > 0 { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runFullRebuildWorker(fullInterval) + }() + } +} + +func (s *SchedulerSnapshotService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + close(s.stopCh) + }) + s.wg.Wait() +} + +func (s *SchedulerSnapshotService) ListSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { + useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform + mode := s.resolveMode(platform, hasForcePlatform) + bucket := s.bucketFor(groupID, platform, mode) + + if s.cache != nil { + cached, hit, err := s.cache.GetSnapshot(ctx, bucket) + if err != nil { + log.Printf("[Scheduler] cache read failed: bucket=%s err=%v", bucket.String(), err) + } else if hit { + return derefAccounts(cached), useMixed, nil + } + } + + if err := s.guardFallback(ctx); err != nil { + return nil, useMixed, err + } + + fallbackCtx, cancel := s.withFallbackTimeout(ctx) + defer cancel() + + accounts, err := s.loadAccountsFromDB(fallbackCtx, bucket, useMixed) + if err != nil { + return nil, useMixed, err + } + + if s.cache != nil { + if err := s.cache.SetSnapshot(fallbackCtx, bucket, accounts); err != nil { + log.Printf("[Scheduler] cache write failed: bucket=%s err=%v", bucket.String(), err) + } + } + + return accounts, useMixed, nil +} + +func (s *SchedulerSnapshotService) GetAccount(ctx context.Context, accountID int64) (*Account, error) { + if accountID <= 0 { + return nil, nil + } + if s.cache != nil { + account, err := s.cache.GetAccount(ctx, accountID) + if err != nil { + log.Printf("[Scheduler] account cache read 
failed: id=%d err=%v", accountID, err) + } else if account != nil { + return account, nil + } + } + + if err := s.guardFallback(ctx); err != nil { + return nil, err + } + fallbackCtx, cancel := s.withFallbackTimeout(ctx) + defer cancel() + return s.accountRepo.GetByID(fallbackCtx, accountID) +} + +func (s *SchedulerSnapshotService) runInitialRebuild() { + if s.cache == nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + buckets, err := s.cache.ListBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] list buckets failed: %v", err) + } + if len(buckets) == 0 { + buckets, err = s.defaultBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] default buckets failed: %v", err) + return + } + } + if err := s.rebuildBuckets(ctx, buckets, "startup"); err != nil { + log.Printf("[Scheduler] rebuild startup failed: %v", err) + } +} + +func (s *SchedulerSnapshotService) runOutboxWorker(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + s.pollOutbox() + for { + select { + case <-ticker.C: + s.pollOutbox() + case <-s.stopCh: + return + } + } +} + +func (s *SchedulerSnapshotService) runFullRebuildWorker(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := s.triggerFullRebuild("interval"); err != nil { + log.Printf("[Scheduler] full rebuild failed: %v", err) + } + case <-s.stopCh: + return + } + } +} + +func (s *SchedulerSnapshotService) pollOutbox() { + if s.outboxRepo == nil || s.cache == nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + watermark, err := s.cache.GetOutboxWatermark(ctx) + if err != nil { + log.Printf("[Scheduler] outbox watermark read failed: %v", err) + return + } + + events, err := s.outboxRepo.ListAfter(ctx, watermark, 200) + if err != nil { + log.Printf("[Scheduler] outbox poll failed: %v", err) + return + } + if len(events) == 0 { + return + } + + watermarkForCheck := watermark + for _, event := range events { + eventCtx, cancel := context.WithTimeout(context.Background(), outboxEventTimeout) + err := s.handleOutboxEvent(eventCtx, event) + cancel() + if err != nil { + log.Printf("[Scheduler] outbox handle failed: id=%d type=%s err=%v", event.ID, event.EventType, err) + return + } + } + + lastID := events[len(events)-1].ID + if err := s.cache.SetOutboxWatermark(ctx, lastID); err != nil { + log.Printf("[Scheduler] outbox watermark write failed: %v", err) + } else { + watermarkForCheck = lastID + } + + s.checkOutboxLag(ctx, events[0], watermarkForCheck) +} + +func (s *SchedulerSnapshotService) handleOutboxEvent(ctx context.Context, event SchedulerOutboxEvent) error { + switch event.EventType { + case SchedulerOutboxEventAccountLastUsed: + return s.handleLastUsedEvent(ctx, event.Payload) + case SchedulerOutboxEventAccountBulkChanged: + return s.handleBulkAccountEvent(ctx, event.Payload) + case SchedulerOutboxEventAccountGroupsChanged: + return s.handleAccountEvent(ctx, event.AccountID, event.Payload) + case SchedulerOutboxEventAccountChanged: + return s.handleAccountEvent(ctx, event.AccountID, event.Payload) + case SchedulerOutboxEventGroupChanged: + return s.handleGroupEvent(ctx, event.GroupID) + case SchedulerOutboxEventFullRebuild: + return s.triggerFullRebuild("outbox") + default: + return nil + } +} + +func (s *SchedulerSnapshotService) handleLastUsedEvent(ctx context.Context, payload map[string]any) error { + if s.cache == nil || 
payload == nil { + return nil + } + raw, ok := payload["last_used"].(map[string]any) + if !ok || len(raw) == 0 { + return nil + } + updates := make(map[int64]time.Time, len(raw)) + for key, value := range raw { + id, err := strconv.ParseInt(key, 10, 64) + if err != nil || id <= 0 { + continue + } + sec, ok := toInt64(value) + if !ok || sec <= 0 { + continue + } + updates[id] = time.Unix(sec, 0) + } + if len(updates) == 0 { + return nil + } + return s.cache.UpdateLastUsed(ctx, updates) +} + +func (s *SchedulerSnapshotService) handleBulkAccountEvent(ctx context.Context, payload map[string]any) error { + if payload == nil { + return nil + } + ids := parseInt64Slice(payload["account_ids"]) + for _, id := range ids { + if err := s.handleAccountEvent(ctx, &id, payload); err != nil { + return err + } + } + return nil +} + +func (s *SchedulerSnapshotService) handleAccountEvent(ctx context.Context, accountID *int64, payload map[string]any) error { + if accountID == nil || *accountID <= 0 { + return nil + } + if s.accountRepo == nil { + return nil + } + + var groupIDs []int64 + if payload != nil { + groupIDs = parseInt64Slice(payload["group_ids"]) + } + + account, err := s.accountRepo.GetByID(ctx, *accountID) + if err != nil { + if errors.Is(err, ErrAccountNotFound) { + if s.cache != nil { + if err := s.cache.DeleteAccount(ctx, *accountID); err != nil { + return err + } + } + return s.rebuildByGroupIDs(ctx, groupIDs, "account_miss") + } + return err + } + if s.cache != nil { + if err := s.cache.SetAccount(ctx, account); err != nil { + return err + } + } + if len(groupIDs) == 0 { + groupIDs = account.GroupIDs + } + return s.rebuildByAccount(ctx, account, groupIDs, "account_change") +} + +func (s *SchedulerSnapshotService) handleGroupEvent(ctx context.Context, groupID *int64) error { + if groupID == nil || *groupID <= 0 { + return nil + } + groupIDs := []int64{*groupID} + return s.rebuildByGroupIDs(ctx, groupIDs, "group_change") +} + +func (s *SchedulerSnapshotService) rebuildByAccount(ctx context.Context, account *Account, groupIDs []int64, reason string) error { + if account == nil { + return nil + } + groupIDs = s.normalizeGroupIDs(groupIDs) + if len(groupIDs) == 0 { + return nil + } + + var firstErr error + if err := s.rebuildBucketsForPlatform(ctx, account.Platform, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + if account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() { + if err := s.rebuildBucketsForPlatform(ctx, PlatformAnthropic, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + if err := s.rebuildBucketsForPlatform(ctx, PlatformGemini, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildByGroupIDs(ctx context.Context, groupIDs []int64, reason string) error { + groupIDs = s.normalizeGroupIDs(groupIDs) + if len(groupIDs) == 0 { + return nil + } + platforms := []string{PlatformAnthropic, PlatformGemini, PlatformOpenAI, PlatformAntigravity} + var firstErr error + for _, platform := range platforms { + if err := s.rebuildBucketsForPlatform(ctx, platform, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBucketsForPlatform(ctx context.Context, platform string, groupIDs []int64, reason string) error { + if platform == "" { + return nil + } + var firstErr error + for _, gid := range groupIDs { + if err := s.rebuildBucket(ctx, 
SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeSingle}, reason); err != nil && firstErr == nil { + firstErr = err + } + if err := s.rebuildBucket(ctx, SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeForced}, reason); err != nil && firstErr == nil { + firstErr = err + } + if platform == PlatformAnthropic || platform == PlatformGemini { + if err := s.rebuildBucket(ctx, SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeMixed}, reason); err != nil && firstErr == nil { + firstErr = err + } + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBuckets(ctx context.Context, buckets []SchedulerBucket, reason string) error { + var firstErr error + for _, bucket := range buckets { + if err := s.rebuildBucket(ctx, bucket, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBucket(ctx context.Context, bucket SchedulerBucket, reason string) error { + if s.cache == nil { + return ErrSchedulerCacheNotReady + } + ok, err := s.cache.TryLockBucket(ctx, bucket, 30*time.Second) + if err != nil { + return err + } + if !ok { + return nil + } + + rebuildCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + accounts, err := s.loadAccountsFromDB(rebuildCtx, bucket, bucket.Mode == SchedulerModeMixed) + if err != nil { + log.Printf("[Scheduler] rebuild failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err) + return err + } + if err := s.cache.SetSnapshot(rebuildCtx, bucket, accounts); err != nil { + log.Printf("[Scheduler] rebuild cache failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err) + return err + } + log.Printf("[Scheduler] rebuild ok: bucket=%s reason=%s size=%d", bucket.String(), reason, len(accounts)) + return nil +} + +func (s *SchedulerSnapshotService) triggerFullRebuild(reason string) error { + if s.cache == nil { + return ErrSchedulerCacheNotReady + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + buckets, err := s.cache.ListBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] list buckets failed: %v", err) + return err + } + if len(buckets) == 0 { + buckets, err = s.defaultBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] default buckets failed: %v", err) + return err + } + } + return s.rebuildBuckets(ctx, buckets, reason) +} + +func (s *SchedulerSnapshotService) checkOutboxLag(ctx context.Context, oldest SchedulerOutboxEvent, watermark int64) { + if oldest.CreatedAt.IsZero() || s.cfg == nil { + return + } + + lag := time.Since(oldest.CreatedAt) + if lagSeconds := int(lag.Seconds()); lagSeconds >= s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds && s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds > 0 { + log.Printf("[Scheduler] outbox lag warning: %ds", lagSeconds) + } + + if s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 && int(lag.Seconds()) >= s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds { + s.lagMu.Lock() + s.lagFailures++ + failures := s.lagFailures + s.lagMu.Unlock() + + if failures >= s.cfg.Gateway.Scheduling.OutboxLagRebuildFailures { + log.Printf("[Scheduler] outbox lag rebuild triggered: lag=%s failures=%d", lag, failures) + s.lagMu.Lock() + s.lagFailures = 0 + s.lagMu.Unlock() + if err := s.triggerFullRebuild("outbox_lag"); err != nil { + log.Printf("[Scheduler] outbox lag rebuild failed: %v", err) + } + } + } else { + s.lagMu.Lock() + s.lagFailures = 0 + s.lagMu.Unlock() + } + + threshold := 
s.cfg.Gateway.Scheduling.OutboxBacklogRebuildRows + if threshold <= 0 || s.outboxRepo == nil { + return + } + maxID, err := s.outboxRepo.MaxID(ctx) + if err != nil { + return + } + if maxID-watermark >= int64(threshold) { + log.Printf("[Scheduler] outbox backlog rebuild triggered: backlog=%d", maxID-watermark) + if err := s.triggerFullRebuild("outbox_backlog"); err != nil { + log.Printf("[Scheduler] outbox backlog rebuild failed: %v", err) + } + } +} + +func (s *SchedulerSnapshotService) loadAccountsFromDB(ctx context.Context, bucket SchedulerBucket, useMixed bool) ([]Account, error) { + if s.accountRepo == nil { + return nil, ErrSchedulerCacheNotReady + } + groupID := bucket.GroupID + if s.isRunModeSimple() { + groupID = 0 + } + + if useMixed { + platforms := []string{bucket.Platform, PlatformAntigravity} + var accounts []Account + var err error + if groupID > 0 { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, groupID, platforms) + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) + } + if err != nil { + return nil, err + } + filtered := make([]Account, 0, len(accounts)) + for _, acc := range accounts { + if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { + continue + } + filtered = append(filtered, acc) + } + return filtered, nil + } + + if groupID > 0 { + return s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, groupID, bucket.Platform) + } + return s.accountRepo.ListSchedulableByPlatform(ctx, bucket.Platform) +} + +func (s *SchedulerSnapshotService) bucketFor(groupID *int64, platform string, mode string) SchedulerBucket { + return SchedulerBucket{ + GroupID: s.normalizeGroupID(groupID), + Platform: platform, + Mode: mode, + } +} + +func (s *SchedulerSnapshotService) normalizeGroupID(groupID *int64) int64 { + if s.isRunModeSimple() { + return 0 + } + if groupID == nil || *groupID <= 0 { + return 0 + } + return *groupID +} + +func (s *SchedulerSnapshotService) normalizeGroupIDs(groupIDs []int64) []int64 { + if s.isRunModeSimple() { + return []int64{0} + } + if len(groupIDs) == 0 { + return []int64{0} + } + seen := make(map[int64]struct{}, len(groupIDs)) + out := make([]int64, 0, len(groupIDs)) + for _, id := range groupIDs { + if id <= 0 { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + out = append(out, id) + } + if len(out) == 0 { + return []int64{0} + } + return out +} + +func (s *SchedulerSnapshotService) resolveMode(platform string, hasForcePlatform bool) string { + if hasForcePlatform { + return SchedulerModeForced + } + if platform == PlatformAnthropic || platform == PlatformGemini { + return SchedulerModeMixed + } + return SchedulerModeSingle +} + +func (s *SchedulerSnapshotService) guardFallback(ctx context.Context) error { + if s.cfg == nil || s.cfg.Gateway.Scheduling.DbFallbackEnabled { + if s.fallbackLimit == nil || s.fallbackLimit.Allow() { + return nil + } + return ErrSchedulerFallbackLimited + } + return ErrSchedulerCacheNotReady +} + +func (s *SchedulerSnapshotService) withFallbackTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + if s.cfg == nil || s.cfg.Gateway.Scheduling.DbFallbackTimeoutSeconds <= 0 { + return context.WithCancel(ctx) + } + timeout := time.Duration(s.cfg.Gateway.Scheduling.DbFallbackTimeoutSeconds) * time.Second + if deadline, ok := ctx.Deadline(); ok { + remaining := time.Until(deadline) + if remaining <= 0 { + return context.WithCancel(ctx) + } + if remaining < timeout { + timeout = remaining + } + } + 
return context.WithTimeout(ctx, timeout)
+}
+
+func (s *SchedulerSnapshotService) isRunModeSimple() bool {
+	return s.cfg != nil && s.cfg.RunMode == config.RunModeSimple
+}
+
+func (s *SchedulerSnapshotService) outboxPollInterval() time.Duration {
+	if s.cfg == nil {
+		return time.Second
+	}
+	sec := s.cfg.Gateway.Scheduling.OutboxPollIntervalSeconds
+	if sec <= 0 {
+		return time.Second
+	}
+	return time.Duration(sec) * time.Second
+}
+
+func (s *SchedulerSnapshotService) fullRebuildInterval() time.Duration {
+	if s.cfg == nil {
+		return 0
+	}
+	sec := s.cfg.Gateway.Scheduling.FullRebuildIntervalSeconds
+	if sec <= 0 {
+		return 0
+	}
+	return time.Duration(sec) * time.Second
+}
+
+func (s *SchedulerSnapshotService) defaultBuckets(ctx context.Context) ([]SchedulerBucket, error) {
+	buckets := make([]SchedulerBucket, 0)
+	platforms := []string{PlatformAnthropic, PlatformGemini, PlatformOpenAI, PlatformAntigravity}
+	for _, platform := range platforms {
+		buckets = append(buckets, SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeSingle})
+		buckets = append(buckets, SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeForced})
+		if platform == PlatformAnthropic || platform == PlatformGemini {
+			buckets = append(buckets, SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeMixed})
+		}
+	}
+
+	if s.isRunModeSimple() || s.groupRepo == nil {
+		return dedupeBuckets(buckets), nil
+	}
+
+	groups, err := s.groupRepo.ListActive(ctx)
+	if err != nil {
+		return dedupeBuckets(buckets), nil
+	}
+	for _, group := range groups {
+		if group.Platform == "" {
+			continue
+		}
+		buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeSingle})
+		buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeForced})
+		if group.Platform == PlatformAnthropic || group.Platform == PlatformGemini {
+			buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeMixed})
+		}
+	}
+	return dedupeBuckets(buckets), nil
+}
+
+func dedupeBuckets(in []SchedulerBucket) []SchedulerBucket {
+	seen := make(map[string]struct{}, len(in))
+	out := make([]SchedulerBucket, 0, len(in))
+	for _, bucket := range in {
+		key := bucket.String()
+		if _, ok := seen[key]; ok {
+			continue
+		}
+		seen[key] = struct{}{}
+		out = append(out, bucket)
+	}
+	return out
+}
+
+func derefAccounts(accounts []*Account) []Account {
+	if len(accounts) == 0 {
+		return []Account{}
+	}
+	out := make([]Account, 0, len(accounts))
+	for _, account := range accounts {
+		if account == nil {
+			continue
+		}
+		out = append(out, *account)
+	}
+	return out
+}
+
+func parseInt64Slice(value any) []int64 {
+	raw, ok := value.([]any)
+	if !ok {
+		return nil
+	}
+	out := make([]int64, 0, len(raw))
+	for _, item := range raw {
+		if v, ok := toInt64(item); ok && v > 0 {
+			out = append(out, v)
+		}
+	}
+	return out
+}
+
+func toInt64(value any) (int64, bool) {
+	switch v := value.(type) {
+	case float64:
+		return int64(v), true
+	case int64:
+		return v, true
+	case int:
+		return int64(v), true
+	case json.Number:
+		parsed, err := strconv.ParseInt(v.String(), 10, 64)
+		return parsed, err == nil
+	default:
+		return 0, false
+	}
+}
+
+type fallbackLimiter struct {
+	maxQPS int
+	mu     sync.Mutex
+	window time.Time
+	count  int
+}
+
+func newFallbackLimiter(maxQPS int) *fallbackLimiter {
+	if maxQPS <= 0 {
+		return nil
+	}
+	return &fallbackLimiter{
+		maxQPS: maxQPS,
+		window: time.Now(),
+	}
+}
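+
+// Editor's note (design observation): fallbackLimiter is a fixed-window
+// counter rather than a token bucket, so up to 2*maxQPS requests can slip
+// through around a window boundary; that is acceptable for its job of capping
+// DB fallback pressure. Illustrative use:
+//
+//	limiter := newFallbackLimiter(10) // allow ~10 DB fallbacks per second
+//	if limiter.Allow() {
+//		// safe to hit the database
+//	}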
+
+func (l *fallbackLimiter) Allow() bool {
+	if l == nil || l.maxQPS <= 0 {
+		return true
+	}
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	now := time.Now()
+	if now.Sub(l.window) >= time.Second {
+		l.window = now
+		l.count = 0
+	}
+	if l.count >= l.maxQPS {
+		return false
+	}
+	l.count++
+	return true
+}
diff --git a/backend/internal/service/setting.go b/backend/internal/service/setting.go
new file mode 100644
index 00000000..eef6bcc5
--- /dev/null
+++ b/backend/internal/service/setting.go
@@ -0,0 +1,10 @@
+package service
+
+import "time"
+
+type Setting struct {
+	ID        int64
+	Key       string
+	Value     string
+	UpdatedAt time.Time
+}
diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go
new file mode 100644
index 00000000..0a7426f8
--- /dev/null
+++ b/backend/internal/service/setting_service.go
@@ -0,0 +1,759 @@
+package service
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+var (
+	ErrRegistrationDisabled = infraerrors.Forbidden("REGISTRATION_DISABLED", "registration is currently disabled")
+	ErrSettingNotFound      = infraerrors.NotFound("SETTING_NOT_FOUND", "setting not found")
+)
+
+type SettingRepository interface {
+	Get(ctx context.Context, key string) (*Setting, error)
+	GetValue(ctx context.Context, key string) (string, error)
+	Set(ctx context.Context, key, value string) error
+	GetMultiple(ctx context.Context, keys []string) (map[string]string, error)
+	SetMultiple(ctx context.Context, settings map[string]string) error
+	GetAll(ctx context.Context) (map[string]string, error)
+	Delete(ctx context.Context, key string) error
+}
+
+// SettingService manages system settings.
+type SettingService struct {
+	settingRepo SettingRepository
+	cfg         *config.Config
+	onUpdate    func() // Callback when settings are updated (for cache invalidation)
+	version     string // Application version
+}
+
+// NewSettingService creates a system-settings service instance.
+func NewSettingService(settingRepo SettingRepository, cfg *config.Config) *SettingService {
+	return &SettingService{
+		settingRepo: settingRepo,
+		cfg:         cfg,
+	}
+}
+
+// GetAllSettings returns all system settings.
+func (s *SettingService) GetAllSettings(ctx context.Context) (*SystemSettings, error) {
+	settings, err := s.settingRepo.GetAll(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("get all settings: %w", err)
+	}
+
+	return s.parseSettings(settings), nil
+}
+
+// GetPublicSettings returns the public settings (no login required).
+func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings, error) {
+	keys := []string{
+		SettingKeyRegistrationEnabled,
+		SettingKeyEmailVerifyEnabled,
+		SettingKeyTurnstileEnabled,
+		SettingKeyTurnstileSiteKey,
+		SettingKeySiteName,
+		SettingKeySiteLogo,
+		SettingKeySiteSubtitle,
+		SettingKeyAPIBaseURL,
+		SettingKeyContactInfo,
+		SettingKeyDocURL,
+		SettingKeyHomeContent,
+		SettingKeyLinuxDoConnectEnabled,
+	}
+
+	settings, err := s.settingRepo.GetMultiple(ctx, keys)
+	if err != nil {
+		return nil, fmt.Errorf("get public settings: %w", err)
+	}
+
+	linuxDoEnabled := false
+	if raw, ok := settings[SettingKeyLinuxDoConnectEnabled]; ok {
+		linuxDoEnabled = raw == "true"
+	} else {
+		linuxDoEnabled = s.cfg != nil && s.cfg.LinuxDo.Enabled
+	}
+
+	return &PublicSettings{
+		RegistrationEnabled: settings[SettingKeyRegistrationEnabled] == "true",
+		EmailVerifyEnabled:  settings[SettingKeyEmailVerifyEnabled] == "true",
+		TurnstileEnabled:    settings[SettingKeyTurnstileEnabled] == "true",
+		TurnstileSiteKey:    settings[SettingKeyTurnstileSiteKey],
+		SiteName:            s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"),
+		SiteLogo:            settings[SettingKeySiteLogo],
+		SiteSubtitle:        s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"),
+		APIBaseURL:          settings[SettingKeyAPIBaseURL],
+		ContactInfo:         settings[SettingKeyContactInfo],
+		DocURL:              settings[SettingKeyDocURL],
+		HomeContent:         settings[SettingKeyHomeContent],
+		LinuxDoOAuthEnabled: linuxDoEnabled,
+	}, nil
+}
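+
+// Editor's sketch (illustrative values; field names taken from the json tags
+// in the injection struct below): a public-settings response would serialize
+// roughly as:
+//
+//	{
+//	  "registration_enabled": true,
+//	  "email_verify_enabled": false,
+//	  "turnstile_enabled": false,
+//	  "site_name": "Sub2API",
+//	  "site_subtitle": "Subscription to API Conversion Platform",
+//	  "linuxdo_oauth_enabled": false
+//	}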
+
+// SetOnUpdateCallback sets a callback function to be called when settings are updated
+// This is used for cache invalidation (e.g., HTML cache in frontend server)
+func (s *SettingService) SetOnUpdateCallback(callback func()) {
+	s.onUpdate = callback
+}
+
+// SetVersion sets the application version for injection into public settings
+func (s *SettingService) SetVersion(version string) {
+	s.version = version
+}
+
+// GetPublicSettingsForInjection returns public settings in a format suitable for HTML injection
+// This implements the web.PublicSettingsProvider interface
+func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any, error) {
+	settings, err := s.GetPublicSettings(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return a struct that matches the frontend's expected format
+	return &struct {
+		RegistrationEnabled bool   `json:"registration_enabled"`
+		EmailVerifyEnabled  bool   `json:"email_verify_enabled"`
+		TurnstileEnabled    bool   `json:"turnstile_enabled"`
+		TurnstileSiteKey    string `json:"turnstile_site_key,omitempty"`
+		SiteName            string `json:"site_name"`
+		SiteLogo            string `json:"site_logo,omitempty"`
+		SiteSubtitle        string `json:"site_subtitle,omitempty"`
+		APIBaseURL          string `json:"api_base_url,omitempty"`
+		ContactInfo         string `json:"contact_info,omitempty"`
+		DocURL              string `json:"doc_url,omitempty"`
+		HomeContent         string `json:"home_content,omitempty"`
+		LinuxDoOAuthEnabled bool   `json:"linuxdo_oauth_enabled"`
+		Version             string `json:"version,omitempty"`
+	}{
+		RegistrationEnabled: settings.RegistrationEnabled,
+		EmailVerifyEnabled:  settings.EmailVerifyEnabled,
+		TurnstileEnabled:    settings.TurnstileEnabled,
+		TurnstileSiteKey:    settings.TurnstileSiteKey,
+		SiteName:            settings.SiteName,
+		SiteLogo:            settings.SiteLogo,
+		SiteSubtitle:        settings.SiteSubtitle,
+		APIBaseURL:          settings.APIBaseURL,
+		ContactInfo:         settings.ContactInfo,
+		DocURL:              settings.DocURL,
+		HomeContent:         settings.HomeContent,
+		LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled,
+		Version:             s.version,
+	}, nil
+}
+
+// UpdateSettings updates system settings.
+func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSettings) error {
+	updates := make(map[string]string)
+
+	// Registration settings
+	updates[SettingKeyRegistrationEnabled] = strconv.FormatBool(settings.RegistrationEnabled)
+	updates[SettingKeyEmailVerifyEnabled] = strconv.FormatBool(settings.EmailVerifyEnabled)
+
+	// SMTP settings (the password is only written when non-empty)
+	updates[SettingKeySMTPHost] = settings.SMTPHost
+	updates[SettingKeySMTPPort] = strconv.Itoa(settings.SMTPPort)
+	updates[SettingKeySMTPUsername] = settings.SMTPUsername
+	if settings.SMTPPassword != "" {
+		updates[SettingKeySMTPPassword] = settings.SMTPPassword
+	}
+	updates[SettingKeySMTPFrom] = settings.SMTPFrom
+	updates[SettingKeySMTPFromName] = settings.SMTPFromName
+	updates[SettingKeySMTPUseTLS] = strconv.FormatBool(settings.SMTPUseTLS)
+
+	// Cloudflare Turnstile settings (the secret key is only written when non-empty)
+	updates[SettingKeyTurnstileEnabled] = strconv.FormatBool(settings.TurnstileEnabled)
+	updates[SettingKeyTurnstileSiteKey] = settings.TurnstileSiteKey
+	if settings.TurnstileSecretKey != "" {
+		updates[SettingKeyTurnstileSecretKey] = settings.TurnstileSecretKey
+	}
+
+	// LinuxDo Connect OAuth login
+	updates[SettingKeyLinuxDoConnectEnabled] = strconv.FormatBool(settings.LinuxDoConnectEnabled)
+	updates[SettingKeyLinuxDoConnectClientID] = settings.LinuxDoConnectClientID
+	updates[SettingKeyLinuxDoConnectRedirectURL] = settings.LinuxDoConnectRedirectURL
+	if settings.LinuxDoConnectClientSecret != "" {
+		updates[SettingKeyLinuxDoConnectClientSecret] = settings.LinuxDoConnectClientSecret
+	}
+
+	// OEM settings
+	updates[SettingKeySiteName] = settings.SiteName
+	updates[SettingKeySiteLogo] = settings.SiteLogo
+	updates[SettingKeySiteSubtitle] = settings.SiteSubtitle
+	updates[SettingKeyAPIBaseURL] = settings.APIBaseURL
+	updates[SettingKeyContactInfo] = settings.ContactInfo
+	updates[SettingKeyDocURL] = settings.DocURL
+	updates[SettingKeyHomeContent] = settings.HomeContent
+
+	// Default configuration
+	updates[SettingKeyDefaultConcurrency] = strconv.Itoa(settings.DefaultConcurrency)
+	updates[SettingKeyDefaultBalance] = strconv.FormatFloat(settings.DefaultBalance, 'f', 8, 64)
+
+	// Model fallback configuration
+	updates[SettingKeyEnableModelFallback] = strconv.FormatBool(settings.EnableModelFallback)
+	updates[SettingKeyFallbackModelAnthropic] = settings.FallbackModelAnthropic
+	updates[SettingKeyFallbackModelOpenAI] = settings.FallbackModelOpenAI
+	updates[SettingKeyFallbackModelGemini] = settings.FallbackModelGemini
+	updates[SettingKeyFallbackModelAntigravity] = settings.FallbackModelAntigravity
+
+	// Identity patch configuration (Claude -> Gemini)
+	updates[SettingKeyEnableIdentityPatch] = strconv.FormatBool(settings.EnableIdentityPatch)
+	updates[SettingKeyIdentityPatchPrompt] = settings.IdentityPatchPrompt
+
+	// Ops monitoring (vNext)
+	updates[SettingKeyOpsMonitoringEnabled] = strconv.FormatBool(settings.OpsMonitoringEnabled)
+	updates[SettingKeyOpsRealtimeMonitoringEnabled] = strconv.FormatBool(settings.OpsRealtimeMonitoringEnabled)
+	updates[SettingKeyOpsQueryModeDefault] = string(ParseOpsQueryMode(settings.OpsQueryModeDefault))
+	if settings.OpsMetricsIntervalSeconds > 0 {
+		updates[SettingKeyOpsMetricsIntervalSeconds] = strconv.Itoa(settings.OpsMetricsIntervalSeconds)
+	}
+
+	err := s.settingRepo.SetMultiple(ctx, updates)
+	if err == nil && s.onUpdate != nil {
+		s.onUpdate() // Invalidate cache after settings update
+	}
+	return err
+}
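Everything UpdateSettings persists goes through the repository as strings: booleans via strconv.FormatBool, and the default balance via strconv.FormatFloat with format 'f' and 8 decimal places, which parseSettings later reverses. A small runnable sketch of that round-trip:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Booleans and floats are stored as strings, exactly as UpdateSettings encodes them.
	enabled := strconv.FormatBool(true)                    // "true"
	balance := strconv.FormatFloat(1.23456789, 'f', 8, 64) // "1.23456789"

	// Reading them back mirrors parseSettings.
	fmt.Println(enabled == "true") // true
	v, err := strconv.ParseFloat(balance, 64)
	fmt.Println(v, err) // 1.23456789 <nil>
}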
+
+// IsRegistrationEnabled reports whether registration is open.
+func (s *SettingService) IsRegistrationEnabled(ctx context.Context) bool {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyRegistrationEnabled)
+	if err != nil {
+		// Safe default: if the setting is missing or the lookup fails, keep registration closed
+		return false
+	}
+	return value == "true"
+}
+
+// IsEmailVerifyEnabled reports whether email verification is enabled.
+func (s *SettingService) IsEmailVerifyEnabled(ctx context.Context) bool {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyEmailVerifyEnabled)
+	if err != nil {
+		return false
+	}
+	return value == "true"
+}
+
+// GetSiteName returns the site name.
+func (s *SettingService) GetSiteName(ctx context.Context) string {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeySiteName)
+	if err != nil || value == "" {
+		return "Sub2API"
+	}
+	return value
+}
+
+// GetDefaultConcurrency returns the default concurrency limit.
+func (s *SettingService) GetDefaultConcurrency(ctx context.Context) int {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyDefaultConcurrency)
+	if err != nil {
+		return s.cfg.Default.UserConcurrency
+	}
+	if v, err := strconv.Atoi(value); err == nil && v > 0 {
+		return v
+	}
+	return s.cfg.Default.UserConcurrency
+}
+
+// GetDefaultBalance returns the default balance.
+func (s *SettingService) GetDefaultBalance(ctx context.Context) float64 {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyDefaultBalance)
+	if err != nil {
+		return s.cfg.Default.UserBalance
+	}
+	if v, err := strconv.ParseFloat(value, 64); err == nil && v >= 0 {
+		return v
+	}
+	return s.cfg.Default.UserBalance
+}
+
+// InitializeDefaultSettings seeds the default settings.
+func (s *SettingService) InitializeDefaultSettings(ctx context.Context) error {
+	// Check whether settings already exist
+	_, err := s.settingRepo.GetValue(ctx, SettingKeyRegistrationEnabled)
+	if err == nil {
+		// Settings already exist; nothing to initialize
+		return nil
+	}
+	if !errors.Is(err, ErrSettingNotFound) {
+		return fmt.Errorf("check existing settings: %w", err)
+	}
+
+	// Seed the defaults
+	defaults := map[string]string{
+		SettingKeyRegistrationEnabled: "true",
+		SettingKeyEmailVerifyEnabled:  "false",
+		SettingKeySiteName:            "Sub2API",
+		SettingKeySiteLogo:            "",
+		SettingKeyDefaultConcurrency:  strconv.Itoa(s.cfg.Default.UserConcurrency),
+		SettingKeyDefaultBalance:      strconv.FormatFloat(s.cfg.Default.UserBalance, 'f', 8, 64),
+		SettingKeySMTPPort:            "587",
+		SettingKeySMTPUseTLS:          "false",
+		// Model fallback defaults
+		SettingKeyEnableModelFallback:      "false",
+		SettingKeyFallbackModelAnthropic:   "claude-3-5-sonnet-20241022",
+		SettingKeyFallbackModelOpenAI:      "gpt-4o",
+		SettingKeyFallbackModelGemini:      "gemini-2.5-pro",
+		SettingKeyFallbackModelAntigravity: "gemini-2.5-pro",
+		// Identity patch defaults
+		SettingKeyEnableIdentityPatch: "true",
+		SettingKeyIdentityPatchPrompt: "",
+
+		// Ops monitoring defaults (vNext)
+		SettingKeyOpsMonitoringEnabled:         "true",
+		SettingKeyOpsRealtimeMonitoringEnabled: "true",
+		SettingKeyOpsQueryModeDefault:          "auto",
+		SettingKeyOpsMetricsIntervalSeconds:    "60",
+	}
+
+	return s.settingRepo.SetMultiple(ctx, defaults)
+}
+
+// parseSettings maps raw key/value settings onto the SystemSettings struct.
+func (s *SettingService) parseSettings(settings map[string]string) *SystemSettings {
+	result := &SystemSettings{
+		RegistrationEnabled:          settings[SettingKeyRegistrationEnabled] == "true",
+		EmailVerifyEnabled:           settings[SettingKeyEmailVerifyEnabled] == "true",
+		SMTPHost:                     settings[SettingKeySMTPHost],
+		SMTPUsername:                 settings[SettingKeySMTPUsername],
+		SMTPFrom:                     settings[SettingKeySMTPFrom],
+		SMTPFromName:                 settings[SettingKeySMTPFromName],
+		SMTPUseTLS:                   settings[SettingKeySMTPUseTLS] == "true",
+		SMTPPasswordConfigured:       settings[SettingKeySMTPPassword] != "",
+		TurnstileEnabled:             settings[SettingKeyTurnstileEnabled] == "true",
+		TurnstileSiteKey:             settings[SettingKeyTurnstileSiteKey],
+		TurnstileSecretKeyConfigured: settings[SettingKeyTurnstileSecretKey] != "",
+		SiteName:                     s.getStringOrDefault(settings, SettingKeySiteName, "Sub2API"),
+		SiteLogo:                     settings[SettingKeySiteLogo],
+		SiteSubtitle:                 s.getStringOrDefault(settings, SettingKeySiteSubtitle, "Subscription to API Conversion Platform"),
+		APIBaseURL:                   settings[SettingKeyAPIBaseURL],
+		ContactInfo:                  settings[SettingKeyContactInfo],
+		DocURL:                       settings[SettingKeyDocURL],
+		HomeContent:                  settings[SettingKeyHomeContent],
+	}
+
+	// Parse integer values
+	if port, err := strconv.Atoi(settings[SettingKeySMTPPort]); err == nil {
+		result.SMTPPort = port
+	} else {
+		result.SMTPPort = 587
+	}
+
+	if concurrency, err := strconv.Atoi(settings[SettingKeyDefaultConcurrency]); err == nil {
+		result.DefaultConcurrency = concurrency
+	} else {
+		result.DefaultConcurrency = s.cfg.Default.UserConcurrency
+	}
+
+	// Parse float values
+	if balance, err := strconv.ParseFloat(settings[SettingKeyDefaultBalance], 64); err == nil {
+		result.DefaultBalance = balance
+	} else {
+		result.DefaultBalance = s.cfg.Default.UserBalance
+	}
+
+	// Sensitive values are returned as-is so they can be used when testing connections
+	result.SMTPPassword = settings[SettingKeySMTPPassword]
+	result.TurnstileSecretKey = settings[SettingKeyTurnstileSecretKey]
+
+	// LinuxDo Connect settings:
+	// - stay compatible with config.yaml/env (so legacy deployments that never migrated to DB settings are not silently disabled)
+	// - can be overridden and persisted from the admin "System Settings" page (stored in the DB)
+	linuxDoBase := config.LinuxDoConnectConfig{}
+	if s.cfg != nil {
+		linuxDoBase = s.cfg.LinuxDo
+	}
+
+	if raw, ok := settings[SettingKeyLinuxDoConnectEnabled]; ok {
+		result.LinuxDoConnectEnabled = raw == "true"
+	} else {
+		result.LinuxDoConnectEnabled = linuxDoBase.Enabled
+	}
+
+	if v, ok := settings[SettingKeyLinuxDoConnectClientID]; ok && strings.TrimSpace(v) != "" {
+		result.LinuxDoConnectClientID = strings.TrimSpace(v)
+	} else {
+		result.LinuxDoConnectClientID = linuxDoBase.ClientID
+	}
+
+	if v, ok := settings[SettingKeyLinuxDoConnectRedirectURL]; ok && strings.TrimSpace(v) != "" {
+		result.LinuxDoConnectRedirectURL = strings.TrimSpace(v)
+	} else {
+		result.LinuxDoConnectRedirectURL = linuxDoBase.RedirectURL
+	}
+
+	result.LinuxDoConnectClientSecret = strings.TrimSpace(settings[SettingKeyLinuxDoConnectClientSecret])
+	if result.LinuxDoConnectClientSecret == "" {
+		result.LinuxDoConnectClientSecret = strings.TrimSpace(linuxDoBase.ClientSecret)
+	}
+	result.LinuxDoConnectClientSecretConfigured = result.LinuxDoConnectClientSecret != ""
+
+	// Model fallback settings
+	result.EnableModelFallback = settings[SettingKeyEnableModelFallback] == "true"
+	result.FallbackModelAnthropic = s.getStringOrDefault(settings, SettingKeyFallbackModelAnthropic, "claude-3-5-sonnet-20241022")
+	result.FallbackModelOpenAI = s.getStringOrDefault(settings, SettingKeyFallbackModelOpenAI, "gpt-4o")
+	result.FallbackModelGemini = s.getStringOrDefault(settings, SettingKeyFallbackModelGemini, "gemini-2.5-pro")
+	result.FallbackModelAntigravity = s.getStringOrDefault(settings, SettingKeyFallbackModelAntigravity, "gemini-2.5-pro")
+
+	// Identity patch settings (default: enabled, to preserve existing behavior)
+	if v, ok := settings[SettingKeyEnableIdentityPatch]; ok && v != "" {
+		result.EnableIdentityPatch = v == "true"
+	} else {
+		result.EnableIdentityPatch = true
+	}
+	result.IdentityPatchPrompt = settings[SettingKeyIdentityPatchPrompt]
+
+	// Ops monitoring settings (default: enabled, fail-open)
+	result.OpsMonitoringEnabled = !isFalseSettingValue(settings[SettingKeyOpsMonitoringEnabled])
+	result.OpsRealtimeMonitoringEnabled = !isFalseSettingValue(settings[SettingKeyOpsRealtimeMonitoringEnabled])
+	result.OpsQueryModeDefault = string(ParseOpsQueryMode(settings[SettingKeyOpsQueryModeDefault]))
+	result.OpsMetricsIntervalSeconds = 60
+	if raw := strings.TrimSpace(settings[SettingKeyOpsMetricsIntervalSeconds]); raw != "" {
+		if v, err := strconv.Atoi(raw); err == nil {
+			if v < 60 {
+				v = 60
+			}
+			if v > 3600 {
+				v = 3600
+			}
+			result.OpsMetricsIntervalSeconds = v
+		}
+	}
+
+	return result
+}
+
+func isFalseSettingValue(value string) bool {
+	switch strings.ToLower(strings.TrimSpace(value)) {
+	case "false", "0", "off", "disabled":
+		return true
+	default:
+		return false
+	}
+}
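Note the asymmetry in how booleans are read back: registration and email verification are fail-closed (only the literal "true" enables them), while the ops monitoring flags are fail-open (isFalseSettingValue disables only on an explicit "false"/"0"/"off"/"disabled", so a missing key leaves monitoring on). A small demonstration:

package main

import (
	"fmt"
	"strings"
)

// Copy of isFalseSettingValue above: only explicit "off" spellings count as false.
func isFalse(value string) bool {
	switch strings.ToLower(strings.TrimSpace(value)) {
	case "false", "0", "off", "disabled":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(!isFalse(""))      // true: a missing key keeps monitoring enabled (fail-open)
	fmt.Println(!isFalse("FALSE")) // false: an explicit opt-out wins
	fmt.Println("" == "true")      // false: fail-closed checks treat a missing key as disabled
}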
+
+// getStringOrDefault returns the value for key, or defaultValue when the key is missing or empty.
+func (s *SettingService) getStringOrDefault(settings map[string]string, key, defaultValue string) string {
+	if value, ok := settings[key]; ok && value != "" {
+		return value
+	}
+	return defaultValue
+}
+
+// IsTurnstileEnabled reports whether Turnstile verification is enabled.
+func (s *SettingService) IsTurnstileEnabled(ctx context.Context) bool {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyTurnstileEnabled)
+	if err != nil {
+		return false
+	}
+	return value == "true"
+}
+
+// GetTurnstileSecretKey returns the Turnstile secret key.
+func (s *SettingService) GetTurnstileSecretKey(ctx context.Context) string {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyTurnstileSecretKey)
+	if err != nil {
+		return ""
+	}
+	return value
+}
+
+// IsIdentityPatchEnabled reports whether the identity patch (Claude -> Gemini systemInstruction injection) is enabled.
+func (s *SettingService) IsIdentityPatchEnabled(ctx context.Context) bool {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyEnableIdentityPatch)
+	if err != nil {
+		// Enabled by default for backward compatibility
+		return true
+	}
+	return value == "true"
+}
+
+// GetIdentityPatchPrompt returns the custom identity-patch prompt (empty means the built-in default template).
+func (s *SettingService) GetIdentityPatchPrompt(ctx context.Context) string {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyIdentityPatchPrompt)
+	if err != nil {
+		return ""
+	}
+	return value
+}
+
+// GenerateAdminAPIKey generates a new admin API key.
+func (s *SettingService) GenerateAdminAPIKey(ctx context.Context) (string, error) {
+	// 32 random bytes = 64 hex characters
+	bytes := make([]byte, 32)
+	if _, err := rand.Read(bytes); err != nil {
+		return "", fmt.Errorf("generate random bytes: %w", err)
+	}
+
+	key := AdminAPIKeyPrefix + hex.EncodeToString(bytes)
+
+	// Persist to the settings table
+	if err := s.settingRepo.Set(ctx, SettingKeyAdminAPIKey, key); err != nil {
+		return "", fmt.Errorf("save admin api key: %w", err)
+	}
+
+	return key, nil
+}
+
+// GetAdminAPIKeyStatus returns the admin API key status:
+// the masked key, whether it exists, and any error.
+func (s *SettingService) GetAdminAPIKeyStatus(ctx context.Context) (maskedKey string, exists bool, err error) {
+	key, err := s.settingRepo.GetValue(ctx, SettingKeyAdminAPIKey)
+	if err != nil {
+		if errors.Is(err, ErrSettingNotFound) {
+			return "", false, nil
+		}
+		return "", false, err
+	}
+	if key == "" {
+		return "", false, nil
+	}
+
+	// Mask: show the first 10 and the last 4 characters
+	if len(key) > 14 {
+		maskedKey = key[:10] + "..." + key[len(key)-4:]
+	} else {
+		maskedKey = key
+	}
+
+	return maskedKey, true, nil
+}
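The masking rule keeps the first 10 and last 4 characters, and it only masks keys longer than 14 characters, so the masked form never reveals more than it hides. A standalone sketch of the same rule:

package main

import "fmt"

// maskKey mirrors the masking in GetAdminAPIKeyStatus: first 10 + "..." + last 4.
func maskKey(key string) string {
	if len(key) > 14 {
		return key[:10] + "..." + key[len(key)-4:]
	}
	return key
}

func main() {
	fmt.Println(maskKey("sk-admin-0123456789abcdef")) // sk-admin-0...cdef
	fmt.Println(maskKey("short"))                     // short (too short to mask)
}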
+
+// GetAdminAPIKey returns the full admin API key (for internal verification only).
+// It returns an empty string and a nil error when unconfigured; an error is returned only on database failures.
+func (s *SettingService) GetAdminAPIKey(ctx context.Context) (string, error) {
+	key, err := s.settingRepo.GetValue(ctx, SettingKeyAdminAPIKey)
+	if err != nil {
+		if errors.Is(err, ErrSettingNotFound) {
+			return "", nil // not configured
+		}
+		return "", err // database error
+	}
+	return key, nil
+}
+
+// DeleteAdminAPIKey deletes the admin API key.
+func (s *SettingService) DeleteAdminAPIKey(ctx context.Context) error {
+	return s.settingRepo.Delete(ctx, SettingKeyAdminAPIKey)
+}
+
+// IsModelFallbackEnabled reports whether the model fallback mechanism is enabled.
+func (s *SettingService) IsModelFallbackEnabled(ctx context.Context) bool {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyEnableModelFallback)
+	if err != nil {
+		return false // Default: disabled
+	}
+	return value == "true"
+}
+
+// GetFallbackModel returns the fallback model for the given platform.
+func (s *SettingService) GetFallbackModel(ctx context.Context, platform string) string {
+	var key string
+	var defaultModel string
+
+	switch platform {
+	case PlatformAnthropic:
+		key = SettingKeyFallbackModelAnthropic
+		defaultModel = "claude-3-5-sonnet-20241022"
+	case PlatformOpenAI:
+		key = SettingKeyFallbackModelOpenAI
+		defaultModel = "gpt-4o"
+	case PlatformGemini:
+		key = SettingKeyFallbackModelGemini
+		defaultModel = "gemini-2.5-pro"
+	case PlatformAntigravity:
+		key = SettingKeyFallbackModelAntigravity
+		defaultModel = "gemini-2.5-pro"
+	default:
+		return ""
+	}
+
+	value, err := s.settingRepo.GetValue(ctx, key)
+	if err != nil || value == "" {
+		return defaultModel
+	}
+	return value
+}
+
+// GetLinuxDoConnectOAuthConfig returns the effective LinuxDo Connect configuration used for login.
+//
+// Precedence:
+//   - if the corresponding system-setting key exists, it overrides the config.yaml/env value
+//   - otherwise the config.yaml/env value is used
+func (s *SettingService) GetLinuxDoConnectOAuthConfig(ctx context.Context) (config.LinuxDoConnectConfig, error) {
+	if s == nil || s.cfg == nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.ServiceUnavailable("CONFIG_NOT_READY", "config not loaded")
+	}
+
+	effective := s.cfg.LinuxDo
+
+	keys := []string{
+		SettingKeyLinuxDoConnectEnabled,
+		SettingKeyLinuxDoConnectClientID,
+		SettingKeyLinuxDoConnectClientSecret,
+		SettingKeyLinuxDoConnectRedirectURL,
+	}
+	settings, err := s.settingRepo.GetMultiple(ctx, keys)
+	if err != nil {
+		return config.LinuxDoConnectConfig{}, fmt.Errorf("get linuxdo connect settings: %w", err)
+	}
+
+	if raw, ok := settings[SettingKeyLinuxDoConnectEnabled]; ok {
+		effective.Enabled = raw == "true"
+	}
+	if v, ok := settings[SettingKeyLinuxDoConnectClientID]; ok && strings.TrimSpace(v) != "" {
+		effective.ClientID = strings.TrimSpace(v)
+	}
+	if v, ok := settings[SettingKeyLinuxDoConnectClientSecret]; ok && strings.TrimSpace(v) != "" {
+		effective.ClientSecret = strings.TrimSpace(v)
+	}
+	if v, ok := settings[SettingKeyLinuxDoConnectRedirectURL]; ok && strings.TrimSpace(v) != "" {
+		effective.RedirectURL = strings.TrimSpace(v)
+	}
+
+	if !effective.Enabled {
+		return config.LinuxDoConnectConfig{}, infraerrors.NotFound("OAUTH_DISABLED", "oauth login is disabled")
+	}
+
+	// Basic sanity checks (avoid redirecting users into an OAuth flow that is bound to fail or is unsafe).
+	if strings.TrimSpace(effective.ClientID) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client id not configured")
+	}
+	if strings.TrimSpace(effective.AuthorizeURL) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url not configured")
+	}
+	if strings.TrimSpace(effective.TokenURL) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url not configured")
+	}
+	if strings.TrimSpace(effective.UserInfoURL) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url not configured")
+	}
+	if strings.TrimSpace(effective.RedirectURL) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url not configured")
+	}
+	if strings.TrimSpace(effective.FrontendRedirectURL) == "" {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url not configured")
+	}
+
+	if err := config.ValidateAbsoluteHTTPURL(effective.AuthorizeURL); err != nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url invalid")
+	}
+	if err := config.ValidateAbsoluteHTTPURL(effective.TokenURL); err != nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url invalid")
+	}
+	if err := config.ValidateAbsoluteHTTPURL(effective.UserInfoURL); err != nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url invalid")
+	}
+	if err := config.ValidateAbsoluteHTTPURL(effective.RedirectURL); err != nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url invalid")
+	}
+	if err := config.ValidateFrontendRedirectURL(effective.FrontendRedirectURL); err != nil {
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url invalid")
+	}
+
+	method := strings.ToLower(strings.TrimSpace(effective.TokenAuthMethod))
+	switch method {
+	case "", "client_secret_post", "client_secret_basic":
+		if strings.TrimSpace(effective.ClientSecret) == "" {
+			return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client secret not configured")
+		}
+	case "none":
+		if !effective.UsePKCE {
+			return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth pkce must be enabled when token_auth_method=none")
+		}
+	default:
+		return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token_auth_method invalid")
+	}
+
+	return effective, nil
+}
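GetLinuxDoConnectOAuthConfig layers its sources: it starts from the file/env config and lets a DB setting override a field only when the key exists and, for strings, is non-blank after trimming. A minimal sketch of just that precedence rule (the map keys here are hypothetical, for illustration only):

package main

import (
	"fmt"
	"strings"
)

// overrideString applies the precedence used above: a DB value wins only
// when the key is present and non-blank; otherwise the file/env value stays.
func overrideString(fileVal string, db map[string]string, key string) string {
	if v, ok := db[key]; ok && strings.TrimSpace(v) != "" {
		return strings.TrimSpace(v)
	}
	return fileVal
}

func main() {
	db := map[string]string{"client_id": "  db-client  ", "redirect_url": "   "}
	fmt.Println(overrideString("file-client", db, "client_id"))      // db-client
	fmt.Println(overrideString("file-redirect", db, "redirect_url")) // file-redirect (blank DB value ignored)
	fmt.Println(overrideString("file-secret", db, "client_secret"))  // file-secret (key absent)
}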
+
+// GetStreamTimeoutSettings returns the stream timeout handling configuration.
+func (s *SettingService) GetStreamTimeoutSettings(ctx context.Context) (*StreamTimeoutSettings, error) {
+	value, err := s.settingRepo.GetValue(ctx, SettingKeyStreamTimeoutSettings)
+	if err != nil {
+		if errors.Is(err, ErrSettingNotFound) {
+			return DefaultStreamTimeoutSettings(), nil
+		}
+		return nil, fmt.Errorf("get stream timeout settings: %w", err)
+	}
+	if value == "" {
+		return DefaultStreamTimeoutSettings(), nil
+	}
+
+	var settings StreamTimeoutSettings
+	if err := json.Unmarshal([]byte(value), &settings); err != nil {
+		return DefaultStreamTimeoutSettings(), nil
+	}
+
+	// Validate and clamp configuration values
+	if settings.TempUnschedMinutes < 1 {
+		settings.TempUnschedMinutes = 1
+	}
+	if settings.TempUnschedMinutes > 60 {
+		settings.TempUnschedMinutes = 60
+	}
+	if settings.ThresholdCount < 1 {
+		settings.ThresholdCount = 1
+	}
+	if settings.ThresholdCount > 10 {
+		settings.ThresholdCount = 10
+	}
+	if settings.ThresholdWindowMinutes < 1 {
+		settings.ThresholdWindowMinutes = 1
+	}
+	if settings.ThresholdWindowMinutes > 60 {
+		settings.ThresholdWindowMinutes = 60
+	}
+
+	// Validate the action
+	switch settings.Action {
+	case StreamTimeoutActionTempUnsched, StreamTimeoutActionError, StreamTimeoutActionNone:
+		// valid
+	default:
+		settings.Action = StreamTimeoutActionTempUnsched
+	}
+
+	return &settings, nil
+}
+
+// SetStreamTimeoutSettings stores the stream timeout handling configuration.
+func (s *SettingService) SetStreamTimeoutSettings(ctx context.Context, settings *StreamTimeoutSettings) error {
+	if settings == nil {
+		return fmt.Errorf("settings cannot be nil")
+	}
+
+	// Validate configuration values
+	if settings.TempUnschedMinutes < 1 || settings.TempUnschedMinutes > 60 {
+		return fmt.Errorf("temp_unsched_minutes must be between 1-60")
+	}
+	if settings.ThresholdCount < 1 || settings.ThresholdCount > 10 {
+		return fmt.Errorf("threshold_count must be between 1-10")
+	}
+	if settings.ThresholdWindowMinutes < 1 || settings.ThresholdWindowMinutes > 60 {
+		return fmt.Errorf("threshold_window_minutes must be between 1-60")
+	}
+
+	switch settings.Action {
+	case StreamTimeoutActionTempUnsched, StreamTimeoutActionError, StreamTimeoutActionNone:
+		// valid
+	default:
+		return fmt.Errorf("invalid action: %s", settings.Action)
+	}
+
+	data, err := json.Marshal(settings)
+	if err != nil {
+		return fmt.Errorf("marshal stream timeout settings: %w", err)
+	}
+
+	return s.settingRepo.Set(ctx, SettingKeyStreamTimeoutSettings, string(data))
+}
`json:"enabled"` + // Action 超时后的处理方式: "temp_unsched" | "error" | "none" + Action string `json:"action"` + // TempUnschedMinutes 临时不可调度持续时间(分钟) + TempUnschedMinutes int `json:"temp_unsched_minutes"` + // ThresholdCount 触发阈值次数(累计多少次超时才触发) + ThresholdCount int `json:"threshold_count"` + // ThresholdWindowMinutes 阈值窗口时间(分钟) + ThresholdWindowMinutes int `json:"threshold_window_minutes"` +} + +// StreamTimeoutAction 流超时处理方式常量 +const ( + StreamTimeoutActionTempUnsched = "temp_unsched" // 临时不可调度 + StreamTimeoutActionError = "error" // 标记为错误状态 + StreamTimeoutActionNone = "none" // 不处理 +) + +// DefaultStreamTimeoutSettings 返回默认的流超时配置 +func DefaultStreamTimeoutSettings() *StreamTimeoutSettings { + return &StreamTimeoutSettings{ + Enabled: false, + Action: StreamTimeoutActionTempUnsched, + TempUnschedMinutes: 5, + ThresholdCount: 3, + ThresholdWindowMinutes: 10, + } +} diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go new file mode 100644 index 00000000..d960c86f --- /dev/null +++ b/backend/internal/service/subscription_service.go @@ -0,0 +1,669 @@ +package service + +import ( + "context" + "fmt" + "log" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +// MaxExpiresAt is the maximum allowed expiration date (year 2099) +// This prevents time.Time JSON serialization errors (RFC 3339 requires year <= 9999) +var MaxExpiresAt = time.Date(2099, 12, 31, 23, 59, 59, 0, time.UTC) + +// MaxValidityDays is the maximum allowed validity days for subscriptions (100 years) +const MaxValidityDays = 36500 + +var ( + ErrSubscriptionNotFound = infraerrors.NotFound("SUBSCRIPTION_NOT_FOUND", "subscription not found") + ErrSubscriptionExpired = infraerrors.Forbidden("SUBSCRIPTION_EXPIRED", "subscription has expired") + ErrSubscriptionSuspended = infraerrors.Forbidden("SUBSCRIPTION_SUSPENDED", "subscription is suspended") + ErrSubscriptionAlreadyExists = infraerrors.Conflict("SUBSCRIPTION_ALREADY_EXISTS", "subscription already exists for this user and group") + ErrGroupNotSubscriptionType = infraerrors.BadRequest("GROUP_NOT_SUBSCRIPTION_TYPE", "group is not a subscription type") + ErrDailyLimitExceeded = infraerrors.TooManyRequests("DAILY_LIMIT_EXCEEDED", "daily usage limit exceeded") + ErrWeeklyLimitExceeded = infraerrors.TooManyRequests("WEEKLY_LIMIT_EXCEEDED", "weekly usage limit exceeded") + ErrMonthlyLimitExceeded = infraerrors.TooManyRequests("MONTHLY_LIMIT_EXCEEDED", "monthly usage limit exceeded") + ErrSubscriptionNilInput = infraerrors.BadRequest("SUBSCRIPTION_NIL_INPUT", "subscription input cannot be nil") +) + +// SubscriptionService 订阅服务 +type SubscriptionService struct { + groupRepo GroupRepository + userSubRepo UserSubscriptionRepository + billingCacheService *BillingCacheService +} + +// NewSubscriptionService 创建订阅服务 +func NewSubscriptionService(groupRepo GroupRepository, userSubRepo UserSubscriptionRepository, billingCacheService *BillingCacheService) *SubscriptionService { + return &SubscriptionService{ + groupRepo: groupRepo, + userSubRepo: userSubRepo, + billingCacheService: billingCacheService, + } +} + +// AssignSubscriptionInput 分配订阅输入 +type AssignSubscriptionInput struct { + UserID int64 + GroupID int64 + ValidityDays int + AssignedBy int64 + Notes string +} + +// AssignSubscription 分配订阅给用户(不允许重复分配) +func (s *SubscriptionService) AssignSubscription(ctx context.Context, input *AssignSubscriptionInput) (*UserSubscription, error) { + // 检查分组是否存在且为订阅类型 + 
diff --git a/backend/internal/service/subscription_service.go b/backend/internal/service/subscription_service.go
new file mode 100644
index 00000000..d960c86f
--- /dev/null
+++ b/backend/internal/service/subscription_service.go
@@ -0,0 +1,669 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+// MaxExpiresAt is the maximum allowed expiration date (year 2099)
+// This prevents time.Time JSON serialization errors (RFC 3339 requires year <= 9999)
+var MaxExpiresAt = time.Date(2099, 12, 31, 23, 59, 59, 0, time.UTC)
+
+// MaxValidityDays is the maximum allowed validity days for subscriptions (100 years)
+const MaxValidityDays = 36500
+
+var (
+	ErrSubscriptionNotFound      = infraerrors.NotFound("SUBSCRIPTION_NOT_FOUND", "subscription not found")
+	ErrSubscriptionExpired       = infraerrors.Forbidden("SUBSCRIPTION_EXPIRED", "subscription has expired")
+	ErrSubscriptionSuspended     = infraerrors.Forbidden("SUBSCRIPTION_SUSPENDED", "subscription is suspended")
+	ErrSubscriptionAlreadyExists = infraerrors.Conflict("SUBSCRIPTION_ALREADY_EXISTS", "subscription already exists for this user and group")
+	ErrGroupNotSubscriptionType  = infraerrors.BadRequest("GROUP_NOT_SUBSCRIPTION_TYPE", "group is not a subscription type")
+	ErrDailyLimitExceeded        = infraerrors.TooManyRequests("DAILY_LIMIT_EXCEEDED", "daily usage limit exceeded")
+	ErrWeeklyLimitExceeded       = infraerrors.TooManyRequests("WEEKLY_LIMIT_EXCEEDED", "weekly usage limit exceeded")
+	ErrMonthlyLimitExceeded      = infraerrors.TooManyRequests("MONTHLY_LIMIT_EXCEEDED", "monthly usage limit exceeded")
+	ErrSubscriptionNilInput      = infraerrors.BadRequest("SUBSCRIPTION_NIL_INPUT", "subscription input cannot be nil")
+)
+
+// SubscriptionService manages user subscriptions.
+type SubscriptionService struct {
+	groupRepo           GroupRepository
+	userSubRepo         UserSubscriptionRepository
+	billingCacheService *BillingCacheService
+}
+
+// NewSubscriptionService creates a subscription service.
+func NewSubscriptionService(groupRepo GroupRepository, userSubRepo UserSubscriptionRepository, billingCacheService *BillingCacheService) *SubscriptionService {
+	return &SubscriptionService{
+		groupRepo:           groupRepo,
+		userSubRepo:         userSubRepo,
+		billingCacheService: billingCacheService,
+	}
+}
+
+// AssignSubscriptionInput is the input for assigning a subscription.
+type AssignSubscriptionInput struct {
+	UserID       int64
+	GroupID      int64
+	ValidityDays int
+	AssignedBy   int64
+	Notes        string
+}
+
+// AssignSubscription assigns a subscription to a user (duplicate assignments are rejected).
+func (s *SubscriptionService) AssignSubscription(ctx context.Context, input *AssignSubscriptionInput) (*UserSubscription, error) {
+	// Ensure the group exists and is a subscription-type group
+	group, err := s.groupRepo.GetByID(ctx, input.GroupID)
+	if err != nil {
+		return nil, fmt.Errorf("group not found: %w", err)
+	}
+	if !group.IsSubscriptionType() {
+		return nil, ErrGroupNotSubscriptionType
+	}
+
+	// Check whether a subscription already exists
+	exists, err := s.userSubRepo.ExistsByUserIDAndGroupID(ctx, input.UserID, input.GroupID)
+	if err != nil {
+		return nil, err
+	}
+	if exists {
+		return nil, ErrSubscriptionAlreadyExists
+	}
+
+	sub, err := s.createSubscription(ctx, input)
+	if err != nil {
+		return nil, err
+	}
+
+	// Invalidate the subscription cache
+	if s.billingCacheService != nil {
+		userID, groupID := input.UserID, input.GroupID
+		go func() {
+			cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+		}()
+	}
+
+	return sub, nil
+}
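Note the cache-invalidation pattern above: the goroutine deliberately uses context.Background() with its own 5-second timeout rather than the request context, so the invalidation still completes after the HTTP request ends. A runnable sketch of that detached-context shape (invalidate is a hypothetical stand-in for InvalidateSubscription):

package main

import (
	"context"
	"fmt"
	"time"
)

// invalidate is a hypothetical stand-in for billingCacheService.InvalidateSubscription.
func invalidate(ctx context.Context, userID, groupID int64) error {
	select {
	case <-time.After(10 * time.Millisecond): // simulated cache round-trip
		fmt.Println("cache invalidated for", userID, groupID)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	done := make(chan struct{})
	userID, groupID := int64(1), int64(2)
	go func() {
		defer close(done)
		// Detached from any request context: Background plus an explicit timeout,
		// mirroring the invalidation goroutines in the subscription service.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = invalidate(ctx, userID, groupID)
	}()
	<-done
}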
+
+// AssignOrExtendSubscription assigns or renews a subscription (used by redeem codes and similar flows).
+// If the user already has a subscription for the same group:
+//   - not yet expired: the days are added on top of the current expiry
+//   - already expired: the new expiry is computed from now and the subscription is reactivated
+//
+// If there is no subscription, a new one is created.
+func (s *SubscriptionService) AssignOrExtendSubscription(ctx context.Context, input *AssignSubscriptionInput) (*UserSubscription, bool, error) {
+	// Ensure the group exists and is a subscription-type group
+	group, err := s.groupRepo.GetByID(ctx, input.GroupID)
+	if err != nil {
+		return nil, false, fmt.Errorf("group not found: %w", err)
+	}
+	if !group.IsSubscriptionType() {
+		return nil, false, ErrGroupNotSubscriptionType
+	}
+
+	// Look up an existing subscription
+	existingSub, err := s.userSubRepo.GetByUserIDAndGroupID(ctx, input.UserID, input.GroupID)
+	if err != nil {
+		// A missing record is the normal case; any lookup failure is treated as "no subscription"
+		existingSub = nil
+	}
+
+	validityDays := input.ValidityDays
+	if validityDays <= 0 {
+		validityDays = 30
+	}
+	if validityDays > MaxValidityDays {
+		validityDays = MaxValidityDays
+	}
+
+	// An existing subscription: perform a renewal
+	if existingSub != nil {
+		now := time.Now()
+		var newExpiresAt time.Time
+
+		if existingSub.ExpiresAt.After(now) {
+			// Not yet expired: add on top of the current expiry
+			newExpiresAt = existingSub.ExpiresAt.AddDate(0, 0, validityDays)
+		} else {
+			// Already expired: start counting from now
+			newExpiresAt = now.AddDate(0, 0, validityDays)
+		}
+
+		// Never exceed the maximum expiry
+		if newExpiresAt.After(MaxExpiresAt) {
+			newExpiresAt = MaxExpiresAt
+		}
+
+		// Update the expiry
+		if err := s.userSubRepo.ExtendExpiry(ctx, existingSub.ID, newExpiresAt); err != nil {
+			return nil, false, fmt.Errorf("extend subscription: %w", err)
+		}
+
+		// If the subscription was expired or suspended, restore it to active
+		if existingSub.Status != SubscriptionStatusActive {
+			if err := s.userSubRepo.UpdateStatus(ctx, existingSub.ID, SubscriptionStatusActive); err != nil {
+				return nil, false, fmt.Errorf("update subscription status: %w", err)
+			}
+		}
+
+		// Append notes
+		if input.Notes != "" {
+			newNotes := existingSub.Notes
+			if newNotes != "" {
+				newNotes += "\n"
+			}
+			newNotes += input.Notes
+			if err := s.userSubRepo.UpdateNotes(ctx, existingSub.ID, newNotes); err != nil {
+				log.Printf("update subscription notes failed: sub_id=%d err=%v", existingSub.ID, err)
+			}
+		}
+
+		// Invalidate the subscription cache
+		if s.billingCacheService != nil {
+			userID, groupID := input.UserID, input.GroupID
+			go func() {
+				cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+				defer cancel()
+				_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+			}()
+		}
+
+		// Return the updated subscription
+		sub, err := s.userSubRepo.GetByID(ctx, existingSub.ID)
+		return sub, true, err // true means this was a renewal
+	}
+
+	// No subscription yet: create one
+	sub, err := s.createSubscription(ctx, input)
+	if err != nil {
+		return nil, false, err
+	}
+
+	// Invalidate the subscription cache
+	if s.billingCacheService != nil {
+		userID, groupID := input.UserID, input.GroupID
+		go func() {
+			cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+		}()
+	}
+
+	return sub, false, nil // false means this was a new assignment
+}
+
+// createSubscription creates a new subscription (internal helper).
+func (s *SubscriptionService) createSubscription(ctx context.Context, input *AssignSubscriptionInput) (*UserSubscription, error) {
+	validityDays := input.ValidityDays
+	if validityDays <= 0 {
+		validityDays = 30
+	}
+	if validityDays > MaxValidityDays {
+		validityDays = MaxValidityDays
+	}
+
+	now := time.Now()
+	expiresAt := now.AddDate(0, 0, validityDays)
+	if expiresAt.After(MaxExpiresAt) {
+		expiresAt = MaxExpiresAt
+	}
+
+	sub := &UserSubscription{
+		UserID:     input.UserID,
+		GroupID:    input.GroupID,
+		StartsAt:   now,
+		ExpiresAt:  expiresAt,
+		Status:     SubscriptionStatusActive,
+		AssignedAt: now,
+		Notes:      input.Notes,
+		CreatedAt:  now,
+		UpdatedAt:  now,
+	}
+	// Only set when AssignedBy > 0 (0 means system-assigned, e.g. via a redeem code)
+	if input.AssignedBy > 0 {
+		sub.AssignedBy = &input.AssignedBy
+	}
+
+	if err := s.userSubRepo.Create(ctx, sub); err != nil {
+		return nil, err
+	}
+
+	// Re-fetch the full subscription (including associations)
+	return s.userSubRepo.GetByID(ctx, sub.ID)
+}
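The renewal arithmetic above boils down to: extend from the current expiry if it is still in the future, otherwise from now, and always cap at MaxExpiresAt. A pure-function sketch of exactly that rule:

package main

import (
	"fmt"
	"time"
)

var maxExpiresAt = time.Date(2099, 12, 31, 23, 59, 59, 0, time.UTC)

// renew mirrors AssignOrExtendSubscription's expiry arithmetic.
func renew(expiresAt, now time.Time, days int) time.Time {
	base := now
	if expiresAt.After(now) {
		base = expiresAt // not yet expired: stack the days on top of the remaining time
	}
	next := base.AddDate(0, 0, days)
	if next.After(maxExpiresAt) {
		next = maxExpiresAt
	}
	return next
}

func main() {
	now := time.Date(2026, 1, 15, 0, 0, 0, 0, time.UTC)
	active := now.AddDate(0, 0, 10)  // 10 days left
	expired := now.AddDate(0, 0, -5) // lapsed 5 days ago
	fmt.Println(renew(active, now, 30))  // 2026-02-24: 10 remaining + 30 new days
	fmt.Println(renew(expired, now, 30)) // 2026-02-14: lapsed time is not refunded
}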
+
+// BulkAssignSubscriptionInput is the input for bulk subscription assignment.
+type BulkAssignSubscriptionInput struct {
+	UserIDs      []int64
+	GroupID      int64
+	ValidityDays int
+	AssignedBy   int64
+	Notes        string
+}
+
+// BulkAssignResult is the result of a bulk assignment.
+type BulkAssignResult struct {
+	SuccessCount  int
+	FailedCount   int
+	Subscriptions []UserSubscription
+	Errors        []string
+}
+
+// BulkAssignSubscription assigns subscriptions in bulk.
+func (s *SubscriptionService) BulkAssignSubscription(ctx context.Context, input *BulkAssignSubscriptionInput) (*BulkAssignResult, error) {
+	result := &BulkAssignResult{
+		Subscriptions: make([]UserSubscription, 0),
+		Errors:        make([]string, 0),
+	}
+
+	for _, userID := range input.UserIDs {
+		sub, err := s.AssignSubscription(ctx, &AssignSubscriptionInput{
+			UserID:       userID,
+			GroupID:      input.GroupID,
+			ValidityDays: input.ValidityDays,
+			AssignedBy:   input.AssignedBy,
+			Notes:        input.Notes,
+		})
+		if err != nil {
+			result.FailedCount++
+			result.Errors = append(result.Errors, fmt.Sprintf("user %d: %v", userID, err))
+		} else {
+			result.SuccessCount++
+			result.Subscriptions = append(result.Subscriptions, *sub)
+		}
+	}
+
+	return result, nil
+}
+
+// RevokeSubscription revokes a subscription.
+func (s *SubscriptionService) RevokeSubscription(ctx context.Context, subscriptionID int64) error {
+	// Fetch the subscription first so its cache entry can be invalidated
+	sub, err := s.userSubRepo.GetByID(ctx, subscriptionID)
+	if err != nil {
+		return err
+	}
+
+	if err := s.userSubRepo.Delete(ctx, subscriptionID); err != nil {
+		return err
+	}
+
+	// Invalidate the subscription cache
+	if s.billingCacheService != nil {
+		userID, groupID := sub.UserID, sub.GroupID
+		go func() {
+			cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+		}()
+	}
+
+	return nil
+}
+
+// ExtendSubscription extends a subscription by the given number of days.
+func (s *SubscriptionService) ExtendSubscription(ctx context.Context, subscriptionID int64, days int) (*UserSubscription, error) {
+	sub, err := s.userSubRepo.GetByID(ctx, subscriptionID)
+	if err != nil {
+		return nil, ErrSubscriptionNotFound
+	}
+
+	// Cap the extension days
+	if days > MaxValidityDays {
+		days = MaxValidityDays
+	}
+
+	// Compute the new expiry
+	newExpiresAt := sub.ExpiresAt.AddDate(0, 0, days)
+	if newExpiresAt.After(MaxExpiresAt) {
+		newExpiresAt = MaxExpiresAt
+	}
+
+	if err := s.userSubRepo.ExtendExpiry(ctx, subscriptionID, newExpiresAt); err != nil {
+		return nil, err
+	}
+
+	// If the subscription had expired, restore it to active
+	if sub.Status == SubscriptionStatusExpired {
+		if err := s.userSubRepo.UpdateStatus(ctx, subscriptionID, SubscriptionStatusActive); err != nil {
+			return nil, err
+		}
+	}
+
+	// Invalidate the subscription cache
+	if s.billingCacheService != nil {
+		userID, groupID := sub.UserID, sub.GroupID
+		go func() {
+			cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			defer cancel()
+			_ = s.billingCacheService.InvalidateSubscription(cacheCtx, userID, groupID)
+		}()
+	}
+
+	return s.userSubRepo.GetByID(ctx, subscriptionID)
+}
+
+// GetByID returns a subscription by ID.
+func (s *SubscriptionService) GetByID(ctx context.Context, id int64) (*UserSubscription, error) {
+	return s.userSubRepo.GetByID(ctx, id)
+}
+
+// GetActiveSubscription returns the user's active subscription for a given group.
+func (s *SubscriptionService) GetActiveSubscription(ctx context.Context, userID, groupID int64) (*UserSubscription, error) {
+	sub, err := s.userSubRepo.GetActiveByUserIDAndGroupID(ctx, userID, groupID)
+	if err != nil {
+		return nil, ErrSubscriptionNotFound
+	}
+	return sub, nil
+}
+
+// ListUserSubscriptions returns all subscriptions of a user.
+func (s *SubscriptionService) ListUserSubscriptions(ctx context.Context, userID int64) ([]UserSubscription, error) {
+	subs, err := s.userSubRepo.ListByUserID(ctx, userID)
+	if err != nil {
+		return nil, err
+	}
+	normalizeExpiredWindows(subs)
+	return subs, nil
+}
+
+// ListActiveUserSubscriptions returns all active subscriptions of a user.
+func (s *SubscriptionService) ListActiveUserSubscriptions(ctx context.Context, userID int64) ([]UserSubscription, error) {
+	subs, err := s.userSubRepo.ListActiveByUserID(ctx, userID)
+	if err != nil {
+		return nil, err
+	}
+	normalizeExpiredWindows(subs)
+	return subs, nil
+}
+
+// ListGroupSubscriptions returns all subscriptions of a group.
+func (s *SubscriptionService) ListGroupSubscriptions(ctx context.Context, groupID int64, page, pageSize int) ([]UserSubscription, *pagination.PaginationResult, error) {
+	params := pagination.PaginationParams{Page: page, PageSize: pageSize}
+	subs, pag, err := s.userSubRepo.ListByGroupID(ctx, groupID, params)
+	if err != nil {
+		return nil, nil, err
+	}
+	normalizeExpiredWindows(subs)
+	return subs, pag, nil
+}
+
+// List returns all subscriptions (paginated, with optional filters).
+func (s *SubscriptionService) List(ctx context.Context, page, pageSize int, userID, groupID *int64, status string) ([]UserSubscription, *pagination.PaginationResult, error) {
+	params := pagination.PaginationParams{Page: page, PageSize: pageSize}
+	subs, pag, err := s.userSubRepo.List(ctx, params, userID, groupID, status)
+	if err != nil {
+		return nil, nil, err
+	}
+	normalizeExpiredWindows(subs)
+	return subs, pag, nil
+}
+
+// normalizeExpiredWindows zeroes out data for expired windows (display only; the database is untouched).
+// This ensures the frontend shows the current window state instead of stale data from an expired window.
+func normalizeExpiredWindows(subs []UserSubscription) {
+	for i := range subs {
+		sub := &subs[i]
+		// Daily window expired: zero the displayed data
+		if sub.NeedsDailyReset() {
+			sub.DailyWindowStart = nil
+			sub.DailyUsageUSD = 0
+		}
+		// Weekly window expired: zero the displayed data
+		if sub.NeedsWeeklyReset() {
+			sub.WeeklyWindowStart = nil
+			sub.WeeklyUsageUSD = 0
+		}
+		// Monthly window expired: zero the displayed data
+		if sub.NeedsMonthlyReset() {
+			sub.MonthlyWindowStart = nil
+			sub.MonthlyUsageUSD = 0
+		}
+	}
+}
+
+// startOfDay returns midnight of the given time's calendar date (keeping the original time zone).
+func startOfDay(t time.Time) time.Time {
+	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+}
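startOfDay truncates in the time's own location, which matters here: usage windows then reset at local midnight rather than UTC midnight. A quick demonstration (Asia/Shanghai is just an example zone; time.LoadLocation needs the tz database available):

package main

import (
	"fmt"
	"time"
)

func startOfDay(t time.Time) time.Time {
	return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}

func main() {
	loc, err := time.LoadLocation("Asia/Shanghai") // example zone; requires tzdata
	if err != nil {
		panic(err)
	}
	t := time.Date(2026, 1, 15, 3, 30, 0, 0, loc) // 03:30 local on Jan 15
	fmt.Println(startOfDay(t))       // 2026-01-15 00:00:00 +0800 CST
	fmt.Println(startOfDay(t.UTC())) // 2026-01-14 00:00:00 +0000 UTC: a different calendar day
}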
+
+// CheckAndActivateWindow activates the usage windows on first use.
+func (s *SubscriptionService) CheckAndActivateWindow(ctx context.Context, sub *UserSubscription) error {
+	if sub.IsWindowActivated() {
+		return nil
+	}
+
+	// Use today's midnight as the window start
+	windowStart := startOfDay(time.Now())
+	return s.userSubRepo.ActivateWindows(ctx, sub.ID, windowStart)
+}
+
+// CheckAndResetWindows resets any expired usage windows.
+func (s *SubscriptionService) CheckAndResetWindows(ctx context.Context, sub *UserSubscription) error {
+	// Use today's midnight as the new window start
+	windowStart := startOfDay(time.Now())
+	needsInvalidateCache := false
+
+	// Daily window reset (24 hours)
+	if sub.NeedsDailyReset() {
+		if err := s.userSubRepo.ResetDailyUsage(ctx, sub.ID, windowStart); err != nil {
+			return err
+		}
+		sub.DailyWindowStart = &windowStart
+		sub.DailyUsageUSD = 0
+		needsInvalidateCache = true
+	}
+
+	// Weekly window reset (7 days)
+	if sub.NeedsWeeklyReset() {
+		if err := s.userSubRepo.ResetWeeklyUsage(ctx, sub.ID, windowStart); err != nil {
+			return err
+		}
+		sub.WeeklyWindowStart = &windowStart
+		sub.WeeklyUsageUSD = 0
+		needsInvalidateCache = true
+	}
+
+	// Monthly window reset (30 days)
+	if sub.NeedsMonthlyReset() {
+		if err := s.userSubRepo.ResetMonthlyUsage(ctx, sub.ID, windowStart); err != nil {
+			return err
+		}
+		sub.MonthlyWindowStart = &windowStart
+		sub.MonthlyUsageUSD = 0
+		needsInvalidateCache = true
+	}
+
+	// If any window was reset, invalidate the Redis cache to keep it consistent
+	if needsInvalidateCache && s.billingCacheService != nil {
+		_ = s.billingCacheService.InvalidateSubscription(ctx, sub.UserID, sub.GroupID)
+	}
+
+	return nil
+}
+
+// CheckUsageLimits checks the usage limits and returns an error when a limit is exceeded.
+// Used as a fast pre-check in middleware; additionalCost is usually 0.
+func (s *SubscriptionService) CheckUsageLimits(ctx context.Context, sub *UserSubscription, group *Group, additionalCost float64) error {
+	if !sub.CheckDailyLimit(group, additionalCost) {
+		return ErrDailyLimitExceeded
+	}
+	if !sub.CheckWeeklyLimit(group, additionalCost) {
+		return ErrWeeklyLimitExceeded
+	}
+	if !sub.CheckMonthlyLimit(group, additionalCost) {
+		return ErrMonthlyLimitExceeded
+	}
+	return nil
+}
+
+// RecordUsage records usage cost against a subscription.
+func (s *SubscriptionService) RecordUsage(ctx context.Context, subscriptionID int64, costUSD float64) error {
+	return s.userSubRepo.IncrementUsage(ctx, subscriptionID, costUSD)
+}
+
+// SubscriptionProgress describes a subscription's usage progress.
+type SubscriptionProgress struct {
+	ID            int64                `json:"id"`
+	GroupName     string               `json:"group_name"`
+	ExpiresAt     time.Time            `json:"expires_at"`
+	ExpiresInDays int                  `json:"expires_in_days"`
+	Daily         *UsageWindowProgress `json:"daily,omitempty"`
+	Weekly        *UsageWindowProgress `json:"weekly,omitempty"`
+	Monthly       *UsageWindowProgress `json:"monthly,omitempty"`
+}
+
+// UsageWindowProgress describes progress within one usage window.
+type UsageWindowProgress struct {
+	LimitUSD        float64   `json:"limit_usd"`
+	UsedUSD         float64   `json:"used_usd"`
+	RemainingUSD    float64   `json:"remaining_usd"`
+	Percentage      float64   `json:"percentage"`
+	WindowStart     time.Time `json:"window_start"`
+	ResetsAt        time.Time `json:"resets_at"`
+	ResetsInSeconds int64     `json:"resets_in_seconds"`
+}
+
+// GetSubscriptionProgress returns the usage progress of a subscription.
+func (s *SubscriptionService) GetSubscriptionProgress(ctx context.Context, subscriptionID int64) (*SubscriptionProgress, error) {
+	sub, err := s.userSubRepo.GetByID(ctx, subscriptionID)
+	if err != nil {
+		return nil, ErrSubscriptionNotFound
+	}
+
+	group := sub.Group
+	if group == nil {
+		group, err = s.groupRepo.GetByID(ctx, sub.GroupID)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	progress := &SubscriptionProgress{
+		ID:            sub.ID,
+		GroupName:     group.Name,
+		ExpiresAt:     sub.ExpiresAt,
+		ExpiresInDays: sub.DaysRemaining(),
+	}
+
+	// Daily progress
+	if group.HasDailyLimit() && sub.DailyWindowStart != nil {
+		limit := *group.DailyLimitUSD
+		resetsAt := sub.DailyWindowStart.Add(24 * time.Hour)
+		progress.Daily = &UsageWindowProgress{
+			LimitUSD:        limit,
+			UsedUSD:         sub.DailyUsageUSD,
+			RemainingUSD:    limit - sub.DailyUsageUSD,
+			Percentage:      (sub.DailyUsageUSD / limit) * 100,
+			WindowStart:     *sub.DailyWindowStart,
+			ResetsAt:        resetsAt,
+			ResetsInSeconds: int64(time.Until(resetsAt).Seconds()),
+		}
+		if progress.Daily.RemainingUSD < 0 {
+			progress.Daily.RemainingUSD = 0
+		}
+		if progress.Daily.Percentage > 100 {
+			progress.Daily.Percentage = 100
+		}
+		if progress.Daily.ResetsInSeconds < 0 {
+			progress.Daily.ResetsInSeconds = 0
+		}
+	}
+
+	// Weekly progress
+	if group.HasWeeklyLimit() && sub.WeeklyWindowStart != nil {
+		limit := *group.WeeklyLimitUSD
+		resetsAt := sub.WeeklyWindowStart.Add(7 * 24 * time.Hour)
+		progress.Weekly = &UsageWindowProgress{
+			LimitUSD:        limit,
+			UsedUSD:         sub.WeeklyUsageUSD,
+			RemainingUSD:    limit - sub.WeeklyUsageUSD,
+			Percentage:      (sub.WeeklyUsageUSD / limit) * 100,
+			WindowStart:     *sub.WeeklyWindowStart,
+			ResetsAt:        resetsAt,
+			ResetsInSeconds: int64(time.Until(resetsAt).Seconds()),
+		}
+		if progress.Weekly.RemainingUSD < 0 {
+			progress.Weekly.RemainingUSD = 0
+		}
+		if progress.Weekly.Percentage > 100 {
+			progress.Weekly.Percentage = 100
+		}
+		if progress.Weekly.ResetsInSeconds < 0 {
+			progress.Weekly.ResetsInSeconds = 0
+		}
+	}
+
+	// Monthly progress
+	if group.HasMonthlyLimit() && sub.MonthlyWindowStart != nil {
+		limit := *group.MonthlyLimitUSD
+		resetsAt := sub.MonthlyWindowStart.Add(30 * 24 * time.Hour)
+		progress.Monthly = &UsageWindowProgress{
+			LimitUSD:        limit,
+			UsedUSD:         sub.MonthlyUsageUSD,
+			RemainingUSD:    limit - sub.MonthlyUsageUSD,
+			Percentage:      (sub.MonthlyUsageUSD / limit) * 100,
+			WindowStart:     *sub.MonthlyWindowStart,
+			ResetsAt:        resetsAt,
+			ResetsInSeconds: int64(time.Until(resetsAt).Seconds()),
+		}
+		if progress.Monthly.RemainingUSD < 0 {
+			progress.Monthly.RemainingUSD = 0
+		}
+		if progress.Monthly.Percentage > 100 {
+			progress.Monthly.Percentage = 100
+		}
+		if progress.Monthly.ResetsInSeconds < 0 {
+			progress.Monthly.ResetsInSeconds = 0
+		}
+	}
+
+	return progress, nil
+}
+
+// GetUserSubscriptionsWithProgress returns all of a user's subscriptions together with their progress.
+func (s *SubscriptionService) GetUserSubscriptionsWithProgress(ctx context.Context, userID int64) ([]SubscriptionProgress, error) {
+	subs, err := s.userSubRepo.ListActiveByUserID(ctx, userID)
+	if err != nil {
+		return nil, err
+	}
+
+	progresses := make([]SubscriptionProgress, 0, len(subs))
+	for _, sub := range subs {
+		progress, err := s.GetSubscriptionProgress(ctx, sub.ID)
+		if err != nil {
+			continue
+		}
+		progresses = append(progresses, *progress)
+	}
+
+	return progresses, nil
+}
+
+// UpdateExpiredSubscriptions marks expired subscriptions (called from a scheduled job).
+func (s *SubscriptionService) UpdateExpiredSubscriptions(ctx context.Context) (int64, error) {
+	return s.userSubRepo.BatchUpdateExpiredStatus(ctx)
+}
+
+// ValidateSubscription checks whether a subscription is usable.
+func (s *SubscriptionService) ValidateSubscription(ctx context.Context, sub *UserSubscription) error {
+	if sub.Status == SubscriptionStatusExpired {
+		return ErrSubscriptionExpired
+	}
+	if sub.Status == SubscriptionStatusSuspended {
+		return ErrSubscriptionSuspended
+	}
+	if sub.IsExpired() {
+		// Update the status
+		_ = s.userSubRepo.UpdateStatus(ctx, sub.ID, SubscriptionStatusExpired)
+		return ErrSubscriptionExpired
+	}
+	return nil
+}
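GetSubscriptionProgress computes each window's remaining budget and percentage, then clamps for display: remaining floors at 0, percentage caps at 100, and the reset countdown floors at 0. The window math, distilled into a runnable form:

package main

import "fmt"

// windowProgress mirrors the clamping in GetSubscriptionProgress.
func windowProgress(limitUSD, usedUSD float64) (remaining, percent float64) {
	remaining = limitUSD - usedUSD
	percent = usedUSD / limitUSD * 100
	if remaining < 0 {
		remaining = 0
	}
	if percent > 100 {
		percent = 100
	}
	return remaining, percent
}

func main() {
	fmt.Println(windowProgress(10, 2.5)) // 7.5 25
	fmt.Println(windowProgress(10, 12))  // 0 100: an overrun displays as a full bar, never negative
}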
diff --git a/backend/internal/service/temp_unsched.go b/backend/internal/service/temp_unsched.go
new file mode 100644
index 00000000..3871b72b
--- /dev/null
+++ b/backend/internal/service/temp_unsched.go
@@ -0,0 +1,36 @@
+package service
+
+import (
+	"context"
+	"time"
+)
+
+// TempUnschedState is the temporarily-unschedulable state of an account.
+type TempUnschedState struct {
+	UntilUnix       int64  `json:"until_unix"`        // when the state lifts (Unix timestamp)
+	TriggeredAtUnix int64  `json:"triggered_at_unix"` // when it was triggered (Unix timestamp)
+	StatusCode      int    `json:"status_code"`       // status code that triggered it
+	MatchedKeyword  string `json:"matched_keyword"`   // matched keyword
+	RuleIndex       int    `json:"rule_index"`        // index of the rule that fired
+	ErrorMessage    string `json:"error_message"`     // error message
+}
+
+// TempUnschedCache caches the temporarily-unschedulable state.
+type TempUnschedCache interface {
+	SetTempUnsched(ctx context.Context, accountID int64, state *TempUnschedState) error
+	GetTempUnsched(ctx context.Context, accountID int64) (*TempUnschedState, error)
+	DeleteTempUnsched(ctx context.Context, accountID int64) error
+}
+
+// TimeoutCounterCache caches per-account timeout counters.
+type TimeoutCounterCache interface {
+	// IncrementTimeoutCount increments the account's timeout count and returns the current value.
+	// windowMinutes is the counting window (minutes); the counter resets automatically once it elapses.
+	IncrementTimeoutCount(ctx context.Context, accountID int64, windowMinutes int) (int64, error)
+	// GetTimeoutCount returns the account's current timeout count.
+	GetTimeoutCount(ctx context.Context, accountID int64) (int64, error)
+	// ResetTimeoutCount resets the account's timeout count.
+	ResetTimeoutCount(ctx context.Context, accountID int64) error
+	// GetTimeoutCountTTL returns the counter's remaining time to live.
+	GetTimeoutCountTTL(ctx context.Context, accountID int64) (time.Duration, error)
+}
diff --git a/backend/internal/service/timing_wheel_service.go b/backend/internal/service/timing_wheel_service.go
new file mode 100644
index 00000000..c4e64e33
--- /dev/null
+++ b/backend/internal/service/timing_wheel_service.go
@@ -0,0 +1,63 @@
+package service
+
+import (
+	"log"
+	"sync"
+	"time"
+
+	"github.com/zeromicro/go-zero/core/collection"
+)
+
+// TimingWheelService wraps go-zero's TimingWheel for task scheduling
+type TimingWheelService struct {
+	tw       *collection.TimingWheel
+	stopOnce sync.Once
+}
+
+// NewTimingWheelService creates a new TimingWheelService instance
+func NewTimingWheelService() *TimingWheelService {
+	// 1 second tick, 3600 slots = supports up to 1 hour delay
+	// execute function: runs func() type tasks
+	tw, err := collection.NewTimingWheel(1*time.Second, 3600, func(key, value any) {
+		if fn, ok := value.(func()); ok {
+			fn()
+		}
+	})
+	if err != nil {
+		panic(err)
+	}
+	return &TimingWheelService{tw: tw}
+}
+
+// Start starts the timing wheel
+func (s *TimingWheelService) Start() {
+	log.Println("[TimingWheel] Started (auto-start by go-zero)")
+}
+
+// Stop stops the timing wheel
+func (s *TimingWheelService) Stop() {
+	s.stopOnce.Do(func() {
+		s.tw.Stop()
+		log.Println("[TimingWheel] Stopped")
+	})
+}
+
+// Schedule schedules a one-time task
+func (s *TimingWheelService) Schedule(name string, delay time.Duration, fn func()) {
+	_ = s.tw.SetTimer(name, fn, delay)
+}
+
+// ScheduleRecurring schedules a recurring task
+func (s *TimingWheelService) ScheduleRecurring(name string, interval time.Duration, fn func()) {
+	var schedule func()
+	schedule = func() {
+		fn()
+		_ = s.tw.SetTimer(name, schedule, interval)
+	}
+	_ = s.tw.SetTimer(name, schedule, interval)
+}
+
+// Cancel cancels a scheduled task
+func (s *TimingWheelService) Cancel(name string) {
+	_ = s.tw.RemoveTimer(name)
+}
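ScheduleRecurring builds recurrence out of one-shot timers: the scheduled closure runs the task and then re-arms itself for the next interval. The same self-rearming shape with only the standard library (time.AfterFunc standing in for the timing wheel), as a runnable sketch:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var runs int32
	interval := 50 * time.Millisecond

	// Self-rearming one-shot timer: each firing schedules the next one,
	// mirroring ScheduleRecurring's repeated SetTimer calls.
	var schedule func()
	schedule = func() {
		atomic.AddInt32(&runs, 1)
		time.AfterFunc(interval, schedule)
	}
	time.AfterFunc(interval, schedule)

	time.Sleep(225 * time.Millisecond)
	fmt.Println("runs:", atomic.LoadInt32(&runs)) // ~4 firings in 225ms at a 50ms interval
}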
diff --git a/backend/internal/service/token_cache_invalidator.go b/backend/internal/service/token_cache_invalidator.go
new file mode 100644
index 00000000..aacdf266
--- /dev/null
+++ b/backend/internal/service/token_cache_invalidator.go
@@ -0,0 +1,35 @@
+package service
+
+import "context"
+
+type TokenCacheInvalidator interface {
+	InvalidateToken(ctx context.Context, account *Account) error
+}
+
+type CompositeTokenCacheInvalidator struct {
+	geminiCache GeminiTokenCache
+}
+
+func NewCompositeTokenCacheInvalidator(geminiCache GeminiTokenCache) *CompositeTokenCacheInvalidator {
+	return &CompositeTokenCacheInvalidator{
+		geminiCache: geminiCache,
+	}
+}
+
+func (c *CompositeTokenCacheInvalidator) InvalidateToken(ctx context.Context, account *Account) error {
+	if c == nil || c.geminiCache == nil || account == nil {
+		return nil
+	}
+	if account.Type != AccountTypeOAuth {
+		return nil
+	}
+
+	switch account.Platform {
+	case PlatformGemini:
+		return c.geminiCache.DeleteAccessToken(ctx, GeminiTokenCacheKey(account))
+	case PlatformAntigravity:
+		return c.geminiCache.DeleteAccessToken(ctx, AntigravityTokenCacheKey(account))
+	default:
+		return nil
+	}
+}
diff --git a/backend/internal/service/token_cache_invalidator_test.go b/backend/internal/service/token_cache_invalidator_test.go
new file mode 100644
index 00000000..0090ed24
--- /dev/null
+++ b/backend/internal/service/token_cache_invalidator_test.go
@@ -0,0 +1,97 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+type geminiTokenCacheStub struct {
+	deletedKeys []string
+	deleteErr   error
+}
+
+func (s *geminiTokenCacheStub) GetAccessToken(ctx context.Context, cacheKey string) (string, error) {
+	return "", nil
+}
+
+func (s *geminiTokenCacheStub) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error {
+	return nil
+}
+
+func (s *geminiTokenCacheStub) DeleteAccessToken(ctx context.Context, cacheKey string) error {
+	s.deletedKeys = append(s.deletedKeys, cacheKey)
+	return s.deleteErr
+}
+
+func (s *geminiTokenCacheStub) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) {
+	return true, nil
+}
+
+func (s *geminiTokenCacheStub) ReleaseRefreshLock(ctx context.Context, cacheKey string) error {
+	return nil
+}
+
+func TestCompositeTokenCacheInvalidator_Gemini(t *testing.T) {
+	cache := &geminiTokenCacheStub{}
+	invalidator := NewCompositeTokenCacheInvalidator(cache)
+	account := &Account{
+		ID:       10,
+		Platform: PlatformGemini,
+		Type:     AccountTypeOAuth,
+		Credentials: map[string]any{
+			"project_id": "project-x",
+		},
+	}
+
+	err := invalidator.InvalidateToken(context.Background(), account)
+	require.NoError(t, err)
+	require.Equal(t, []string{"project-x"}, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_Antigravity(t *testing.T) {
+	cache := &geminiTokenCacheStub{}
+	invalidator := NewCompositeTokenCacheInvalidator(cache)
+	account := &Account{
+		ID:       99,
+		Platform: PlatformAntigravity,
+		Type:     AccountTypeOAuth,
+		Credentials: map[string]any{
+			"project_id": "ag-project",
+		},
+	}
+
+	err := invalidator.InvalidateToken(context.Background(), account)
+	require.NoError(t, err)
+	require.Equal(t, []string{"ag:ag-project"}, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_SkipNonOAuth(t *testing.T) {
+	cache := &geminiTokenCacheStub{}
+	invalidator := NewCompositeTokenCacheInvalidator(cache)
+	account := &Account{
+		ID:       1,
+		Platform: PlatformGemini,
+		Type:     AccountTypeAPIKey,
+	}
+
+	err := invalidator.InvalidateToken(context.Background(), account)
+	require.NoError(t, err)
+	require.Empty(t, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_NilCache(t *testing.T) {
+	invalidator := NewCompositeTokenCacheInvalidator(nil)
+	account := &Account{
+		ID:       2,
+		Platform: PlatformGemini,
+		Type:     AccountTypeOAuth,
+	}
+
+	err := invalidator.InvalidateToken(context.Background(), account)
+	require.NoError(t, err)
+}
diff --git a/backend/internal/service/token_cache_key_test.go b/backend/internal/service/token_cache_key_test.go
new file mode 100644
index 00000000..0dc751c6
--- /dev/null
+++ b/backend/internal/service/token_cache_key_test.go
@@ -0,0 +1,153 @@
+//go:build unit
+
+package service
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestGeminiTokenCacheKey(t *testing.T) {
+	tests := []struct {
+		name     string
+		account  *Account
+		expected string
+	}{
+		{
+			name: "with_project_id",
+			account: &Account{
+				ID: 100,
+				Credentials: map[string]any{
+					"project_id": "my-project-123",
+				},
+			},
+			expected: "my-project-123",
+		},
+		{
+			name: "project_id_with_whitespace",
+			account: &Account{
+				ID: 101,
+				Credentials: map[string]any{
+					"project_id": " project-with-spaces ",
+				},
+			},
+			expected: "project-with-spaces",
+		},
+		{
+			name: "empty_project_id_fallback_to_account_id",
+			account: &Account{
+				ID: 102,
+				Credentials: map[string]any{
+					"project_id": "",
+				},
+			},
+			expected: "account:102",
+		},
+		{
+			name: "whitespace_only_project_id_fallback_to_account_id",
+			account: &Account{
+				ID: 103,
+				Credentials: map[string]any{
+					"project_id": " ",
+				},
+			},
+			expected: "account:103",
+		},
+		{
+			name: "no_project_id_key_fallback_to_account_id",
+			account: &Account{
+				ID:          104,
+				Credentials: map[string]any{},
+			},
+			expected: "account:104",
+		},
+		{
+			name: "nil_credentials_fallback_to_account_id",
+			account: &Account{
+				ID:          105,
+				Credentials: nil,
+			},
+			expected: "account:105",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := GeminiTokenCacheKey(tt.account)
+			require.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestAntigravityTokenCacheKey(t *testing.T) {
+	tests := []struct {
+		name     string
+		account  *Account
+		expected string
+	}{
+		{
+			name: "with_project_id",
+			account: &Account{
+				ID: 200,
+				Credentials: map[string]any{
+					"project_id": "ag-project-456",
+				},
+			},
+			expected: "ag:ag-project-456",
+		},
+		{
+			name: "project_id_with_whitespace",
+			account: &Account{
+				ID: 201,
+				Credentials: map[string]any{
+					"project_id": " ag-project-spaces ",
+				},
+			},
+			expected: "ag:ag-project-spaces",
+		},
+		{
+			name: "empty_project_id_fallback_to_account_id",
+			account: &Account{
+				ID: 202,
+				Credentials: map[string]any{
+					"project_id": "",
+				},
+			},
+			expected: "ag:account:202",
+		},
+		{
+			name: "whitespace_only_project_id_fallback_to_account_id",
+			account: &Account{
+				ID: 203,
+				Credentials: map[string]any{
+					"project_id": " ",
+				},
+			},
+			expected: "ag:account:203",
+		},
+		{
+			name: "no_project_id_key_fallback_to_account_id",
+			account: &Account{
+				ID:          204,
+				Credentials: map[string]any{},
+			},
+			expected: "ag:account:204",
+		},
+		{
+			name: "nil_credentials_fallback_to_account_id",
+			account: &Account{
+				ID:          205,
+				Credentials: nil,
+			},
+			expected: "ag:account:205",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := AntigravityTokenCacheKey(tt.account)
+			require.Equal(t, tt.expected, result)
+		})
+	}
+}
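The two test tables pin down the cache-key scheme: Gemini keys are the trimmed project_id (falling back to "account:<id>" when the credential is absent or blank), and Antigravity prepends "ag:" so the two platforms can never collide in the shared cache. A sketch consistent with those expectations (the real GeminiTokenCacheKey/AntigravityTokenCacheKey live elsewhere in this patch; the lowercase names here are local stand-ins):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type account struct {
	ID          int64
	Credentials map[string]any
}

// geminiKey matches the expectations in TestGeminiTokenCacheKey.
func geminiKey(a *account) string {
	if pid, ok := a.Credentials["project_id"].(string); ok {
		if trimmed := strings.TrimSpace(pid); trimmed != "" {
			return trimmed
		}
	}
	return "account:" + strconv.FormatInt(a.ID, 10)
}

// antigravityKey namespaces the same key under an "ag:" prefix.
func antigravityKey(a *account) string {
	return "ag:" + geminiKey(a)
}

func main() {
	withProject := &account{ID: 100, Credentials: map[string]any{"project_id": " my-project "}}
	noProject := &account{ID: 105} // nil Credentials: indexing a nil map is safe and falls through
	fmt.Println(geminiKey(withProject))      // my-project
	fmt.Println(antigravityKey(withProject)) // ag:my-project
	fmt.Println(geminiKey(noProject))        // account:105
}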
NewTokenRefreshService creates the token refresh service.
+func NewTokenRefreshService(
+	accountRepo AccountRepository,
+	oauthService *OAuthService,
+	openaiOAuthService *OpenAIOAuthService,
+	geminiOAuthService *GeminiOAuthService,
+	antigravityOAuthService *AntigravityOAuthService,
+	cacheInvalidator TokenCacheInvalidator,
+	cfg *config.Config,
+) *TokenRefreshService {
+	s := &TokenRefreshService{
+		accountRepo:      accountRepo,
+		cfg:              &cfg.TokenRefresh,
+		cacheInvalidator: cacheInvalidator,
+		stopCh:           make(chan struct{}),
+	}
+
+	// Register the platform-specific refreshers.
+	s.refreshers = []TokenRefresher{
+		NewClaudeTokenRefresher(oauthService),
+		NewOpenAITokenRefresher(openaiOAuthService),
+		NewGeminiTokenRefresher(geminiOAuthService),
+		NewAntigravityTokenRefresher(antigravityOAuthService),
+	}
+
+	return s
+}
+
+// Start launches the background refresh service.
+func (s *TokenRefreshService) Start() {
+	if !s.cfg.Enabled {
+		log.Println("[TokenRefresh] Service disabled by configuration")
+		return
+	}
+
+	s.wg.Add(1)
+	go s.refreshLoop()
+
+	log.Printf("[TokenRefresh] Service started (check every %d minutes, refresh %v hours before expiry)",
+		s.cfg.CheckIntervalMinutes, s.cfg.RefreshBeforeExpiryHours)
+}
+
+// Stop shuts the refresh service down.
+func (s *TokenRefreshService) Stop() {
+	close(s.stopCh)
+	s.wg.Wait()
+	log.Println("[TokenRefresh] Service stopped")
+}
+
+// refreshLoop runs the periodic refresh cycle.
+func (s *TokenRefreshService) refreshLoop() {
+	defer s.wg.Done()
+
+	// Compute the check interval; guard against misconfigured sub-minute values.
+	checkInterval := time.Duration(s.cfg.CheckIntervalMinutes) * time.Minute
+	if checkInterval < time.Minute {
+		checkInterval = 5 * time.Minute
+	}
+
+	ticker := time.NewTicker(checkInterval)
+	defer ticker.Stop()
+
+	// Run one check immediately at startup.
+	s.processRefresh()
+
+	for {
+		select {
+		case <-ticker.C:
+			s.processRefresh()
+		case <-s.stopCh:
+			return
+		}
+	}
+}
+
+// processRefresh performs a single refresh pass.
+func (s *TokenRefreshService) processRefresh() {
+	ctx := context.Background()
+
+	// Compute the refresh window.
+	refreshWindow := time.Duration(s.cfg.RefreshBeforeExpiryHours * float64(time.Hour))
+
+	// Fetch all accounts in active status.
+	accounts, err := s.listActiveAccounts(ctx)
+	if err != nil {
+		log.Printf("[TokenRefresh] Failed to list accounts: %v", err)
+		return
+	}
+
+	totalAccounts := len(accounts)
+	oauthAccounts := 0 // number of refreshable OAuth accounts
+	needsRefresh := 0  // number of accounts that need a refresh
+	refreshed, failed := 0, 0
+
+	for i := range accounts {
+		account := &accounts[i]
+
+		// Walk the refreshers and find the one that can handle this account.
+		for _, refresher := range s.refreshers {
+			if !refresher.CanRefresh(account) {
+				continue
+			}
+
+			oauthAccounts++
+
+			// Check whether a refresh is due.
+			if !refresher.NeedsRefresh(account, refreshWindow) {
+				break // no refresh needed, skip
+			}
+
+			needsRefresh++
+
+			// Perform the refresh.
+			if err := s.refreshWithRetry(ctx, account, refresher); err != nil {
+				log.Printf("[TokenRefresh] Account %d (%s) failed: %v", account.ID, account.Name, err)
+				failed++
+			} else {
+				log.Printf("[TokenRefresh] Account %d (%s) refreshed successfully", account.ID, account.Name)
+				refreshed++
+			}
+
+			// Each account is handled by exactly one refresher.
+			break
+		}
+	}
+
+	// Always log the cycle summary so the service's health can be tracked.
+	log.Printf("[TokenRefresh] Cycle complete: total=%d, oauth=%d, needs_refresh=%d, refreshed=%d, failed=%d",
+		totalAccounts, oauthAccounts, needsRefresh, refreshed, failed)
+}
+
+// listActiveAccounts fetches all accounts in active status.
+// ListActive is used so that tokens are refreshed for every active account
+// (including temporarily disabled ones).
+func (s *TokenRefreshService) listActiveAccounts(ctx context.Context) ([]Account, error) {
+	return s.accountRepo.ListActive(ctx)
+}
+
+// refreshWithRetry refreshes a token with retries.
+func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Account, refresher TokenRefresher) error {
+	var lastErr error
+
+	for attempt := 1; attempt <= s.cfg.MaxRetries; attempt++ {
+		newCredentials, err := refresher.Refresh(ctx, account)
+		if err == nil {
+			// Refresh succeeded: persist the account's new credentials.
+			account.Credentials = newCredentials
+			if err := s.accountRepo.Update(ctx, account); err != nil {
+				return fmt.Errorf("failed to save credentials: %w", err)
+			}
+			if s.cacheInvalidator != nil && account.Type == AccountTypeOAuth &&
+				(account.Platform == PlatformGemini || account.Platform == PlatformAntigravity) {
+				if err := s.cacheInvalidator.InvalidateToken(ctx, account); err != nil {
+					log.Printf("[TokenRefresh] Failed to invalidate token cache for account %d: %v", account.ID, err)
+				} else {
+					log.Printf("[TokenRefresh] Token cache invalidated for account %d", account.ID)
+				}
+			}
+			return nil
+		}
+
+		// Antigravity accounts: on a non-retryable error, mark the account as errored and return immediately.
+		if account.Platform == PlatformAntigravity && isNonRetryableRefreshError(err) {
+			errorMsg := fmt.Sprintf("Token refresh failed (non-retryable): %v", err)
+			if setErr := s.accountRepo.SetError(ctx, account.ID, errorMsg); setErr != nil {
+				log.Printf("[TokenRefresh] Failed to set error status for account %d: %v", account.ID, setErr)
+			}
+			return err
+		}
+
+		lastErr = err
+		log.Printf("[TokenRefresh] Account %d attempt %d/%d failed: %v",
+			account.ID, attempt, s.cfg.MaxRetries, err)
+
+		// If retries remain, wait before trying again.
+		if attempt < s.cfg.MaxRetries {
+			// Exponential backoff: 2^(attempt-1) * baseSeconds
+			// (e.g. with RetryBackoffSeconds=5 the waits are 5s, 10s, 20s, ...).
+			backoff := time.Duration(s.cfg.RetryBackoffSeconds) * time.Second * time.Duration(1<<(attempt-1))
+			time.Sleep(backoff)
+		}
+	}
+
+	// Antigravity accounts: other errors are only logged, and the account is not
+	// marked as errored (they may be transient network problems).
+	// Accounts on other platforms: mark as errored once retries are exhausted.
+	if account.Platform == PlatformAntigravity {
+		log.Printf("[TokenRefresh] Account %d: refresh failed after %d retries: %v", account.ID, s.cfg.MaxRetries, lastErr)
+	} else {
+		errorMsg := fmt.Sprintf("Token refresh failed after %d retries: %v", s.cfg.MaxRetries, lastErr)
+		if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil {
+			log.Printf("[TokenRefresh] Failed to set error status for account %d: %v", account.ID, err)
+		}
+	}
+
+	return lastErr
+}
+
+// isNonRetryableRefreshError reports whether a refresh error is non-retryable.
+// Such errors usually mean the credentials have been invalidated and the user
+// must re-authorize.
+func isNonRetryableRefreshError(err error) bool {
+	if err == nil {
+		return false
+	}
+	msg := strings.ToLower(err.Error())
+	nonRetryable := []string{
+		"invalid_grant",       // the refresh_token is no longer valid
+		"invalid_client",      // client misconfiguration
+		"unauthorized_client", // client not authorized
+		"access_denied",       // access denied
+	}
+	for _, needle := range nonRetryable {
+		if strings.Contains(msg, needle) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/backend/internal/service/token_refresh_service_test.go b/backend/internal/service/token_refresh_service_test.go
new file mode 100644
index 00000000..b11a0adc
--- /dev/null
+++ b/backend/internal/service/token_refresh_service_test.go
@@ -0,0 +1,361 @@
+//go:build unit
+
+package service
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/stretchr/testify/require"
+)
+
+type tokenRefreshAccountRepo struct {
+	mockAccountRepoForGemini
+	updateCalls   int
+	setErrorCalls int
+	lastAccount   *Account
+	updateErr     error
+}
+
+func (r *tokenRefreshAccountRepo) Update(ctx context.Context, account *Account) error {
+	r.updateCalls++
+	r.lastAccount = account
+	return r.updateErr
+}
+
+func (r *tokenRefreshAccountRepo) SetError(ctx context.Context, id int64, errorMsg string) error {
+	r.setErrorCalls++
+	return nil
+}
+
+type tokenCacheInvalidatorStub struct {
+	calls int
+	err   error
+}
+
+func (s *tokenCacheInvalidatorStub) InvalidateToken(ctx
context.Context, account *Account) error { + s.calls++ + return s.err +} + +type tokenRefresherStub struct { + credentials map[string]any + err error +} + +func (r *tokenRefresherStub) CanRefresh(account *Account) bool { + return true +} + +func (r *tokenRefresherStub) NeedsRefresh(account *Account, refreshWindowDuration time.Duration) bool { + return true +} + +func (r *tokenRefresherStub) Refresh(ctx context.Context, account *Account) (map[string]any, error) { + if r.err != nil { + return nil, r.err + } + return r.credentials, nil +} + +func TestTokenRefreshService_RefreshWithRetry_InvalidatesCache(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 5, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "new-token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) + require.Equal(t, "new-token", account.GetCredential("access_token")) +} + +func TestTokenRefreshService_RefreshWithRetry_InvalidatorErrorIgnored(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{err: errors.New("invalidate failed")} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 6, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) +} + +func TestTokenRefreshService_RefreshWithRetry_NilInvalidator(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, cfg) + account := &Account{ + ID: 7, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) +} + +// TestTokenRefreshService_RefreshWithRetry_Antigravity 测试 Antigravity 平台的缓存失效 +func TestTokenRefreshService_RefreshWithRetry_Antigravity(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 8, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "ag-token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + 
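+	// Exactly one credentials update and exactly one cache invalidation are expected.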
require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) // Antigravity 也应触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount 测试非 OAuth 账号不触发缓存失效 +func TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 9, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, // 非 OAuth + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) // 非 OAuth 不触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth 测试其他平台的 OAuth 账号不触发缓存失效 +func TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 10, + Platform: PlatformOpenAI, // 其他平台 + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) // 其他平台不触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_UpdateFailed 测试更新失败的情况 +func TestTokenRefreshService_RefreshWithRetry_UpdateFailed(t *testing.T) { + repo := &tokenRefreshAccountRepo{updateErr: errors.New("update failed")} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 11, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to save credentials") + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) // 更新失败时不应触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_RefreshFailed 测试刷新失败的情况 +func TestTokenRefreshService_RefreshWithRetry_RefreshFailed(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 2, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 12, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + err: errors.New("refresh failed"), + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.Error(t, err) + require.Equal(t, 0, repo.updateCalls) // 刷新失败不应更新 + require.Equal(t, 0, invalidator.calls) // 
refresh failure must not trigger cache invalidation
+	require.Equal(t, 1, repo.setErrorCalls) // error status should be set
+}
+
+// TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed verifies that an Antigravity refresh failure does not set error status.
+func TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed(t *testing.T) {
+	repo := &tokenRefreshAccountRepo{}
+	invalidator := &tokenCacheInvalidatorStub{}
+	cfg := &config.Config{
+		TokenRefresh: config.TokenRefreshConfig{
+			MaxRetries:          1,
+			RetryBackoffSeconds: 0,
+		},
+	}
+	service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+	account := &Account{
+		ID:       13,
+		Platform: PlatformAntigravity,
+		Type:     AccountTypeOAuth,
+	}
+	refresher := &tokenRefresherStub{
+		err: errors.New("network error"), // retryable error
+	}
+
+	err := service.refreshWithRetry(context.Background(), account, refresher)
+	require.Error(t, err)
+	require.Equal(t, 0, repo.updateCalls)
+	require.Equal(t, 0, invalidator.calls)
+	require.Equal(t, 0, repo.setErrorCalls) // retryable Antigravity errors do not set error status
+}
+
+// TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError covers non-retryable Antigravity errors.
+func TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError(t *testing.T) {
+	repo := &tokenRefreshAccountRepo{}
+	invalidator := &tokenCacheInvalidatorStub{}
+	cfg := &config.Config{
+		TokenRefresh: config.TokenRefreshConfig{
+			MaxRetries:          3,
+			RetryBackoffSeconds: 0,
+		},
+	}
+	service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+	account := &Account{
+		ID:       14,
+		Platform: PlatformAntigravity,
+		Type:     AccountTypeOAuth,
+	}
+	refresher := &tokenRefresherStub{
+		err: errors.New("invalid_grant: token revoked"), // non-retryable error
+	}
+
+	err := service.refreshWithRetry(context.Background(), account, refresher)
+	require.Error(t, err)
+	require.Equal(t, 0, repo.updateCalls)
+	require.Equal(t, 0, invalidator.calls)
+	require.Equal(t, 1, repo.setErrorCalls) // non-retryable errors should set error status
+}
+
+// TestIsNonRetryableRefreshError covers the non-retryable error classification.
+func TestIsNonRetryableRefreshError(t *testing.T) {
+	tests := []struct {
+		name     string
+		err      error
+		expected bool
+	}{
+		{name: "nil_error", err: nil, expected: false},
+		{name: "network_error", err: errors.New("network timeout"), expected: false},
+		{name: "invalid_grant", err: errors.New("invalid_grant"), expected: true},
+		{name: "invalid_client", err: errors.New("invalid_client"), expected: true},
+		{name: "unauthorized_client", err: errors.New("unauthorized_client"), expected: true},
+		{name: "access_denied", err: errors.New("access_denied"), expected: true},
+		{name: "invalid_grant_with_desc", err: errors.New("Error: invalid_grant - token revoked"), expected: true},
+		{name: "case_insensitive", err: errors.New("INVALID_GRANT"), expected: true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := isNonRetryableRefreshError(tt.err)
+			require.Equal(t, tt.expected, result)
+		})
+	}
+}
diff --git a/backend/internal/service/token_refresher.go b/backend/internal/service/token_refresher.go
new file mode 100644
index 00000000..214a290a
--- /dev/null
+++ b/backend/internal/service/token_refresher.go
@@ -0,0 +1,132 @@
+package service
+
+import (
+	"context"
+	"strconv"
+	"time"
+)
+
+// TokenRefresher defines the interface for platform-specific token refresh strategies.
+// New platforms (Anthropic/OpenAI/Gemini) can be supported by adding implementations of this interface.
+type TokenRefresher interface {
+	// CanRefresh reports whether this refresher can handle the given account.
+	CanRefresh(account *Account) bool
+
+	// NeedsRefresh reports whether the account's token needs to be refreshed.
+	NeedsRefresh(account *Account, refreshWindow time.Duration) bool
+
+	// Refresh performs the token refresh and returns the updated credentials.
+	// Note: the returned map should preserve all fields of the existing credentials and only update the token-related fields.
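+	//
+	// Sketch of a typical caller (hypothetical names; mirrors what
+	// TokenRefreshService.processRefresh does):
+	//
+	//	for _, r := range refreshers {
+	//		if !r.CanRefresh(acct) {
+	//			continue
+	//		}
+	//		if r.NeedsRefresh(acct, refreshWindow) {
+	//			newCreds, err := r.Refresh(ctx, acct)
+	//			_, _ = newCreds, err // persist on success, retry/flag the account on error
+	//		}
+	//		break
+	//	}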
+	Refresh(ctx context.Context, account *Account) (map[string]any, error)
+}
+
+// ClaudeTokenRefresher handles Anthropic/Claude OAuth token refresh.
+type ClaudeTokenRefresher struct {
+	oauthService *OAuthService
+}
+
+// NewClaudeTokenRefresher creates the Claude token refresher.
+func NewClaudeTokenRefresher(oauthService *OAuthService) *ClaudeTokenRefresher {
+	return &ClaudeTokenRefresher{
+		oauthService: oauthService,
+	}
+}
+
+// CanRefresh reports whether this refresher can handle the account.
+// It only handles oauth-type accounts on the anthropic platform.
+// setup-token accounts are also OAuth, but their tokens are valid for a year
+// and do not need frequent refreshing.
+func (r *ClaudeTokenRefresher) CanRefresh(account *Account) bool {
+	return account.Platform == PlatformAnthropic &&
+		account.Type == AccountTypeOAuth
+}
+
+// NeedsRefresh reports whether the token needs to be refreshed,
+// based on whether the expires_at field falls inside the refresh window.
+func (r *ClaudeTokenRefresher) NeedsRefresh(account *Account, refreshWindow time.Duration) bool {
+	expiresAt := account.GetCredentialAsTime("expires_at")
+	if expiresAt == nil {
+		return false
+	}
+	return time.Until(*expiresAt) < refreshWindow
+}
+
+// Refresh performs the token refresh.
+// It preserves all fields of the existing credentials and only updates the token-related fields.
+func (r *ClaudeTokenRefresher) Refresh(ctx context.Context, account *Account) (map[string]any, error) {
+	tokenInfo, err := r.oauthService.RefreshAccountToken(ctx, account)
+	if err != nil {
+		return nil, err
+	}
+
+	// Keep every field of the existing credentials.
+	newCredentials := make(map[string]any)
+	for k, v := range account.Credentials {
+		newCredentials[k] = v
+	}
+
+	// Update only the token-related fields.
+	// Note: expires_at and expires_in must be stored as strings because GetCredential only returns string values.
+	newCredentials["access_token"] = tokenInfo.AccessToken
+	newCredentials["token_type"] = tokenInfo.TokenType
+	newCredentials["expires_in"] = strconv.FormatInt(tokenInfo.ExpiresIn, 10)
+	newCredentials["expires_at"] = strconv.FormatInt(tokenInfo.ExpiresAt, 10)
+	if tokenInfo.RefreshToken != "" {
+		newCredentials["refresh_token"] = tokenInfo.RefreshToken
+	}
+	if tokenInfo.Scope != "" {
+		newCredentials["scope"] = tokenInfo.Scope
+	}
+
+	return newCredentials, nil
+}
+
+// OpenAITokenRefresher handles OpenAI OAuth token refresh.
+type OpenAITokenRefresher struct {
+	openaiOAuthService *OpenAIOAuthService
+}
+
+// NewOpenAITokenRefresher creates the OpenAI token refresher.
+func NewOpenAITokenRefresher(openaiOAuthService *OpenAIOAuthService) *OpenAITokenRefresher {
+	return &OpenAITokenRefresher{
+		openaiOAuthService: openaiOAuthService,
+	}
+}
+
+// CanRefresh reports whether this refresher can handle the account.
+// It only handles oauth-type accounts on the openai platform.
+func (r *OpenAITokenRefresher) CanRefresh(account *Account) bool {
+	return account.Platform == PlatformOpenAI &&
+		account.Type == AccountTypeOAuth
+}
+
+// NeedsRefresh reports whether the token needs to be refreshed,
+// based on whether the expires_at field falls inside the refresh window.
+func (r *OpenAITokenRefresher) NeedsRefresh(account *Account, refreshWindow time.Duration) bool {
+	expiresAt := account.GetOpenAITokenExpiresAt()
+	if expiresAt == nil {
+		return false
+	}
+
+	return time.Until(*expiresAt) < refreshWindow
+}
+
+// Refresh performs the token refresh.
+// It preserves all fields of the existing credentials and only updates the token-related fields.
+func (r *OpenAITokenRefresher) Refresh(ctx context.Context, account *Account) (map[string]any, error) {
+	tokenInfo, err := r.openaiOAuthService.RefreshAccountToken(ctx, account)
+	if err != nil {
+		return nil, err
+	}
+
+	// Build the new credentials via the service helper, keeping the original fields.
+	newCredentials := r.openaiOAuthService.BuildAccountCredentials(tokenInfo)
+
+	// Carry over any non-token fields from the existing credentials.
+	for k, v := range account.Credentials {
+		if _, exists := newCredentials[k]; !exists {
+			newCredentials[k] = v
+		}
+	}
+
+	return newCredentials, nil
+}
diff --git a/backend/internal/service/token_refresher_test.go b/backend/internal/service/token_refresher_test.go
new file mode 100644
index
00000000..c7505037 --- /dev/null +++ b/backend/internal/service/token_refresher_test.go @@ -0,0 +1,228 @@ +//go:build unit + +package service + +import ( + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestClaudeTokenRefresher_NeedsRefresh(t *testing.T) { + refresher := &ClaudeTokenRefresher{} + refreshWindow := 30 * time.Minute + + tests := []struct { + name string + credentials map[string]any + wantRefresh bool + }{ + { + name: "expires_at as string - expired", + credentials: map[string]any{ + "expires_at": "1000", // 1970-01-01 00:16:40 UTC, 已过期 + }, + wantRefresh: true, + }, + { + name: "expires_at as float64 - expired", + credentials: map[string]any{ + "expires_at": float64(1000), // 数字类型,已过期 + }, + wantRefresh: true, + }, + { + name: "expires_at as RFC3339 - expired", + credentials: map[string]any{ + "expires_at": "1970-01-01T00:00:00Z", // RFC3339 格式,已过期 + }, + wantRefresh: true, + }, + { + name: "expires_at as string - far future", + credentials: map[string]any{ + "expires_at": "9999999999", // 远未来 + }, + wantRefresh: false, + }, + { + name: "expires_at as float64 - far future", + credentials: map[string]any{ + "expires_at": float64(9999999999), // 远未来,数字类型 + }, + wantRefresh: false, + }, + { + name: "expires_at as RFC3339 - far future", + credentials: map[string]any{ + "expires_at": "2099-12-31T23:59:59Z", // RFC3339 格式,远未来 + }, + wantRefresh: false, + }, + { + name: "expires_at missing", + credentials: map[string]any{}, + wantRefresh: false, + }, + { + name: "expires_at is nil", + credentials: map[string]any{ + "expires_at": nil, + }, + wantRefresh: false, + }, + { + name: "expires_at is invalid string", + credentials: map[string]any{ + "expires_at": "invalid", + }, + wantRefresh: false, + }, + { + name: "credentials is nil", + credentials: nil, + wantRefresh: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{ + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: tt.credentials, + } + + got := refresher.NeedsRefresh(account, refreshWindow) + require.Equal(t, tt.wantRefresh, got) + }) + } +} + +func TestClaudeTokenRefresher_NeedsRefresh_WithinWindow(t *testing.T) { + refresher := &ClaudeTokenRefresher{} + refreshWindow := 30 * time.Minute + + // 设置一个在刷新窗口内的时间(当前时间 + 15分钟) + expiresAt := time.Now().Add(15 * time.Minute).Unix() + + tests := []struct { + name string + credentials map[string]any + }{ + { + name: "string type - within refresh window", + credentials: map[string]any{ + "expires_at": strconv.FormatInt(expiresAt, 10), + }, + }, + { + name: "float64 type - within refresh window", + credentials: map[string]any{ + "expires_at": float64(expiresAt), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{ + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: tt.credentials, + } + + got := refresher.NeedsRefresh(account, refreshWindow) + require.True(t, got, "should need refresh when within window") + }) + } +} + +func TestClaudeTokenRefresher_NeedsRefresh_OutsideWindow(t *testing.T) { + refresher := &ClaudeTokenRefresher{} + refreshWindow := 30 * time.Minute + + // 设置一个在刷新窗口外的时间(当前时间 + 1小时) + expiresAt := time.Now().Add(1 * time.Hour).Unix() + + tests := []struct { + name string + credentials map[string]any + }{ + { + name: "string type - outside refresh window", + credentials: map[string]any{ + "expires_at": strconv.FormatInt(expiresAt, 10), + }, + }, + { + name: "float64 type - outside refresh 
window", + credentials: map[string]any{ + "expires_at": float64(expiresAt), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{ + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: tt.credentials, + } + + got := refresher.NeedsRefresh(account, refreshWindow) + require.False(t, got, "should not need refresh when outside window") + }) + } +} + +func TestClaudeTokenRefresher_CanRefresh(t *testing.T) { + refresher := &ClaudeTokenRefresher{} + + tests := []struct { + name string + platform string + accType string + want bool + }{ + { + name: "anthropic oauth - can refresh", + platform: PlatformAnthropic, + accType: AccountTypeOAuth, + want: true, + }, + { + name: "anthropic api-key - cannot refresh", + platform: PlatformAnthropic, + accType: AccountTypeAPIKey, + want: false, + }, + { + name: "openai oauth - cannot refresh", + platform: PlatformOpenAI, + accType: AccountTypeOAuth, + want: false, + }, + { + name: "gemini oauth - cannot refresh", + platform: PlatformGemini, + accType: AccountTypeOAuth, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + account := &Account{ + Platform: tt.platform, + Type: tt.accType, + } + + got := refresher.CanRefresh(account) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/backend/internal/service/turnstile_service.go b/backend/internal/service/turnstile_service.go new file mode 100644 index 00000000..4afcc335 --- /dev/null +++ b/backend/internal/service/turnstile_service.go @@ -0,0 +1,105 @@ +package service + +import ( + "context" + "fmt" + "log" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ( + ErrTurnstileVerificationFailed = infraerrors.BadRequest("TURNSTILE_VERIFICATION_FAILED", "turnstile verification failed") + ErrTurnstileNotConfigured = infraerrors.ServiceUnavailable("TURNSTILE_NOT_CONFIGURED", "turnstile not configured") + ErrTurnstileInvalidSecretKey = infraerrors.BadRequest("TURNSTILE_INVALID_SECRET_KEY", "invalid turnstile secret key") +) + +// TurnstileVerifier 验证 Turnstile token 的接口 +type TurnstileVerifier interface { + VerifyToken(ctx context.Context, secretKey, token, remoteIP string) (*TurnstileVerifyResponse, error) +} + +// TurnstileService Turnstile 验证服务 +type TurnstileService struct { + settingService *SettingService + verifier TurnstileVerifier +} + +// TurnstileVerifyResponse Cloudflare Turnstile 验证响应 +type TurnstileVerifyResponse struct { + Success bool `json:"success"` + ChallengeTS string `json:"challenge_ts"` + Hostname string `json:"hostname"` + ErrorCodes []string `json:"error-codes"` + Action string `json:"action"` + CData string `json:"cdata"` +} + +// NewTurnstileService 创建 Turnstile 服务实例 +func NewTurnstileService(settingService *SettingService, verifier TurnstileVerifier) *TurnstileService { + return &TurnstileService{ + settingService: settingService, + verifier: verifier, + } +} + +// VerifyToken 验证 Turnstile token +func (s *TurnstileService) VerifyToken(ctx context.Context, token string, remoteIP string) error { + // 检查是否启用 Turnstile + if !s.settingService.IsTurnstileEnabled(ctx) { + log.Println("[Turnstile] Disabled, skipping verification") + return nil + } + + // 获取 Secret Key + secretKey := s.settingService.GetTurnstileSecretKey(ctx) + if secretKey == "" { + log.Println("[Turnstile] Secret key not configured") + return ErrTurnstileNotConfigured + } + + // 如果 token 为空,返回错误 + if token == "" { + log.Println("[Turnstile] Token is empty") + return 
ErrTurnstileVerificationFailed
+	}
+
+	log.Printf("[Turnstile] Verifying token for IP: %s", remoteIP)
+	result, err := s.verifier.VerifyToken(ctx, secretKey, token, remoteIP)
+	if err != nil {
+		log.Printf("[Turnstile] Request failed: %v", err)
+		return fmt.Errorf("send request: %w", err)
+	}
+
+	if !result.Success {
+		log.Printf("[Turnstile] Verification failed, error codes: %v", result.ErrorCodes)
+		return ErrTurnstileVerificationFailed
+	}
+
+	log.Println("[Turnstile] Verification successful")
+	return nil
+}
+
+// IsEnabled reports whether Turnstile is enabled.
+func (s *TurnstileService) IsEnabled(ctx context.Context) bool {
+	return s.settingService.IsTurnstileEnabled(ctx)
+}
+
+// ValidateSecretKey checks whether a Turnstile secret key is valid.
+func (s *TurnstileService) ValidateSecretKey(ctx context.Context, secretKey string) error {
+	// Send a verification request with a test token to probe whether the secret key is valid.
+	result, err := s.verifier.VerifyToken(ctx, secretKey, "test-validation", "")
+	if err != nil {
+		return fmt.Errorf("validate secret key: %w", err)
+	}
+
+	// Look for an invalid-input-secret error code.
+	for _, code := range result.ErrorCodes {
+		if code == "invalid-input-secret" {
+			return ErrTurnstileInvalidSecretKey
+		}
+	}
+
+	// Any other error (such as invalid-input-response) means the secret key itself is valid.
+	return nil
+}
diff --git a/backend/internal/service/update_service.go b/backend/internal/service/update_service.go
new file mode 100644
index 00000000..34ad4610
--- /dev/null
+++ b/backend/internal/service/update_service.go
@@ -0,0 +1,540 @@
+package service
+
+import (
+	"archive/tar"
+	"bufio"
+	"compress/gzip"
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	updateCacheKey = "update_check_cache"
+	updateCacheTTL = 1200 // 20 minutes
+	githubRepo     = "Wei-Shaw/sub2api"
+
+	// Security: allowed download domains for updates
+	allowedDownloadHost = "github.com"
+	allowedAssetHost    = "objects.githubusercontent.com"
+
+	// Security: max download size (500MB)
+	maxDownloadSize = 500 * 1024 * 1024
+)
+
+// UpdateCache defines cache operations for update service
+type UpdateCache interface {
+	GetUpdateInfo(ctx context.Context) (string, error)
+	SetUpdateInfo(ctx context.Context, data string, ttl time.Duration) error
+}
+
+// GitHubReleaseClient is the interface for fetching GitHub release information.
+type GitHubReleaseClient interface {
+	FetchLatestRelease(ctx context.Context, repo string) (*GitHubRelease, error)
+	DownloadFile(ctx context.Context, url, dest string, maxSize int64) error
+	FetchChecksumFile(ctx context.Context, url string) ([]byte, error)
+}
+
+// UpdateService handles software updates
+type UpdateService struct {
+	cache          UpdateCache
+	githubClient   GitHubReleaseClient
+	currentVersion string
+	buildType      string // "source" for manual builds, "release" for CI builds
+}
+
+// NewUpdateService creates a new UpdateService
+func NewUpdateService(cache UpdateCache, githubClient GitHubReleaseClient, version, buildType string) *UpdateService {
+	return &UpdateService{
+		cache:          cache,
+		githubClient:   githubClient,
+		currentVersion: version,
+		buildType:      buildType,
+	}
+}
+
+// UpdateInfo contains update information
+type UpdateInfo struct {
+	CurrentVersion string       `json:"current_version"`
+	LatestVersion  string       `json:"latest_version"`
+	HasUpdate      bool         `json:"has_update"`
+	ReleaseInfo    *ReleaseInfo `json:"release_info,omitempty"`
+	Cached         bool         `json:"cached"`
+	Warning        string       `json:"warning,omitempty"`
+	BuildType      string       `json:"build_type"` // "source" or "release"
+}
+
+// ReleaseInfo
contains GitHub release details +type ReleaseInfo struct { + Name string `json:"name"` + Body string `json:"body"` + PublishedAt string `json:"published_at"` + HTMLURL string `json:"html_url"` + Assets []Asset `json:"assets,omitempty"` +} + +// Asset represents a release asset +type Asset struct { + Name string `json:"name"` + DownloadURL string `json:"download_url"` + Size int64 `json:"size"` +} + +// GitHubRelease represents GitHub API response +type GitHubRelease struct { + TagName string `json:"tag_name"` + Name string `json:"name"` + Body string `json:"body"` + PublishedAt string `json:"published_at"` + HTMLURL string `json:"html_url"` + Assets []GitHubAsset `json:"assets"` +} + +type GitHubAsset struct { + Name string `json:"name"` + BrowserDownloadURL string `json:"browser_download_url"` + Size int64 `json:"size"` +} + +// CheckUpdate checks for available updates +func (s *UpdateService) CheckUpdate(ctx context.Context, force bool) (*UpdateInfo, error) { + // Try cache first + if !force { + if cached, err := s.getFromCache(ctx); err == nil && cached != nil { + return cached, nil + } + } + + // Fetch from GitHub + info, err := s.fetchLatestRelease(ctx) + if err != nil { + // Return cached on error + if cached, cacheErr := s.getFromCache(ctx); cacheErr == nil && cached != nil { + cached.Warning = "Using cached data: " + err.Error() + return cached, nil + } + return &UpdateInfo{ + CurrentVersion: s.currentVersion, + LatestVersion: s.currentVersion, + HasUpdate: false, + Warning: err.Error(), + BuildType: s.buildType, + }, nil + } + + // Cache result + s.saveToCache(ctx, info) + return info, nil +} + +// PerformUpdate downloads and applies the update +// Uses atomic file replacement pattern for safe in-place updates +func (s *UpdateService) PerformUpdate(ctx context.Context) error { + info, err := s.CheckUpdate(ctx, true) + if err != nil { + return err + } + + if !info.HasUpdate { + return fmt.Errorf("no update available") + } + + // Find matching archive and checksum for current platform + archiveName := s.getArchiveName() + var downloadURL string + var checksumURL string + + for _, asset := range info.ReleaseInfo.Assets { + if strings.Contains(asset.Name, archiveName) && !strings.HasSuffix(asset.Name, ".txt") { + downloadURL = asset.DownloadURL + } + if asset.Name == "checksums.txt" { + checksumURL = asset.DownloadURL + } + } + + if downloadURL == "" { + return fmt.Errorf("no compatible release found for %s/%s", runtime.GOOS, runtime.GOARCH) + } + + // SECURITY: Validate download URL is from trusted domain + if err := validateDownloadURL(downloadURL); err != nil { + return fmt.Errorf("invalid download URL: %w", err) + } + if checksumURL != "" { + if err := validateDownloadURL(checksumURL); err != nil { + return fmt.Errorf("invalid checksum URL: %w", err) + } + } + + // Get current executable path + exePath, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to get executable path: %w", err) + } + exePath, err = filepath.EvalSymlinks(exePath) + if err != nil { + return fmt.Errorf("failed to resolve symlinks: %w", err) + } + + exeDir := filepath.Dir(exePath) + + // Create temp directory in the SAME directory as executable + // This ensures os.Rename is atomic (same filesystem) + tempDir, err := os.MkdirTemp(exeDir, ".sub2api-update-*") + if err != nil { + return fmt.Errorf("failed to create temp dir: %w", err) + } + defer func() { _ = os.RemoveAll(tempDir) }() + + // Download archive + archivePath := filepath.Join(tempDir, filepath.Base(downloadURL)) + if err := 
s.downloadFile(ctx, downloadURL, archivePath); err != nil { + return fmt.Errorf("download failed: %w", err) + } + + // Verify checksum if available + if checksumURL != "" { + if err := s.verifyChecksum(ctx, archivePath, checksumURL); err != nil { + return fmt.Errorf("checksum verification failed: %w", err) + } + } + + // Extract binary from archive + newBinaryPath := filepath.Join(tempDir, "sub2api") + if err := s.extractBinary(archivePath, newBinaryPath); err != nil { + return fmt.Errorf("extraction failed: %w", err) + } + + // Set executable permission before replacement + if err := os.Chmod(newBinaryPath, 0755); err != nil { + return fmt.Errorf("chmod failed: %w", err) + } + + // Atomic replacement using rename pattern: + // 1. Rename current -> backup (atomic on Unix) + // 2. Rename new -> current (atomic on Unix, same filesystem) + // If step 2 fails, restore backup + backupPath := exePath + ".backup" + + // Remove old backup if exists + _ = os.Remove(backupPath) + + // Step 1: Move current binary to backup + if err := os.Rename(exePath, backupPath); err != nil { + return fmt.Errorf("backup failed: %w", err) + } + + // Step 2: Move new binary to target location (atomic, same filesystem) + if err := os.Rename(newBinaryPath, exePath); err != nil { + // Restore backup on failure + if restoreErr := os.Rename(backupPath, exePath); restoreErr != nil { + return fmt.Errorf("replace failed and restore failed: %w (restore error: %v)", err, restoreErr) + } + return fmt.Errorf("replace failed (restored backup): %w", err) + } + + // Success - backup file is kept for rollback capability + // It will be cleaned up on next successful update + return nil +} + +// Rollback restores the previous version +func (s *UpdateService) Rollback() error { + exePath, err := os.Executable() + if err != nil { + return fmt.Errorf("failed to get executable path: %w", err) + } + exePath, err = filepath.EvalSymlinks(exePath) + if err != nil { + return fmt.Errorf("failed to resolve symlinks: %w", err) + } + + backupFile := exePath + ".backup" + if _, err := os.Stat(backupFile); os.IsNotExist(err) { + return fmt.Errorf("no backup found") + } + + // Replace current with backup + if err := os.Rename(backupFile, exePath); err != nil { + return fmt.Errorf("rollback failed: %w", err) + } + + return nil +} + +func (s *UpdateService) fetchLatestRelease(ctx context.Context) (*UpdateInfo, error) { + release, err := s.githubClient.FetchLatestRelease(ctx, githubRepo) + if err != nil { + return nil, err + } + + latestVersion := strings.TrimPrefix(release.TagName, "v") + + assets := make([]Asset, len(release.Assets)) + for i, a := range release.Assets { + assets[i] = Asset{ + Name: a.Name, + DownloadURL: a.BrowserDownloadURL, + Size: a.Size, + } + } + + return &UpdateInfo{ + CurrentVersion: s.currentVersion, + LatestVersion: latestVersion, + HasUpdate: compareVersions(s.currentVersion, latestVersion) < 0, + ReleaseInfo: &ReleaseInfo{ + Name: release.Name, + Body: release.Body, + PublishedAt: release.PublishedAt, + HTMLURL: release.HTMLURL, + Assets: assets, + }, + Cached: false, + BuildType: s.buildType, + }, nil +} + +func (s *UpdateService) downloadFile(ctx context.Context, downloadURL, dest string) error { + return s.githubClient.DownloadFile(ctx, downloadURL, dest, maxDownloadSize) +} + +func (s *UpdateService) getArchiveName() string { + osName := runtime.GOOS + arch := runtime.GOARCH + return fmt.Sprintf("%s_%s", osName, arch) +} + +// validateDownloadURL checks if the URL is from an allowed domain +// SECURITY: This prevents 
SSRF and ensures downloads only come from trusted GitHub domains
+func validateDownloadURL(rawURL string) error {
+	parsedURL, err := url.Parse(rawURL)
+	if err != nil {
+		return fmt.Errorf("invalid URL: %w", err)
+	}
+
+	// Must be HTTPS
+	if parsedURL.Scheme != "https" {
+		return fmt.Errorf("only HTTPS URLs are allowed")
+	}
+
+	// Check against allowed hosts
+	host := parsedURL.Host
+	// GitHub release URLs can be from github.com or objects.githubusercontent.com
+	if host != allowedDownloadHost &&
+		!strings.HasSuffix(host, "."+allowedDownloadHost) &&
+		host != allowedAssetHost &&
+		!strings.HasSuffix(host, "."+allowedAssetHost) {
+		return fmt.Errorf("download from untrusted host: %s", host)
+	}
+
+	return nil
+}
+
+func (s *UpdateService) verifyChecksum(ctx context.Context, filePath, checksumURL string) error {
+	// Download checksums file
+	checksumData, err := s.githubClient.FetchChecksumFile(ctx, checksumURL)
+	if err != nil {
+		return fmt.Errorf("failed to download checksums: %w", err)
+	}
+
+	// Calculate file hash
+	f, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+
+	h := sha256.New()
+	if _, err := io.Copy(h, f); err != nil {
+		return err
+	}
+	actualHash := hex.EncodeToString(h.Sum(nil))
+
+	// Find expected hash in checksums file
+	fileName := filepath.Base(filePath)
+	scanner := bufio.NewScanner(strings.NewReader(string(checksumData)))
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		if len(parts) == 2 && parts[1] == fileName {
+			if parts[0] == actualHash {
+				return nil
+			}
+			return fmt.Errorf("checksum mismatch: expected %s, got %s", parts[0], actualHash)
+		}
+	}
+
+	return fmt.Errorf("checksum not found for %s", fileName)
+}
+
+func (s *UpdateService) extractBinary(archivePath, destPath string) error {
+	f, err := os.Open(archivePath)
+	if err != nil {
+		return err
+	}
+	defer func() { _ = f.Close() }()
+
+	var reader io.Reader = f
+
+	// Handle gzip compression
+	if strings.HasSuffix(archivePath, ".gz") || strings.HasSuffix(archivePath, ".tar.gz") || strings.HasSuffix(archivePath, ".tgz") {
+		gzr, err := gzip.NewReader(f)
+		if err != nil {
+			return err
+		}
+		defer func() { _ = gzr.Close() }()
+		reader = gzr
+	}
+
+	// Handle tar archives (a .tgz file is a gzip-compressed tar archive too,
+	// even though its name does not contain ".tar")
+	if strings.Contains(archivePath, ".tar") || strings.HasSuffix(archivePath, ".tgz") {
+		tr := tar.NewReader(reader)
+		for {
+			hdr, err := tr.Next()
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				return err
+			}
+
+			// SECURITY: Prevent Zip Slip / Path Traversal attack
+			// Only allow files with safe base names, no directory traversal
+			baseName := filepath.Base(hdr.Name)
+
+			// Check for path traversal attempts
+			if strings.Contains(hdr.Name, "..") {
+				return fmt.Errorf("path traversal attempt detected: %s", hdr.Name)
+			}
+
+			// Validate the entry is a regular file
+			if hdr.Typeflag != tar.TypeReg {
+				continue // Skip directories and special files
+			}
+
+			// Only extract the specific binary we need
+			if baseName == "sub2api" || baseName == "sub2api.exe" {
+				// Additional security: limit file size (max 500MB)
+				const maxBinarySize = 500 * 1024 * 1024
+				if hdr.Size > maxBinarySize {
+					return fmt.Errorf("binary too large: %d bytes (max %d)", hdr.Size, maxBinarySize)
+				}
+
+				out, err := os.Create(destPath)
+				if err != nil {
+					return err
+				}
+
+				// Use LimitReader to prevent decompression bombs
+				limited := io.LimitReader(tr, maxBinarySize)
+				if _, err := io.Copy(out, limited); err != nil {
+					_ = out.Close()
+					return err
+				}
+				if err := out.Close(); err != nil {
+					return err
+				}
+				return nil
+			}
+		}
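+		// Reaching this point means the whole tar stream was scanned without
+		// finding a sub2api/sub2api.exe entry, so the archive is rejected.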
+ return fmt.Errorf("binary not found in archive") + } + + // Direct copy for non-tar files (with size limit) + const maxBinarySize = 500 * 1024 * 1024 + out, err := os.Create(destPath) + if err != nil { + return err + } + + limited := io.LimitReader(reader, maxBinarySize) + if _, err := io.Copy(out, limited); err != nil { + _ = out.Close() + return err + } + return out.Close() +} + +func (s *UpdateService) getFromCache(ctx context.Context) (*UpdateInfo, error) { + data, err := s.cache.GetUpdateInfo(ctx) + if err != nil { + return nil, err + } + + var cached struct { + Latest string `json:"latest"` + ReleaseInfo *ReleaseInfo `json:"release_info"` + Timestamp int64 `json:"timestamp"` + } + if err := json.Unmarshal([]byte(data), &cached); err != nil { + return nil, err + } + + if time.Now().Unix()-cached.Timestamp > updateCacheTTL { + return nil, fmt.Errorf("cache expired") + } + + return &UpdateInfo{ + CurrentVersion: s.currentVersion, + LatestVersion: cached.Latest, + HasUpdate: compareVersions(s.currentVersion, cached.Latest) < 0, + ReleaseInfo: cached.ReleaseInfo, + Cached: true, + BuildType: s.buildType, + }, nil +} + +func (s *UpdateService) saveToCache(ctx context.Context, info *UpdateInfo) { + cacheData := struct { + Latest string `json:"latest"` + ReleaseInfo *ReleaseInfo `json:"release_info"` + Timestamp int64 `json:"timestamp"` + }{ + Latest: info.LatestVersion, + ReleaseInfo: info.ReleaseInfo, + Timestamp: time.Now().Unix(), + } + + data, _ := json.Marshal(cacheData) + _ = s.cache.SetUpdateInfo(ctx, string(data), time.Duration(updateCacheTTL)*time.Second) +} + +// compareVersions compares two semantic versions +func compareVersions(current, latest string) int { + currentParts := parseVersion(current) + latestParts := parseVersion(latest) + + for i := 0; i < 3; i++ { + if currentParts[i] < latestParts[i] { + return -1 + } + if currentParts[i] > latestParts[i] { + return 1 + } + } + return 0 +} + +func parseVersion(v string) [3]int { + v = strings.TrimPrefix(v, "v") + parts := strings.Split(v, ".") + result := [3]int{0, 0, 0} + for i := 0; i < len(parts) && i < 3; i++ { + if parsed, err := strconv.Atoi(parts[i]); err == nil { + result[i] = parsed + } + } + return result +} diff --git a/backend/internal/service/usage_log.go b/backend/internal/service/usage_log.go new file mode 100644 index 00000000..3b0e934f --- /dev/null +++ b/backend/internal/service/usage_log.go @@ -0,0 +1,61 @@ +package service + +import "time" + +const ( + BillingTypeBalance int8 = 0 // 钱包余额 + BillingTypeSubscription int8 = 1 // 订阅套餐 +) + +type UsageLog struct { + ID int64 + UserID int64 + APIKeyID int64 + AccountID int64 + RequestID string + Model string + + GroupID *int64 + SubscriptionID *int64 + + InputTokens int + OutputTokens int + CacheCreationTokens int + CacheReadTokens int + + CacheCreation5mTokens int + CacheCreation1hTokens int + + InputCost float64 + OutputCost float64 + CacheCreationCost float64 + CacheReadCost float64 + TotalCost float64 + ActualCost float64 + RateMultiplier float64 + // AccountRateMultiplier 账号计费倍率快照(nil 表示历史数据,按 1.0 处理) + AccountRateMultiplier *float64 + + BillingType int8 + Stream bool + DurationMs *int + FirstTokenMs *int + UserAgent *string + IPAddress *string + + // 图片生成字段 + ImageCount int + ImageSize *string + + CreatedAt time.Time + + User *User + APIKey *APIKey + Account *Account + Group *Group + Subscription *UserSubscription +} + +func (u *UsageLog) TotalTokens() int { + return u.InputTokens + u.OutputTokens + u.CacheCreationTokens + u.CacheReadTokens +} diff --git 
a/backend/internal/service/usage_service.go b/backend/internal/service/usage_service.go
new file mode 100644
index 00000000..aa0a5b87
--- /dev/null
+++ b/backend/internal/service/usage_service.go
@@ -0,0 +1,343 @@
+package service
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	dbent "github.com/Wei-Shaw/sub2api/ent"
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+	"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
+)
+
+var (
+	ErrUsageLogNotFound = infraerrors.NotFound("USAGE_LOG_NOT_FOUND", "usage log not found")
+)
+
+// CreateUsageLogRequest is the request for creating a usage log.
+type CreateUsageLogRequest struct {
+	UserID                int64   `json:"user_id"`
+	APIKeyID              int64   `json:"api_key_id"`
+	AccountID             int64   `json:"account_id"`
+	RequestID             string  `json:"request_id"`
+	Model                 string  `json:"model"`
+	InputTokens           int     `json:"input_tokens"`
+	OutputTokens          int     `json:"output_tokens"`
+	CacheCreationTokens   int     `json:"cache_creation_tokens"`
+	CacheReadTokens       int     `json:"cache_read_tokens"`
+	CacheCreation5mTokens int     `json:"cache_creation_5m_tokens"`
+	CacheCreation1hTokens int     `json:"cache_creation_1h_tokens"`
+	InputCost             float64 `json:"input_cost"`
+	OutputCost            float64 `json:"output_cost"`
+	CacheCreationCost     float64 `json:"cache_creation_cost"`
+	CacheReadCost         float64 `json:"cache_read_cost"`
+	TotalCost             float64 `json:"total_cost"`
+	ActualCost            float64 `json:"actual_cost"`
+	RateMultiplier        float64 `json:"rate_multiplier"`
+	Stream                bool    `json:"stream"`
+	DurationMs            *int    `json:"duration_ms"`
+}
+
+// UsageStats aggregates usage statistics.
+type UsageStats struct {
+	TotalRequests     int64   `json:"total_requests"`
+	TotalInputTokens  int64   `json:"total_input_tokens"`
+	TotalOutputTokens int64   `json:"total_output_tokens"`
+	TotalCacheTokens  int64   `json:"total_cache_tokens"`
+	TotalTokens       int64   `json:"total_tokens"`
+	TotalCost         float64 `json:"total_cost"`
+	TotalActualCost   float64 `json:"total_actual_cost"`
+	AverageDurationMs float64 `json:"average_duration_ms"`
+}
+
+// UsageService provides usage statistics.
+type UsageService struct {
+	usageRepo            UsageLogRepository
+	userRepo             UserRepository
+	entClient            *dbent.Client
+	authCacheInvalidator APIKeyAuthCacheInvalidator
+}
+
+// NewUsageService creates the usage statistics service.
+func NewUsageService(usageRepo UsageLogRepository, userRepo UserRepository, entClient *dbent.Client, authCacheInvalidator APIKeyAuthCacheInvalidator) *UsageService {
+	return &UsageService{
+		usageRepo:            usageRepo,
+		userRepo:             userRepo,
+		entClient:            entClient,
+		authCacheInvalidator: authCacheInvalidator,
+	}
+}
+
+// Create writes a usage log entry.
+func (s *UsageService) Create(ctx context.Context, req CreateUsageLogRequest) (*UsageLog, error) {
+	// Run the usage-log insert and the balance deduction in one database transaction
+	// so they are atomic, avoiding both double-charging and missed charges.
+	tx, err := s.entClient.Tx(ctx)
+	if err != nil && !errors.Is(err, dbent.ErrTxStarted) {
+		return nil, fmt.Errorf("begin transaction: %w", err)
+	}
+
+	txCtx := ctx
+	if err == nil {
+		defer func() { _ = tx.Rollback() }()
+		txCtx = dbent.NewTxContext(ctx, tx)
+	}
+
+	// Verify the user exists.
+	_, err = s.userRepo.GetByID(txCtx, req.UserID)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+
+	// Build the usage log.
+	usageLog := &UsageLog{
+		UserID:                req.UserID,
+		APIKeyID:              req.APIKeyID,
+		AccountID:             req.AccountID,
+		RequestID:             req.RequestID,
+		Model:                 req.Model,
+		InputTokens:           req.InputTokens,
+		OutputTokens:          req.OutputTokens,
+		CacheCreationTokens:   req.CacheCreationTokens,
+		CacheReadTokens:       req.CacheReadTokens,
+		CacheCreation5mTokens: req.CacheCreation5mTokens,
+		CacheCreation1hTokens: req.CacheCreation1hTokens,
+		InputCost:             req.InputCost,
+		OutputCost:            req.OutputCost,
+		CacheCreationCost:     req.CacheCreationCost,
+		CacheReadCost:         req.CacheReadCost,
+		TotalCost:             req.TotalCost,
+		ActualCost:            req.ActualCost,
+		RateMultiplier:        req.RateMultiplier,
+		Stream:                req.Stream,
+		DurationMs:            req.DurationMs,
+	}
+
+	inserted, err := s.usageRepo.Create(txCtx, usageLog)
+	if err != nil {
+		return nil, fmt.Errorf("create usage log: %w", err)
+	}
+
+	// Deduct the user's balance.
+	balanceUpdated := false
+	if inserted && req.ActualCost > 0 {
+		if err := s.userRepo.UpdateBalance(txCtx, req.UserID, -req.ActualCost); err != nil {
+			return nil, fmt.Errorf("update user balance: %w", err)
+		}
+		balanceUpdated = true
+	}
+
+	if tx != nil {
+		if err := tx.Commit(); err != nil {
+			return nil, fmt.Errorf("commit transaction: %w", err)
+		}
+	}
+
+	s.invalidateUsageCaches(ctx, req.UserID, balanceUpdated)
+
+	return usageLog, nil
+}
+
+func (s *UsageService) invalidateUsageCaches(ctx context.Context, userID int64, balanceUpdated bool) {
+	if !balanceUpdated || s.authCacheInvalidator == nil {
+		return
+	}
+	s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+}
+
+// GetByID fetches a usage log by ID.
+func (s *UsageService) GetByID(ctx context.Context, id int64) (*UsageLog, error) {
+	log, err := s.usageRepo.GetByID(ctx, id)
+	if err != nil {
+		return nil, fmt.Errorf("get usage log: %w", err)
+	}
+	return log, nil
+}
+
+// ListByUser lists a user's usage logs.
+func (s *UsageService) ListByUser(ctx context.Context, userID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) {
+	logs, pagination, err := s.usageRepo.ListByUser(ctx, userID, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list usage logs: %w", err)
+	}
+	return logs, pagination, nil
+}
+
+// ListByAPIKey lists an API key's usage logs.
+func (s *UsageService) ListByAPIKey(ctx context.Context, apiKeyID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) {
+	logs, pagination, err := s.usageRepo.ListByAPIKey(ctx, apiKeyID, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list usage logs: %w", err)
+	}
+	return logs, pagination, nil
+}
+
+// ListByAccount lists an account's usage logs.
+func (s *UsageService) ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]UsageLog, *pagination.PaginationResult, error) {
+	logs, pagination, err := s.usageRepo.ListByAccount(ctx, accountID, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list usage logs: %w", err)
+	}
+	return logs, pagination, nil
+}
+
+// GetStatsByUser returns a user's usage statistics.
+func (s *UsageService) GetStatsByUser(ctx context.Context, userID int64, startTime, endTime time.Time) (*UsageStats, error) {
+	stats, err := s.usageRepo.GetUserStatsAggregated(ctx, userID, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get user stats: %w", err)
+	}
+
+	return &UsageStats{
+		TotalRequests:     stats.TotalRequests,
+		TotalInputTokens:  stats.TotalInputTokens,
+		TotalOutputTokens: stats.TotalOutputTokens,
+		TotalCacheTokens:  stats.TotalCacheTokens,
+		TotalTokens:       stats.TotalTokens,
+		TotalCost:         stats.TotalCost,
+		TotalActualCost:   stats.TotalActualCost,
+		AverageDurationMs: stats.AverageDurationMs,
+	}, nil
+}
+
+// GetStatsByAPIKey returns an API key's usage statistics.
+func (s *UsageService) GetStatsByAPIKey(ctx context.Context, apiKeyID int64, startTime, endTime time.Time) (*UsageStats, error) {
+	stats, err := s.usageRepo.GetAPIKeyStatsAggregated(ctx, apiKeyID, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get api key stats: %w", err)
+	}
+
+	return &UsageStats{
+		TotalRequests:     stats.TotalRequests,
+		TotalInputTokens:  stats.TotalInputTokens,
+		TotalOutputTokens: stats.TotalOutputTokens,
+		TotalCacheTokens:  stats.TotalCacheTokens,
+		TotalTokens:       stats.TotalTokens,
+		TotalCost:         stats.TotalCost,
+		TotalActualCost:   stats.TotalActualCost,
+		AverageDurationMs: stats.AverageDurationMs,
+	}, nil
+}
+
+// GetStatsByAccount returns an account's usage statistics.
+func (s *UsageService) GetStatsByAccount(ctx context.Context, accountID int64, startTime, endTime time.Time) (*UsageStats, error) {
+	stats, err := s.usageRepo.GetAccountStatsAggregated(ctx, accountID, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get account stats: %w", err)
+	}
+
+	return &UsageStats{
+		TotalRequests:     stats.TotalRequests,
+		TotalInputTokens:  stats.TotalInputTokens,
+		TotalOutputTokens: stats.TotalOutputTokens,
+		TotalCacheTokens:  stats.TotalCacheTokens,
+		TotalTokens:       stats.TotalTokens,
+		TotalCost:         stats.TotalCost,
+		TotalActualCost:   stats.TotalActualCost,
+		AverageDurationMs: stats.AverageDurationMs,
+	}, nil
+}
+
+// GetStatsByModel returns a model's usage statistics.
+func (s *UsageService) GetStatsByModel(ctx context.Context, modelName string, startTime, endTime time.Time) (*UsageStats, error) {
+	stats, err := s.usageRepo.GetModelStatsAggregated(ctx, modelName, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get model stats: %w", err)
+	}
+
+	return &UsageStats{
+		TotalRequests:     stats.TotalRequests,
+		TotalInputTokens:  stats.TotalInputTokens,
+		TotalOutputTokens: stats.TotalOutputTokens,
+		TotalCacheTokens:  stats.TotalCacheTokens,
+		TotalTokens:       stats.TotalTokens,
+		TotalCost:         stats.TotalCost,
+		TotalActualCost:   stats.TotalActualCost,
+		AverageDurationMs: stats.AverageDurationMs,
+	}, nil
+}
+
+// GetDailyStats returns daily usage statistics for the last N days.
+func (s *UsageService) GetDailyStats(ctx context.Context, userID int64, days int) ([]map[string]any, error) {
+	endTime := time.Now()
+	startTime := endTime.AddDate(0, 0, -days)
+
+	stats, err := s.usageRepo.GetDailyStatsAggregated(ctx, userID, startTime, endTime)
+	if err != nil {
+		return nil, fmt.Errorf("get daily stats: %w", err)
+	}
+
+	return stats, nil
+}
+
+// Delete removes a usage log (admin-only; use with care).
+func (s *UsageService) Delete(ctx context.Context, id int64) error {
+	if err := s.usageRepo.Delete(ctx, id); err != nil {
+		return fmt.Errorf("delete usage log: %w", err)
+	}
+	return nil
+}
+
+// GetUserDashboardStats returns per-user dashboard summary stats.
+func (s *UsageService) GetUserDashboardStats(ctx context.Context, userID int64) (*usagestats.UserDashboardStats, error) {
+	stats, err := s.usageRepo.GetUserDashboardStats(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("get user dashboard stats: %w", err)
+	}
+	return stats, nil
+}
+
+// GetUserUsageTrendByUserID returns per-user usage trend.
+func (s *UsageService) GetUserUsageTrendByUserID(ctx context.Context, userID int64, startTime, endTime time.Time, granularity string) ([]usagestats.TrendDataPoint, error) {
+	trend, err := s.usageRepo.GetUserUsageTrendByUserID(ctx, userID, startTime, endTime, granularity)
+	if err != nil {
+		return nil, fmt.Errorf("get user usage trend: %w", err)
+	}
+	return trend, nil
+}
+
+// GetUserModelStats returns per-user model usage stats.
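+// A caller would typically query a rolling window, e.g. (hypothetical wiring;
+// svc and userID are assumed to exist):
+//
+//	end := time.Now()
+//	stats, err := svc.GetUserModelStats(ctx, userID, end.AddDate(0, 0, -7), end)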
+func (s *UsageService) GetUserModelStats(ctx context.Context, userID int64, startTime, endTime time.Time) ([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetUserModelStats(ctx, userID, startTime, endTime) + if err != nil { + return nil, fmt.Errorf("get user model stats: %w", err) + } + return stats, nil +} + +// GetBatchAPIKeyUsageStats returns today/total actual_cost for given api keys. +func (s *UsageService) GetBatchAPIKeyUsageStats(ctx context.Context, apiKeyIDs []int64) (map[int64]*usagestats.BatchAPIKeyUsageStats, error) { + stats, err := s.usageRepo.GetBatchAPIKeyUsageStats(ctx, apiKeyIDs) + if err != nil { + return nil, fmt.Errorf("get batch api key usage stats: %w", err) + } + return stats, nil +} + +// ListWithFilters lists usage logs with admin filters. +func (s *UsageService) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]UsageLog, *pagination.PaginationResult, error) { + logs, result, err := s.usageRepo.ListWithFilters(ctx, params, filters) + if err != nil { + return nil, nil, fmt.Errorf("list usage logs with filters: %w", err) + } + return logs, result, nil +} + +// GetGlobalStats returns global usage stats for a time range. +func (s *UsageService) GetGlobalStats(ctx context.Context, startTime, endTime time.Time) (*usagestats.UsageStats, error) { + stats, err := s.usageRepo.GetGlobalStats(ctx, startTime, endTime) + if err != nil { + return nil, fmt.Errorf("get global usage stats: %w", err) + } + return stats, nil +} + +// GetStatsWithFilters returns usage stats with optional filters. +func (s *UsageService) GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) { + stats, err := s.usageRepo.GetStatsWithFilters(ctx, filters) + if err != nil { + return nil, fmt.Errorf("get usage stats with filters: %w", err) + } + return stats, nil +} diff --git a/backend/internal/service/user.go b/backend/internal/service/user.go new file mode 100644 index 00000000..c565607e --- /dev/null +++ b/backend/internal/service/user.go @@ -0,0 +1,63 @@ +package service + +import ( + "time" + + "golang.org/x/crypto/bcrypt" +) + +type User struct { + ID int64 + Email string + Username string + Notes string + PasswordHash string + Role string + Balance float64 + Concurrency int + Status string + AllowedGroups []int64 + TokenVersion int64 // Incremented on password change to invalidate existing tokens + CreatedAt time.Time + UpdatedAt time.Time + + APIKeys []APIKey + Subscriptions []UserSubscription +} + +func (u *User) IsAdmin() bool { + return u.Role == RoleAdmin +} + +func (u *User) IsActive() bool { + return u.Status == StatusActive +} + +// CanBindGroup checks whether a user can bind to a given group. +// For standard groups: +// - If AllowedGroups is non-empty, only allow binding to IDs in that list. +// - If AllowedGroups is empty (nil or length 0), allow binding to any non-exclusive group. 
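+//
+// For example, a user with AllowedGroups = [2, 5] can bind group 5 even when it
+// is exclusive, but never group 3; a user with empty AllowedGroups can bind any
+// group whose isExclusive flag is false.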
+func (u *User) CanBindGroup(groupID int64, isExclusive bool) bool {
+	if len(u.AllowedGroups) > 0 {
+		for _, id := range u.AllowedGroups {
+			if id == groupID {
+				return true
+			}
+		}
+		return false
+	}
+	return !isExclusive
+}
+
+func (u *User) SetPassword(password string) error {
+	hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
+	if err != nil {
+		return err
+	}
+	u.PasswordHash = string(hash)
+	return nil
+}
+
+func (u *User) CheckPassword(password string) bool {
+	return bcrypt.CompareHashAndPassword([]byte(u.PasswordHash), []byte(password)) == nil
+}
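A quick illustration of the binding rules described in the CanBindGroup doc comment (illustrative values, not part of this commit): an explicit allow-list wins over the exclusive flag, while an empty allow-list only grants non-exclusive groups.

	u := &User{AllowedGroups: []int64{1, 2}}
	u.CanBindGroup(2, true)  // true: explicitly allowed, even though the group is exclusive
	u.CanBindGroup(3, false) // false: an allow-list exists and 3 is not on it

	open := &User{} // no allow-list
	open.CanBindGroup(3, false) // true: any non-exclusive group
	open.CanBindGroup(3, true)  // false: exclusive groups require an explicit grant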
diff --git a/backend/internal/service/user_attribute.go b/backend/internal/service/user_attribute.go
new file mode 100644
index 00000000..0637102e
--- /dev/null
+++ b/backend/internal/service/user_attribute.go
@@ -0,0 +1,125 @@
+package service
+
+import (
+	"context"
+	"time"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// Error definitions for user attribute operations
+var (
+	ErrAttributeDefinitionNotFound = infraerrors.NotFound("ATTRIBUTE_DEFINITION_NOT_FOUND", "attribute definition not found")
+	ErrAttributeKeyExists          = infraerrors.Conflict("ATTRIBUTE_KEY_EXISTS", "attribute key already exists")
+	ErrInvalidAttributeType        = infraerrors.BadRequest("INVALID_ATTRIBUTE_TYPE", "invalid attribute type")
+	ErrAttributeValidationFailed   = infraerrors.BadRequest("ATTRIBUTE_VALIDATION_FAILED", "attribute value validation failed")
+)
+
+// UserAttributeType represents supported attribute types
+type UserAttributeType string
+
+const (
+	AttributeTypeText        UserAttributeType = "text"
+	AttributeTypeTextarea    UserAttributeType = "textarea"
+	AttributeTypeNumber      UserAttributeType = "number"
+	AttributeTypeEmail       UserAttributeType = "email"
+	AttributeTypeURL         UserAttributeType = "url"
+	AttributeTypeDate        UserAttributeType = "date"
+	AttributeTypeSelect      UserAttributeType = "select"
+	AttributeTypeMultiSelect UserAttributeType = "multi_select"
+)
+
+// UserAttributeOption represents a select option for select/multi_select types
+type UserAttributeOption struct {
+	Value string `json:"value"`
+	Label string `json:"label"`
+}
+
+// UserAttributeValidation represents validation rules for an attribute
+type UserAttributeValidation struct {
+	MinLength *int    `json:"min_length,omitempty"`
+	MaxLength *int    `json:"max_length,omitempty"`
+	Min       *int    `json:"min,omitempty"`
+	Max       *int    `json:"max,omitempty"`
+	Pattern   *string `json:"pattern,omitempty"`
+	Message   *string `json:"message,omitempty"`
+}
+
+// UserAttributeDefinition represents a custom attribute definition
+type UserAttributeDefinition struct {
+	ID           int64
+	Key          string
+	Name         string
+	Description  string
+	Type         UserAttributeType
+	Options      []UserAttributeOption
+	Required     bool
+	Validation   UserAttributeValidation
+	Placeholder  string
+	DisplayOrder int
+	Enabled      bool
+	CreatedAt    time.Time
+	UpdatedAt    time.Time
+}
+
+// UserAttributeValue represents a user's attribute value
+type UserAttributeValue struct {
+	ID          int64
+	UserID      int64
+	AttributeID int64
+	Value       string
+	CreatedAt   time.Time
+	UpdatedAt   time.Time
+}
+
+// CreateAttributeDefinitionInput for creating new definition
+type CreateAttributeDefinitionInput struct {
+	Key         string
+	Name        string
+	Description string
+	Type        UserAttributeType
+	Options     []UserAttributeOption
+	Required    bool
+	Validation  UserAttributeValidation
+	Placeholder string
+	Enabled     bool
+}
+
+// UpdateAttributeDefinitionInput for updating definition
+type UpdateAttributeDefinitionInput struct {
+	Name        *string
+	Description *string
+	Type        *UserAttributeType
+	Options     *[]UserAttributeOption
+	Required    *bool
+	Validation  *UserAttributeValidation
+	Placeholder *string
+	Enabled     *bool
+}
+
+// UpdateUserAttributeInput for updating a single attribute value
+type UpdateUserAttributeInput struct {
+	AttributeID int64
+	Value       string
+}
+
+// UserAttributeDefinitionRepository interface for attribute definition persistence
+type UserAttributeDefinitionRepository interface {
+	Create(ctx context.Context, def *UserAttributeDefinition) error
+	GetByID(ctx context.Context, id int64) (*UserAttributeDefinition, error)
+	GetByKey(ctx context.Context, key string) (*UserAttributeDefinition, error)
+	Update(ctx context.Context, def *UserAttributeDefinition) error
+	Delete(ctx context.Context, id int64) error
+	List(ctx context.Context, enabledOnly bool) ([]UserAttributeDefinition, error)
+	UpdateDisplayOrders(ctx context.Context, orders map[int64]int) error
+	ExistsByKey(ctx context.Context, key string) (bool, error)
+}
+
+// UserAttributeValueRepository interface for user attribute value persistence
+type UserAttributeValueRepository interface {
+	GetByUserID(ctx context.Context, userID int64) ([]UserAttributeValue, error)
+	GetByUserIDs(ctx context.Context, userIDs []int64) ([]UserAttributeValue, error)
+	UpsertBatch(ctx context.Context, userID int64, values []UpdateUserAttributeInput) error
+	DeleteByAttributeID(ctx context.Context, attributeID int64) error
+	DeleteByUserID(ctx context.Context, userID int64) error
+}
diff --git a/backend/internal/service/user_attribute_service.go b/backend/internal/service/user_attribute_service.go
new file mode 100644
index 00000000..6c2f8077
--- /dev/null
+++ b/backend/internal/service/user_attribute_service.go
@@ -0,0 +1,323 @@
+package service
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// UserAttributeService handles attribute management
+type UserAttributeService struct {
+	defRepo   UserAttributeDefinitionRepository
+	valueRepo UserAttributeValueRepository
+}
+
+// NewUserAttributeService creates a new service instance
+func NewUserAttributeService(
+	defRepo UserAttributeDefinitionRepository,
+	valueRepo UserAttributeValueRepository,
+) *UserAttributeService {
+	return &UserAttributeService{
+		defRepo:   defRepo,
+		valueRepo: valueRepo,
+	}
+}
+
+// CreateDefinition creates a new attribute definition
+func (s *UserAttributeService) CreateDefinition(ctx context.Context, input CreateAttributeDefinitionInput) (*UserAttributeDefinition, error) {
+	// Validate type
+	if !isValidAttributeType(input.Type) {
+		return nil, ErrInvalidAttributeType
+	}
+
+	// Check if key exists
+	exists, err := s.defRepo.ExistsByKey(ctx, input.Key)
+	if err != nil {
+		return nil, fmt.Errorf("check key exists: %w", err)
+	}
+	if exists {
+		return nil, ErrAttributeKeyExists
+	}
+
+	def := &UserAttributeDefinition{
+		Key:         input.Key,
+		Name:        input.Name,
+		Description: input.Description,
+		Type:        input.Type,
+		Options:     input.Options,
+		Required:    input.Required,
+		Validation:  input.Validation,
+		Placeholder: input.Placeholder,
+		Enabled:     input.Enabled,
+	}
+
+	if err := validateDefinitionPattern(def); err != nil {
+		return nil, err
+	}
+
+	if err := s.defRepo.Create(ctx, def); err != nil {
+		return nil, fmt.Errorf("create definition: %w", err)
+	}
+
+	return def, nil
+}
+
+// GetDefinition retrieves a definition by ID
+func (s *UserAttributeService) GetDefinition(ctx context.Context, id int64)
(*UserAttributeDefinition, error) { + return s.defRepo.GetByID(ctx, id) +} + +// ListDefinitions lists all definitions +func (s *UserAttributeService) ListDefinitions(ctx context.Context, enabledOnly bool) ([]UserAttributeDefinition, error) { + return s.defRepo.List(ctx, enabledOnly) +} + +// UpdateDefinition updates an existing definition +func (s *UserAttributeService) UpdateDefinition(ctx context.Context, id int64, input UpdateAttributeDefinitionInput) (*UserAttributeDefinition, error) { + def, err := s.defRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Name != nil { + def.Name = *input.Name + } + if input.Description != nil { + def.Description = *input.Description + } + if input.Type != nil { + if !isValidAttributeType(*input.Type) { + return nil, ErrInvalidAttributeType + } + def.Type = *input.Type + } + if input.Options != nil { + def.Options = *input.Options + } + if input.Required != nil { + def.Required = *input.Required + } + if input.Validation != nil { + def.Validation = *input.Validation + } + if input.Placeholder != nil { + def.Placeholder = *input.Placeholder + } + if input.Enabled != nil { + def.Enabled = *input.Enabled + } + + if err := validateDefinitionPattern(def); err != nil { + return nil, err + } + + if err := s.defRepo.Update(ctx, def); err != nil { + return nil, fmt.Errorf("update definition: %w", err) + } + + return def, nil +} + +// DeleteDefinition soft-deletes a definition and hard-deletes associated values +func (s *UserAttributeService) DeleteDefinition(ctx context.Context, id int64) error { + // Check if definition exists + _, err := s.defRepo.GetByID(ctx, id) + if err != nil { + return err + } + + // First delete all values (hard delete) + if err := s.valueRepo.DeleteByAttributeID(ctx, id); err != nil { + return fmt.Errorf("delete values: %w", err) + } + + // Then soft-delete the definition + if err := s.defRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete definition: %w", err) + } + + return nil +} + +// ReorderDefinitions updates display order for multiple definitions +func (s *UserAttributeService) ReorderDefinitions(ctx context.Context, orders map[int64]int) error { + return s.defRepo.UpdateDisplayOrders(ctx, orders) +} + +// GetUserAttributes retrieves all attribute values for a user +func (s *UserAttributeService) GetUserAttributes(ctx context.Context, userID int64) ([]UserAttributeValue, error) { + return s.valueRepo.GetByUserID(ctx, userID) +} + +// GetBatchUserAttributes retrieves attribute values for multiple users +// Returns a map of userID -> map of attributeID -> value +func (s *UserAttributeService) GetBatchUserAttributes(ctx context.Context, userIDs []int64) (map[int64]map[int64]string, error) { + values, err := s.valueRepo.GetByUserIDs(ctx, userIDs) + if err != nil { + return nil, err + } + + result := make(map[int64]map[int64]string) + for _, v := range values { + if result[v.UserID] == nil { + result[v.UserID] = make(map[int64]string) + } + result[v.UserID][v.AttributeID] = v.Value + } + + return result, nil +} + +// UpdateUserAttributes batch updates attribute values for a user +func (s *UserAttributeService) UpdateUserAttributes(ctx context.Context, userID int64, inputs []UpdateUserAttributeInput) error { + // Validate all values before updating + defs, err := s.defRepo.List(ctx, true) + if err != nil { + return fmt.Errorf("list definitions: %w", err) + } + + defMap := make(map[int64]*UserAttributeDefinition, len(defs)) + for i := range defs { + defMap[defs[i].ID] = &defs[i] + } + + for _, input 
:= range inputs {
+		def, ok := defMap[input.AttributeID]
+		if !ok {
+			return ErrAttributeDefinitionNotFound
+		}
+
+		if err := s.validateValue(def, input.Value); err != nil {
+			return err
+		}
+	}
+
+	return s.valueRepo.UpsertBatch(ctx, userID, inputs)
+}
+
+// validateValue validates a value against its definition
+func (s *UserAttributeService) validateValue(def *UserAttributeDefinition, value string) error {
+	// Skip validation for empty non-required fields
+	if value == "" && !def.Required {
+		return nil
+	}
+
+	// Required check
+	if def.Required && value == "" {
+		return validationError(fmt.Sprintf("%s is required", def.Name))
+	}
+
+	v := def.Validation
+
+	// String length validation
+	if v.MinLength != nil && len(value) < *v.MinLength {
+		return validationError(fmt.Sprintf("%s must be at least %d characters", def.Name, *v.MinLength))
+	}
+	if v.MaxLength != nil && len(value) > *v.MaxLength {
+		return validationError(fmt.Sprintf("%s must be at most %d characters", def.Name, *v.MaxLength))
+	}
+
+	// Number validation
+	if def.Type == AttributeTypeNumber && value != "" {
+		num, err := strconv.Atoi(value)
+		if err != nil {
+			return validationError(fmt.Sprintf("%s must be a number", def.Name))
+		}
+		if v.Min != nil && num < *v.Min {
+			return validationError(fmt.Sprintf("%s must be at least %d", def.Name, *v.Min))
+		}
+		if v.Max != nil && num > *v.Max {
+			return validationError(fmt.Sprintf("%s must be at most %d", def.Name, *v.Max))
+		}
+	}
+
+	// Pattern validation
+	if v.Pattern != nil && *v.Pattern != "" && value != "" {
+		re, err := regexp.Compile(*v.Pattern)
+		if err != nil {
+			return validationError(def.Name + " has an invalid pattern")
+		}
+		if !re.MatchString(value) {
+			msg := def.Name + " format is invalid"
+			if v.Message != nil && *v.Message != "" {
+				msg = *v.Message
+			}
+			return validationError(msg)
+		}
+	}
+
+	// Select validation
+	if def.Type == AttributeTypeSelect && value != "" {
+		found := false
+		for _, opt := range def.Options {
+			if opt.Value == value {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return validationError(fmt.Sprintf("%s: invalid option", def.Name))
+		}
+	}
+
+	// Multi-select validation (stored as JSON array)
+	if def.Type == AttributeTypeMultiSelect && value != "" {
+		var values []string
+		if err := json.Unmarshal([]byte(value), &values); err != nil {
+			// Try comma-separated fallback
+			values = strings.Split(value, ",")
+		}
+		for _, val := range values {
+			val = strings.TrimSpace(val)
+			found := false
+			for _, opt := range def.Options {
+				if opt.Value == val {
+					found = true
+					break
+				}
+			}
+			if !found {
+				return validationError(fmt.Sprintf("%s: invalid option %s", def.Name, val))
+			}
+		}
+	}
+
+	return nil
+}
+
+// validationError creates a validation error with a custom message
+func validationError(msg string) error {
+	return infraerrors.BadRequest("ATTRIBUTE_VALIDATION_FAILED", msg)
+}
+
+func isValidAttributeType(t UserAttributeType) bool {
+	switch t {
+	case AttributeTypeText, AttributeTypeTextarea, AttributeTypeNumber,
+		AttributeTypeEmail, AttributeTypeURL, AttributeTypeDate,
+		AttributeTypeSelect, AttributeTypeMultiSelect:
+		return true
+	}
+	return false
+}
+
+func validateDefinitionPattern(def *UserAttributeDefinition) error {
+	if def == nil {
+		return nil
+	}
+	if def.Validation.Pattern == nil {
+		return nil
+	}
+	pattern := strings.TrimSpace(*def.Validation.Pattern)
+	if pattern == "" {
+		return nil
+	}
+	if _, err := regexp.Compile(pattern); err != nil {
+		return infraerrors.BadRequest("INVALID_ATTRIBUTE_PATTERN",
+			fmt.Sprintf("invalid pattern for %s: %v", def.Name, err))
+	}
+	return nil
+}
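For reference, a sketch of how validateValue treats multi-select values (an in-package call, since validateValue is unexported; the option values here are hypothetical): it first tries a JSON array, then falls back to a comma-separated list with whitespace trimmed.

	def := &UserAttributeDefinition{
		Name: "Tags",
		Type: AttributeTypeMultiSelect,
		Options: []UserAttributeOption{
			{Value: "go", Label: "Go"},
			{Value: "rust", Label: "Rust"},
		},
	}
	var svc UserAttributeService
	_ = svc.validateValue(def, `["go","rust"]`) // nil: JSON array of known options
	_ = svc.validateValue(def, "go, rust")      // nil: comma-separated fallback, values trimmed
	_ = svc.validateValue(def, `["zig"]`)       // error: "zig" is not a configured option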
fmt.Sprintf("invalid pattern for %s: %v", def.Name, err)) + } + return nil +} diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go new file mode 100644 index 00000000..1734914a --- /dev/null +++ b/backend/internal/service/user_service.go @@ -0,0 +1,223 @@ +package service + +import ( + "context" + "fmt" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +var ( + ErrUserNotFound = infraerrors.NotFound("USER_NOT_FOUND", "user not found") + ErrPasswordIncorrect = infraerrors.BadRequest("PASSWORD_INCORRECT", "current password is incorrect") + ErrInsufficientPerms = infraerrors.Forbidden("INSUFFICIENT_PERMISSIONS", "insufficient permissions") +) + +// UserListFilters contains all filter options for listing users +type UserListFilters struct { + Status string // User status filter + Role string // User role filter + Search string // Search in email, username + Attributes map[int64]string // Custom attribute filters: attributeID -> value +} + +type UserRepository interface { + Create(ctx context.Context, user *User) error + GetByID(ctx context.Context, id int64) (*User, error) + GetByEmail(ctx context.Context, email string) (*User, error) + GetFirstAdmin(ctx context.Context) (*User, error) + Update(ctx context.Context, user *User) error + Delete(ctx context.Context, id int64) error + + List(ctx context.Context, params pagination.PaginationParams) ([]User, *pagination.PaginationResult, error) + ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters UserListFilters) ([]User, *pagination.PaginationResult, error) + + UpdateBalance(ctx context.Context, id int64, amount float64) error + DeductBalance(ctx context.Context, id int64, amount float64) error + UpdateConcurrency(ctx context.Context, id int64, amount int) error + ExistsByEmail(ctx context.Context, email string) (bool, error) + RemoveGroupFromAllowedGroups(ctx context.Context, groupID int64) (int64, error) +} + +// UpdateProfileRequest 更新用户资料请求 +type UpdateProfileRequest struct { + Email *string `json:"email"` + Username *string `json:"username"` + Concurrency *int `json:"concurrency"` +} + +// ChangePasswordRequest 修改密码请求 +type ChangePasswordRequest struct { + CurrentPassword string `json:"current_password"` + NewPassword string `json:"new_password"` +} + +// UserService 用户服务 +type UserService struct { + userRepo UserRepository + authCacheInvalidator APIKeyAuthCacheInvalidator +} + +// NewUserService 创建用户服务实例 +func NewUserService(userRepo UserRepository, authCacheInvalidator APIKeyAuthCacheInvalidator) *UserService { + return &UserService{ + userRepo: userRepo, + authCacheInvalidator: authCacheInvalidator, + } +} + +// GetFirstAdmin 获取首个管理员用户(用于 Admin API Key 认证) +func (s *UserService) GetFirstAdmin(ctx context.Context) (*User, error) { + admin, err := s.userRepo.GetFirstAdmin(ctx) + if err != nil { + return nil, fmt.Errorf("get first admin: %w", err) + } + return admin, nil +} + +// GetProfile 获取用户资料 +func (s *UserService) GetProfile(ctx context.Context, userID int64) (*User, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + return user, nil +} + +// UpdateProfile 更新用户资料 +func (s *UserService) UpdateProfile(ctx context.Context, userID int64, req UpdateProfileRequest) (*User, error) { + user, err := s.userRepo.GetByID(ctx, userID) + if err != nil { + return nil, fmt.Errorf("get user: %w", err) + } + oldConcurrency := 
+// UpdateProfile updates a user's profile.
+func (s *UserService) UpdateProfile(ctx context.Context, userID int64, req UpdateProfileRequest) (*User, error) {
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+	oldConcurrency := user.Concurrency
+
+	// Apply the requested field updates
+	if req.Email != nil {
+		// Check whether the new email is already in use
+		exists, err := s.userRepo.ExistsByEmail(ctx, *req.Email)
+		if err != nil {
+			return nil, fmt.Errorf("check email exists: %w", err)
+		}
+		if exists && *req.Email != user.Email {
+			return nil, ErrEmailExists
+		}
+		user.Email = *req.Email
+	}
+
+	if req.Username != nil {
+		user.Username = *req.Username
+	}
+
+	if req.Concurrency != nil {
+		user.Concurrency = *req.Concurrency
+	}
+
+	if err := s.userRepo.Update(ctx, user); err != nil {
+		return nil, fmt.Errorf("update user: %w", err)
+	}
+	if s.authCacheInvalidator != nil && user.Concurrency != oldConcurrency {
+		s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+	}
+
+	return user, nil
+}
+
+// ChangePassword changes a user's password.
+// Security: Increments TokenVersion to invalidate all existing JWT tokens
+func (s *UserService) ChangePassword(ctx context.Context, userID int64, req ChangePasswordRequest) error {
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("get user: %w", err)
+	}
+
+	// Verify the current password
+	if !user.CheckPassword(req.CurrentPassword) {
+		return ErrPasswordIncorrect
+	}
+
+	if err := user.SetPassword(req.NewPassword); err != nil {
+		return fmt.Errorf("set password: %w", err)
+	}
+
+	// Increment TokenVersion to invalidate all existing tokens
+	// This ensures that any tokens issued before the password change become invalid
+	user.TokenVersion++
+
+	if err := s.userRepo.Update(ctx, user); err != nil {
+		return fmt.Errorf("update user: %w", err)
+	}
+
+	return nil
+}
+
+// GetByID returns a user by ID (admin only).
+func (s *UserService) GetByID(ctx context.Context, id int64) (*User, error) {
+	user, err := s.userRepo.GetByID(ctx, id)
+	if err != nil {
+		return nil, fmt.Errorf("get user: %w", err)
+	}
+	return user, nil
+}
+
+// List returns users with pagination (admin only).
+func (s *UserService) List(ctx context.Context, params pagination.PaginationParams) ([]User, *pagination.PaginationResult, error) {
+	users, pageResult, err := s.userRepo.List(ctx, params)
+	if err != nil {
+		return nil, nil, fmt.Errorf("list users: %w", err)
+	}
+	return users, pageResult, nil
+}
+
+// UpdateBalance updates a user's balance (admin only).
+func (s *UserService) UpdateBalance(ctx context.Context, userID int64, amount float64) error {
+	if err := s.userRepo.UpdateBalance(ctx, userID, amount); err != nil {
+		return fmt.Errorf("update balance: %w", err)
+	}
+	if s.authCacheInvalidator != nil {
+		s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+	}
+	return nil
+}
+
+// UpdateConcurrency updates a user's concurrency limit (admin only).
+func (s *UserService) UpdateConcurrency(ctx context.Context, userID int64, concurrency int) error {
+	if err := s.userRepo.UpdateConcurrency(ctx, userID, concurrency); err != nil {
+		return fmt.Errorf("update concurrency: %w", err)
+	}
+	if s.authCacheInvalidator != nil {
+		s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+	}
+	return nil
+}
+
+// UpdateStatus updates a user's status (admin only).
+func (s *UserService) UpdateStatus(ctx context.Context, userID int64, status string) error {
+	user, err := s.userRepo.GetByID(ctx, userID)
+	if err != nil {
+		return fmt.Errorf("get user: %w", err)
+	}
+
+	user.Status = status
+
+	if err := s.userRepo.Update(ctx, user); err != nil {
+		return fmt.Errorf("update user: %w", err)
+	}
+	if s.authCacheInvalidator != nil {
+		s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+	}
+
+	return nil
+}
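ChangePassword above bumps TokenVersion, so JWTs minted before the change should stop verifying. A minimal sketch of the check the verifying side would need (the claim plumbing is assumed; the real auth middleware lives elsewhere in this commit):

	// A token carries the TokenVersion current at issue time; a mismatch
	// means the password changed after the token was issued.
	func tokenVersionValid(claimVersion int64, u *User) bool {
		return claimVersion == u.TokenVersion
	}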
+// Delete removes a user (admin only).
+func (s *UserService) Delete(ctx context.Context, userID int64) error {
+	if s.authCacheInvalidator != nil {
+		s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID)
+	}
+	if err := s.userRepo.Delete(ctx, userID); err != nil {
+		return fmt.Errorf("delete user: %w", err)
+	}
+	return nil
+}
diff --git a/backend/internal/service/user_subscription.go b/backend/internal/service/user_subscription.go
new file mode 100644
index 00000000..ec547d81
--- /dev/null
+++ b/backend/internal/service/user_subscription.go
@@ -0,0 +1,124 @@
+package service
+
+import "time"
+
+type UserSubscription struct {
+	ID      int64
+	UserID  int64
+	GroupID int64
+
+	StartsAt  time.Time
+	ExpiresAt time.Time
+	Status    string
+
+	DailyWindowStart   *time.Time
+	WeeklyWindowStart  *time.Time
+	MonthlyWindowStart *time.Time
+
+	DailyUsageUSD   float64
+	WeeklyUsageUSD  float64
+	MonthlyUsageUSD float64
+
+	AssignedBy *int64
+	AssignedAt time.Time
+	Notes      string
+
+	CreatedAt time.Time
+	UpdatedAt time.Time
+
+	User           *User
+	Group          *Group
+	AssignedByUser *User
+}
+
+func (s *UserSubscription) IsActive() bool {
+	return s.Status == SubscriptionStatusActive && time.Now().Before(s.ExpiresAt)
+}
+
+func (s *UserSubscription) IsExpired() bool {
+	return time.Now().After(s.ExpiresAt)
+}
+
+func (s *UserSubscription) DaysRemaining() int {
+	if s.IsExpired() {
+		return 0
+	}
+	return int(time.Until(s.ExpiresAt).Hours() / 24)
+}
+
+func (s *UserSubscription) IsWindowActivated() bool {
+	return s.DailyWindowStart != nil || s.WeeklyWindowStart != nil || s.MonthlyWindowStart != nil
+}
+
+func (s *UserSubscription) NeedsDailyReset() bool {
+	if s.DailyWindowStart == nil {
+		return false
+	}
+	return time.Since(*s.DailyWindowStart) >= 24*time.Hour
+}
+
+func (s *UserSubscription) NeedsWeeklyReset() bool {
+	if s.WeeklyWindowStart == nil {
+		return false
+	}
+	return time.Since(*s.WeeklyWindowStart) >= 7*24*time.Hour
+}
+
+func (s *UserSubscription) NeedsMonthlyReset() bool {
+	if s.MonthlyWindowStart == nil {
+		return false
+	}
+	return time.Since(*s.MonthlyWindowStart) >= 30*24*time.Hour
+}
+
+func (s *UserSubscription) DailyResetTime() *time.Time {
+	if s.DailyWindowStart == nil {
+		return nil
+	}
+	t := s.DailyWindowStart.Add(24 * time.Hour)
+	return &t
+}
+
+func (s *UserSubscription) WeeklyResetTime() *time.Time {
+	if s.WeeklyWindowStart == nil {
+		return nil
+	}
+	t := s.WeeklyWindowStart.Add(7 * 24 * time.Hour)
+	return &t
+}
+
+func (s *UserSubscription) MonthlyResetTime() *time.Time {
+	if s.MonthlyWindowStart == nil {
+		return nil
+	}
+	t := s.MonthlyWindowStart.Add(30 * 24 * time.Hour)
+	return &t
+}
+
+func (s *UserSubscription) CheckDailyLimit(group *Group, additionalCost float64) bool {
+	if !group.HasDailyLimit() {
+		return true
+	}
+	return s.DailyUsageUSD+additionalCost <= *group.DailyLimitUSD
+}
+
+func (s *UserSubscription) CheckWeeklyLimit(group *Group, additionalCost float64) bool {
+	if !group.HasWeeklyLimit() {
+		return true
+	}
+	return s.WeeklyUsageUSD+additionalCost <= *group.WeeklyLimitUSD
+}
+
+func (s *UserSubscription) CheckMonthlyLimit(group *Group, additionalCost float64) bool {
+	if !group.HasMonthlyLimit() {
+		return true
+	}
+	return s.MonthlyUsageUSD+additionalCost <= *group.MonthlyLimitUSD
+}
+
+func (s *UserSubscription) CheckAllLimits(group *Group, additionalCost float64) (daily, weekly, monthly bool) {
+	daily = s.CheckDailyLimit(group, additionalCost)
+	weekly = s.CheckWeeklyLimit(group, additionalCost)
+	monthly = s.CheckMonthlyLimit(group, additionalCost)
+	return
+}
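The window helpers above implement rolling quota windows (24h/7d/30d measured from first use, not calendar boundaries). A small sketch with assumed values (Group and its Has*Limit helpers are defined elsewhere in this commit; HasDailyLimit is assumed to report whether DailyLimitUSD is set):

	now := time.Now()
	windowStart := now.Add(-25 * time.Hour) // daily window opened 25h ago
	limit := 5.0

	sub := &UserSubscription{
		Status:           SubscriptionStatusActive,
		ExpiresAt:        now.AddDate(0, 1, 0),
		DailyWindowStart: &windowStart,
		DailyUsageUSD:    4.50,
	}
	group := &Group{DailyLimitUSD: &limit}

	_ = sub.IsActive()                   // true: active status and not yet expired
	_ = sub.NeedsDailyReset()            // true: more than 24h since the window opened
	_ = sub.CheckDailyLimit(group, 0.75) // false: 4.50 + 0.75 exceeds the 5.00 cap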
diff --git a/backend/internal/service/user_subscription_port.go b/backend/internal/service/user_subscription_port.go
new file mode 100644
index 00000000..abf4dffd
--- /dev/null
+++ b/backend/internal/service/user_subscription_port.go
@@ -0,0 +1,35 @@
+package service
+
+import (
+	"context"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+type UserSubscriptionRepository interface {
+	Create(ctx context.Context, sub *UserSubscription) error
+	GetByID(ctx context.Context, id int64) (*UserSubscription, error)
+	GetByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*UserSubscription, error)
+	GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*UserSubscription, error)
+	Update(ctx context.Context, sub *UserSubscription) error
+	Delete(ctx context.Context, id int64) error
+
+	ListByUserID(ctx context.Context, userID int64) ([]UserSubscription, error)
+	ListActiveByUserID(ctx context.Context, userID int64) ([]UserSubscription, error)
+	ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]UserSubscription, *pagination.PaginationResult, error)
+	List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status string) ([]UserSubscription, *pagination.PaginationResult, error)
+
+	ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error)
+	ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error
+	UpdateStatus(ctx context.Context, subscriptionID int64, status string) error
+	UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error
+
+	ActivateWindows(ctx context.Context, id int64, start time.Time) error
+	ResetDailyUsage(ctx context.Context, id int64, newWindowStart time.Time) error
+	ResetWeeklyUsage(ctx context.Context, id int64, newWindowStart time.Time) error
+	ResetMonthlyUsage(ctx context.Context, id int64, newWindowStart time.Time) error
+	IncrementUsage(ctx context.Context, id int64, costUSD float64) error
+
+	BatchUpdateExpiredStatus(ctx context.Context) (int64, error)
+}
diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go
new file mode 100644
index 00000000..05dbb0b0
--- /dev/null
+++ b/backend/internal/service/wire.go
@@ -0,0 +1,249 @@
+package service
+
+import (
+	"database/sql"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/google/wire"
+	"github.com/redis/go-redis/v9"
+)
+
+// BuildInfo contains build information
+type BuildInfo struct {
+	Version   string
+	BuildType string
+}
+
+// ProvidePricingService creates and initializes PricingService
+func ProvidePricingService(cfg *config.Config, remoteClient PricingRemoteClient) (*PricingService, error) {
+	svc := NewPricingService(cfg, remoteClient)
+	if err := svc.Initialize(); err != nil {
+		// Pricing service initialization failure should not block startup, use fallback prices
+		println("[Service] Warning: Pricing service initialization failed:", err.Error())
+	}
+	return svc, nil
+}
+
+// ProvideUpdateService creates UpdateService with BuildInfo
+func ProvideUpdateService(cache UpdateCache, githubClient GitHubReleaseClient, buildInfo BuildInfo) *UpdateService {
+	return NewUpdateService(cache, githubClient, buildInfo.Version, buildInfo.BuildType)
+}
+
+// ProvideEmailQueueService creates EmailQueueService with default worker count
+func ProvideEmailQueueService(emailService *EmailService) *EmailQueueService {
+	return NewEmailQueueService(emailService, 3)
+}
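These providers follow google/wire's constructor-injection style: each one builds a service and, for background services, calls Start() so startup wiring brings the graph up. The generated injector for this project lives in backend/cmd/server/wire_gen.go per the diffstat; a minimal hypothetical injector for one self-contained provider might look like the following sketch:

	//go:build wireinject

	package main

	import (
		"github.com/Wei-Shaw/sub2api/internal/service"
		"github.com/google/wire"
	)

	// initTimingWheel is illustrative only; wire replaces the body with
	// generated code that calls the providers in dependency order.
	func initTimingWheel() *service.TimingWheelService {
		wire.Build(service.ProviderSet)
		return nil
	}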
+// ProvideTokenRefreshService creates and starts TokenRefreshService
+func ProvideTokenRefreshService(
+	accountRepo AccountRepository,
+	oauthService *OAuthService,
+	openaiOAuthService *OpenAIOAuthService,
+	geminiOAuthService *GeminiOAuthService,
+	antigravityOAuthService *AntigravityOAuthService,
+	cacheInvalidator TokenCacheInvalidator,
+	cfg *config.Config,
+) *TokenRefreshService {
+	svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideDashboardAggregationService creates and starts the dashboard aggregation service.
+func ProvideDashboardAggregationService(repo DashboardAggregationRepository, timingWheel *TimingWheelService, cfg *config.Config) *DashboardAggregationService {
+	svc := NewDashboardAggregationService(repo, timingWheel, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideAccountExpiryService creates and starts AccountExpiryService.
+func ProvideAccountExpiryService(accountRepo AccountRepository) *AccountExpiryService {
+	svc := NewAccountExpiryService(accountRepo, time.Minute)
+	svc.Start()
+	return svc
+}
+
+// ProvideTimingWheelService creates and starts TimingWheelService
+func ProvideTimingWheelService() *TimingWheelService {
+	svc := NewTimingWheelService()
+	svc.Start()
+	return svc
+}
+
+// ProvideDeferredService creates and starts DeferredService
+func ProvideDeferredService(accountRepo AccountRepository, timingWheel *TimingWheelService) *DeferredService {
+	svc := NewDeferredService(accountRepo, timingWheel, 10*time.Second)
+	svc.Start()
+	return svc
+}
+
+// ProvideConcurrencyService creates ConcurrencyService and starts slot cleanup worker.
+func ProvideConcurrencyService(cache ConcurrencyCache, accountRepo AccountRepository, cfg *config.Config) *ConcurrencyService {
+	svc := NewConcurrencyService(cache)
+	if cfg != nil {
+		svc.StartSlotCleanupWorker(accountRepo, cfg.Gateway.Scheduling.SlotCleanupInterval)
+	}
+	return svc
+}
+
+// ProvideSchedulerSnapshotService creates and starts SchedulerSnapshotService.
+func ProvideSchedulerSnapshotService(
+	cache SchedulerCache,
+	outboxRepo SchedulerOutboxRepository,
+	accountRepo AccountRepository,
+	groupRepo GroupRepository,
+	cfg *config.Config,
+) *SchedulerSnapshotService {
+	svc := NewSchedulerSnapshotService(cache, outboxRepo, accountRepo, groupRepo, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideRateLimitService creates RateLimitService with optional dependencies.
+func ProvideRateLimitService(
+	accountRepo AccountRepository,
+	usageRepo UsageLogRepository,
+	cfg *config.Config,
+	geminiQuotaService *GeminiQuotaService,
+	tempUnschedCache TempUnschedCache,
+	timeoutCounterCache TimeoutCounterCache,
+	settingService *SettingService,
+	tokenCacheInvalidator TokenCacheInvalidator,
+) *RateLimitService {
+	svc := NewRateLimitService(accountRepo, usageRepo, cfg, geminiQuotaService, tempUnschedCache)
+	svc.SetTimeoutCounterCache(timeoutCounterCache)
+	svc.SetSettingService(settingService)
+	svc.SetTokenCacheInvalidator(tokenCacheInvalidator)
+	return svc
+}
+
+// ProvideOpsMetricsCollector creates and starts OpsMetricsCollector.
+func ProvideOpsMetricsCollector(
+	opsRepo OpsRepository,
+	settingRepo SettingRepository,
+	accountRepo AccountRepository,
+	concurrencyService *ConcurrencyService,
+	db *sql.DB,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsMetricsCollector {
+	collector := NewOpsMetricsCollector(opsRepo, settingRepo, accountRepo, concurrencyService, db, redisClient, cfg)
+	collector.Start()
+	return collector
+}
+// ProvideOpsAggregationService creates and starts OpsAggregationService (hourly/daily pre-aggregation).
+func ProvideOpsAggregationService(
+	opsRepo OpsRepository,
+	settingRepo SettingRepository,
+	db *sql.DB,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsAggregationService {
+	svc := NewOpsAggregationService(opsRepo, settingRepo, db, redisClient, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideOpsAlertEvaluatorService creates and starts OpsAlertEvaluatorService.
+func ProvideOpsAlertEvaluatorService(
+	opsService *OpsService,
+	opsRepo OpsRepository,
+	emailService *EmailService,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsAlertEvaluatorService {
+	svc := NewOpsAlertEvaluatorService(opsService, opsRepo, emailService, redisClient, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideOpsCleanupService creates and starts OpsCleanupService (cron scheduled).
+func ProvideOpsCleanupService(
+	opsRepo OpsRepository,
+	db *sql.DB,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsCleanupService {
+	svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideOpsScheduledReportService creates and starts OpsScheduledReportService.
+func ProvideOpsScheduledReportService(
+	opsService *OpsService,
+	userService *UserService,
+	emailService *EmailService,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsScheduledReportService {
+	svc := NewOpsScheduledReportService(opsService, userService, emailService, redisClient, cfg)
+	svc.Start()
+	return svc
+}
+
+// ProvideAPIKeyAuthCacheInvalidator exposes the API key auth-cache invalidation capability.
+func ProvideAPIKeyAuthCacheInvalidator(apiKeyService *APIKeyService) APIKeyAuthCacheInvalidator {
+	return apiKeyService
+}
+
+// ProviderSet is the Wire provider set for all services
+var ProviderSet = wire.NewSet(
+	// Core services
+	NewAuthService,
+	NewUserService,
+	NewAPIKeyService,
+	ProvideAPIKeyAuthCacheInvalidator,
+	NewGroupService,
+	NewAccountService,
+	NewProxyService,
+	NewRedeemService,
+	NewPromoService,
+	NewUsageService,
+	NewDashboardService,
+	ProvidePricingService,
+	NewBillingService,
+	NewBillingCacheService,
+	NewAdminService,
+	NewGatewayService,
+	NewOpenAIGatewayService,
+	NewOAuthService,
+	NewOpenAIOAuthService,
+	NewGeminiOAuthService,
+	NewGeminiQuotaService,
+	NewCompositeTokenCacheInvalidator,
+	NewAntigravityOAuthService,
+	NewGeminiTokenProvider,
+	NewGeminiMessagesCompatService,
+	NewAntigravityTokenProvider,
+	NewAntigravityGatewayService,
+	ProvideRateLimitService,
+	NewAccountUsageService,
+	NewAccountTestService,
+	NewSettingService,
+	NewOpsService,
+	ProvideOpsMetricsCollector,
+	ProvideOpsAggregationService,
+	ProvideOpsAlertEvaluatorService,
+	ProvideOpsCleanupService,
+	ProvideOpsScheduledReportService,
+	NewEmailService,
+	ProvideEmailQueueService,
+	NewTurnstileService,
+	NewSubscriptionService,
+	ProvideConcurrencyService,
+	ProvideSchedulerSnapshotService,
+	NewIdentityService,
+	NewCRSSyncService,
+	ProvideUpdateService,
+	ProvideTokenRefreshService,
+	ProvideAccountExpiryService,
+	ProvideTimingWheelService,
+	ProvideDashboardAggregationService,
+	ProvideDeferredService,
+	NewAntigravityQuotaFetcher,
+	NewUserAttributeService,
+	NewUsageCache,
+)
diff --git a/backend/internal/setup/cli.go b/backend/internal/setup/cli.go
new file mode 100644
index 00000000..03ac3f66
--- /dev/null
+++ b/backend/internal/setup/cli.go
@@ -0,0 +1,295 @@
+// Package setup provides CLI commands and application initialization helpers.
+package setup + +import ( + "bufio" + "fmt" + "net/mail" + "os" + "regexp" + "strconv" + "strings" + + "golang.org/x/term" +) + +// CLI input validation functions (matching Web API validation) +func cliValidateHostname(host string) bool { + validHost := regexp.MustCompile(`^[a-zA-Z0-9.\-:]+$`) + return validHost.MatchString(host) && len(host) <= 253 +} + +func cliValidateDBName(name string) bool { + validName := regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_]*$`) + return validName.MatchString(name) && len(name) <= 63 +} + +func cliValidateUsername(name string) bool { + validName := regexp.MustCompile(`^[a-zA-Z0-9_]+$`) + return validName.MatchString(name) && len(name) <= 63 +} + +func cliValidateEmail(email string) bool { + _, err := mail.ParseAddress(email) + return err == nil && len(email) <= 254 +} + +func cliValidatePort(port int) bool { + return port > 0 && port <= 65535 +} + +func cliValidateSSLMode(mode string) bool { + validModes := map[string]bool{ + "disable": true, "require": true, "verify-ca": true, "verify-full": true, + } + return validModes[mode] +} + +// RunCLI runs the CLI setup wizard +func RunCLI() error { + reader := bufio.NewReader(os.Stdin) + + fmt.Println() + fmt.Println("╔═══════════════════════════════════════════╗") + fmt.Println("║ Sub2API Installation Wizard ║") + fmt.Println("╚═══════════════════════════════════════════╝") + fmt.Println() + + cfg := &SetupConfig{ + Server: ServerConfig{ + Host: "0.0.0.0", + Port: 8080, + Mode: "release", + }, + JWT: JWTConfig{ + ExpireHour: 24, + }, + } + + // Database configuration with validation + fmt.Println("── Database Configuration ──") + + for { + cfg.Database.Host = promptString(reader, "PostgreSQL Host", "localhost") + if cliValidateHostname(cfg.Database.Host) { + break + } + fmt.Println(" Invalid hostname format. Use alphanumeric, dots, hyphens only.") + } + + for { + cfg.Database.Port = promptInt(reader, "PostgreSQL Port", 5432) + if cliValidatePort(cfg.Database.Port) { + break + } + fmt.Println(" Invalid port. Must be between 1 and 65535.") + } + + for { + cfg.Database.User = promptString(reader, "PostgreSQL User", "postgres") + if cliValidateUsername(cfg.Database.User) { + break + } + fmt.Println(" Invalid username. Use alphanumeric and underscores only.") + } + + cfg.Database.Password = promptPassword("PostgreSQL Password") + + for { + cfg.Database.DBName = promptString(reader, "Database Name", "sub2api") + if cliValidateDBName(cfg.Database.DBName) { + break + } + fmt.Println(" Invalid database name. Start with letter, use alphanumeric and underscores.") + } + + for { + cfg.Database.SSLMode = promptString(reader, "SSL Mode", "disable") + if cliValidateSSLMode(cfg.Database.SSLMode) { + break + } + fmt.Println(" Invalid SSL mode. Use: disable, require, verify-ca, or verify-full.") + } + + fmt.Println() + fmt.Print("Testing database connection... ") + if err := TestDatabaseConnection(&cfg.Database); err != nil { + fmt.Println("FAILED") + return fmt.Errorf("database connection failed: %w", err) + } + fmt.Println("OK") + + // Redis configuration with validation + fmt.Println() + fmt.Println("── Redis Configuration ──") + + for { + cfg.Redis.Host = promptString(reader, "Redis Host", "localhost") + if cliValidateHostname(cfg.Redis.Host) { + break + } + fmt.Println(" Invalid hostname format. Use alphanumeric, dots, hyphens only.") + } + + for { + cfg.Redis.Port = promptInt(reader, "Redis Port", 6379) + if cliValidatePort(cfg.Redis.Port) { + break + } + fmt.Println(" Invalid port. 
Must be between 1 and 65535.") + } + + cfg.Redis.Password = promptPassword("Redis Password (optional)") + + for { + cfg.Redis.DB = promptInt(reader, "Redis DB", 0) + if cfg.Redis.DB >= 0 && cfg.Redis.DB <= 15 { + break + } + fmt.Println(" Invalid Redis DB. Must be between 0 and 15.") + } + + fmt.Println() + fmt.Print("Testing Redis connection... ") + if err := TestRedisConnection(&cfg.Redis); err != nil { + fmt.Println("FAILED") + return fmt.Errorf("redis connection failed: %w", err) + } + fmt.Println("OK") + + // Admin configuration with validation + fmt.Println() + fmt.Println("── Admin Account ──") + + for { + cfg.Admin.Email = promptString(reader, "Admin Email", "admin@example.com") + if cliValidateEmail(cfg.Admin.Email) { + break + } + fmt.Println(" Invalid email format.") + } + + for { + cfg.Admin.Password = promptPassword("Admin Password") + // SECURITY: Match Web API requirement of 8 characters minimum + if len(cfg.Admin.Password) < 8 { + fmt.Println(" Password must be at least 8 characters") + continue + } + if len(cfg.Admin.Password) > 128 { + fmt.Println(" Password must be at most 128 characters") + continue + } + confirm := promptPassword("Confirm Password") + if cfg.Admin.Password != confirm { + fmt.Println(" Passwords do not match") + continue + } + break + } + + // Server configuration with validation + fmt.Println() + fmt.Println("── Server Configuration ──") + + for { + cfg.Server.Port = promptInt(reader, "Server Port", 8080) + if cliValidatePort(cfg.Server.Port) { + break + } + fmt.Println(" Invalid port. Must be between 1 and 65535.") + } + + // Confirm and install + fmt.Println() + fmt.Println("── Configuration Summary ──") + fmt.Printf("Database: %s@%s:%d/%s\n", cfg.Database.User, cfg.Database.Host, cfg.Database.Port, cfg.Database.DBName) + fmt.Printf("Redis: %s:%d\n", cfg.Redis.Host, cfg.Redis.Port) + fmt.Printf("Admin: %s\n", cfg.Admin.Email) + fmt.Printf("Server: :%d\n", cfg.Server.Port) + fmt.Println() + + if !promptConfirm(reader, "Proceed with installation?") { + fmt.Println("Installation cancelled") + return nil + } + + fmt.Println() + fmt.Print("Installing... ") + if err := Install(cfg); err != nil { + fmt.Println("FAILED") + return err + } + fmt.Println("OK") + + fmt.Println() + fmt.Println("╔═══════════════════════════════════════════╗") + fmt.Println("║ Installation Complete! 
║") + fmt.Println("╚═══════════════════════════════════════════╝") + fmt.Println() + fmt.Println("Start the server with:") + fmt.Println(" ./sub2api") + fmt.Println() + fmt.Printf("Admin panel: http://localhost:%d\n", cfg.Server.Port) + fmt.Println() + + return nil +} + +func promptString(reader *bufio.Reader, prompt, defaultVal string) string { + if defaultVal != "" { + fmt.Printf(" %s [%s]: ", prompt, defaultVal) + } else { + fmt.Printf(" %s: ", prompt) + } + + input, _ := reader.ReadString('\n') + input = strings.TrimSpace(input) + + if input == "" { + return defaultVal + } + return input +} + +func promptInt(reader *bufio.Reader, prompt string, defaultVal int) int { + fmt.Printf(" %s [%d]: ", prompt, defaultVal) + + input, _ := reader.ReadString('\n') + input = strings.TrimSpace(input) + + if input == "" { + return defaultVal + } + + val, err := strconv.Atoi(input) + if err != nil { + return defaultVal + } + return val +} + +func promptPassword(prompt string) string { + fmt.Printf(" %s: ", prompt) + + // Try to read password without echo + if term.IsTerminal(int(os.Stdin.Fd())) { + password, err := term.ReadPassword(int(os.Stdin.Fd())) + fmt.Println() + if err == nil { + return string(password) + } + } + + // Fallback to regular input + reader := bufio.NewReader(os.Stdin) + input, _ := reader.ReadString('\n') + return strings.TrimSpace(input) +} + +func promptConfirm(reader *bufio.Reader, prompt string) bool { + fmt.Printf("%s [y/N]: ", prompt) + input, _ := reader.ReadString('\n') + input = strings.TrimSpace(strings.ToLower(input)) + return input == "y" || input == "yes" +} diff --git a/backend/internal/setup/handler.go b/backend/internal/setup/handler.go new file mode 100644 index 00000000..1c613dfd --- /dev/null +++ b/backend/internal/setup/handler.go @@ -0,0 +1,354 @@ +package setup + +import ( + "fmt" + "net/http" + "net/mail" + "regexp" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/pkg/sysutil" + + "github.com/gin-gonic/gin" +) + +// installMutex prevents concurrent installation attempts (TOCTOU protection) +var installMutex sync.Mutex + +// RegisterRoutes registers setup wizard routes +func RegisterRoutes(r *gin.Engine) { + setup := r.Group("/setup") + { + // Status endpoint is always accessible (read-only) + setup.GET("/status", getStatus) + + // All modification endpoints are protected by setupGuard + protected := setup.Group("") + protected.Use(setupGuard()) + { + protected.POST("/test-db", testDatabase) + protected.POST("/test-redis", testRedis) + protected.POST("/install", install) + } + } +} + +// SetupStatus represents the current setup state +type SetupStatus struct { + NeedsSetup bool `json:"needs_setup"` + Step string `json:"step"` +} + +// getStatus returns the current setup status +func getStatus(c *gin.Context) { + response.Success(c, SetupStatus{ + NeedsSetup: NeedsSetup(), + Step: "welcome", + }) +} + +// setupGuard middleware ensures setup endpoints are only accessible during setup mode +func setupGuard() gin.HandlerFunc { + return func(c *gin.Context) { + if !NeedsSetup() { + response.Error(c, http.StatusForbidden, "Setup is not allowed: system is already installed") + c.Abort() + return + } + c.Next() + } +} + +// validateHostname checks if a hostname/IP is safe (no injection characters) +func validateHostname(host string) bool { + // Allow only alphanumeric, dots, hyphens, and colons (for IPv6) + validHost := regexp.MustCompile(`^[a-zA-Z0-9.\-:]+$`) + return 
validHost.MatchString(host) && len(host) <= 253 +} + +// validateDBName checks if database name is safe +func validateDBName(name string) bool { + // Allow only alphanumeric and underscores, starting with letter + validName := regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9_]*$`) + return validName.MatchString(name) && len(name) <= 63 +} + +// validateUsername checks if username is safe +func validateUsername(name string) bool { + // Allow only alphanumeric and underscores + validName := regexp.MustCompile(`^[a-zA-Z0-9_]+$`) + return validName.MatchString(name) && len(name) <= 63 +} + +// validateEmail checks if email format is valid +func validateEmail(email string) bool { + _, err := mail.ParseAddress(email) + return err == nil && len(email) <= 254 +} + +// validatePassword checks password strength +func validatePassword(password string) error { + if len(password) < 8 { + return fmt.Errorf("password must be at least 8 characters") + } + if len(password) > 128 { + return fmt.Errorf("password must be at most 128 characters") + } + return nil +} + +// validatePort checks if port is in valid range +func validatePort(port int) bool { + return port > 0 && port <= 65535 +} + +// validateSSLMode checks if SSL mode is valid +func validateSSLMode(mode string) bool { + validModes := map[string]bool{ + "disable": true, "require": true, "verify-ca": true, "verify-full": true, + } + return validModes[mode] +} + +// TestDatabaseRequest represents database test request +type TestDatabaseRequest struct { + Host string `json:"host" binding:"required"` + Port int `json:"port" binding:"required"` + User string `json:"user" binding:"required"` + Password string `json:"password"` + DBName string `json:"dbname" binding:"required"` + SSLMode string `json:"sslmode"` +} + +// testDatabase tests database connection +func testDatabase(c *gin.Context) { + var req TestDatabaseRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.Error(c, http.StatusBadRequest, "Invalid request: "+err.Error()) + return + } + + // Security: Validate all inputs to prevent injection attacks + if !validateHostname(req.Host) { + response.Error(c, http.StatusBadRequest, "Invalid hostname format") + return + } + if !validatePort(req.Port) { + response.Error(c, http.StatusBadRequest, "Invalid port number") + return + } + if !validateUsername(req.User) { + response.Error(c, http.StatusBadRequest, "Invalid username format") + return + } + if !validateDBName(req.DBName) { + response.Error(c, http.StatusBadRequest, "Invalid database name format") + return + } + + if req.SSLMode == "" { + req.SSLMode = "disable" + } + if !validateSSLMode(req.SSLMode) { + response.Error(c, http.StatusBadRequest, "Invalid SSL mode") + return + } + + cfg := &DatabaseConfig{ + Host: req.Host, + Port: req.Port, + User: req.User, + Password: req.Password, + DBName: req.DBName, + SSLMode: req.SSLMode, + } + + if err := TestDatabaseConnection(cfg); err != nil { + response.Error(c, http.StatusBadRequest, "Connection failed: "+err.Error()) + return + } + + response.Success(c, gin.H{"message": "Connection successful"}) +} + +// TestRedisRequest represents Redis test request +type TestRedisRequest struct { + Host string `json:"host" binding:"required"` + Port int `json:"port" binding:"required"` + Password string `json:"password"` + DB int `json:"db"` +} + +// testRedis tests Redis connection +func testRedis(c *gin.Context) { + var req TestRedisRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.Error(c, http.StatusBadRequest, "Invalid request: 
"+err.Error()) + return + } + + // Security: Validate inputs + if !validateHostname(req.Host) { + response.Error(c, http.StatusBadRequest, "Invalid hostname format") + return + } + if !validatePort(req.Port) { + response.Error(c, http.StatusBadRequest, "Invalid port number") + return + } + if req.DB < 0 || req.DB > 15 { + response.Error(c, http.StatusBadRequest, "Invalid Redis database number (0-15)") + return + } + + cfg := &RedisConfig{ + Host: req.Host, + Port: req.Port, + Password: req.Password, + DB: req.DB, + } + + if err := TestRedisConnection(cfg); err != nil { + response.Error(c, http.StatusBadRequest, "Connection failed: "+err.Error()) + return + } + + response.Success(c, gin.H{"message": "Connection successful"}) +} + +// InstallRequest represents installation request +type InstallRequest struct { + Database DatabaseConfig `json:"database" binding:"required"` + Redis RedisConfig `json:"redis" binding:"required"` + Admin AdminConfig `json:"admin" binding:"required"` + Server ServerConfig `json:"server"` +} + +// install performs the installation +func install(c *gin.Context) { + // TOCTOU Protection: Acquire mutex to prevent concurrent installation + installMutex.Lock() + defer installMutex.Unlock() + + // Double-check after acquiring lock + if !NeedsSetup() { + response.Error(c, http.StatusForbidden, "Setup is not allowed: system is already installed") + return + } + + var req InstallRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.Error(c, http.StatusBadRequest, "Invalid request: "+err.Error()) + return + } + + // ========== COMPREHENSIVE INPUT VALIDATION ========== + // Database validation + if !validateHostname(req.Database.Host) { + response.Error(c, http.StatusBadRequest, "Invalid database hostname") + return + } + if !validatePort(req.Database.Port) { + response.Error(c, http.StatusBadRequest, "Invalid database port") + return + } + if !validateUsername(req.Database.User) { + response.Error(c, http.StatusBadRequest, "Invalid database username") + return + } + if !validateDBName(req.Database.DBName) { + response.Error(c, http.StatusBadRequest, "Invalid database name") + return + } + + // Redis validation + if !validateHostname(req.Redis.Host) { + response.Error(c, http.StatusBadRequest, "Invalid Redis hostname") + return + } + if !validatePort(req.Redis.Port) { + response.Error(c, http.StatusBadRequest, "Invalid Redis port") + return + } + if req.Redis.DB < 0 || req.Redis.DB > 15 { + response.Error(c, http.StatusBadRequest, "Invalid Redis database number") + return + } + + // Admin validation + if !validateEmail(req.Admin.Email) { + response.Error(c, http.StatusBadRequest, "Invalid admin email format") + return + } + if err := validatePassword(req.Admin.Password); err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + + // Server validation + if req.Server.Port != 0 && !validatePort(req.Server.Port) { + response.Error(c, http.StatusBadRequest, "Invalid server port") + return + } + + // ========== SET DEFAULTS ========== + if req.Database.SSLMode == "" { + req.Database.SSLMode = "disable" + } + if !validateSSLMode(req.Database.SSLMode) { + response.Error(c, http.StatusBadRequest, "Invalid SSL mode") + return + } + if req.Server.Host == "" { + req.Server.Host = "0.0.0.0" + } + if req.Server.Port == 0 { + req.Server.Port = 8080 + } + if req.Server.Mode == "" { + req.Server.Mode = "release" + } + // Validate server mode + if req.Server.Mode != "release" && req.Server.Mode != "debug" { + response.Error(c, http.StatusBadRequest, 
"Invalid server mode (must be 'release' or 'debug')") + return + } + + // Trim whitespace from string inputs + req.Admin.Email = strings.TrimSpace(req.Admin.Email) + req.Database.Host = strings.TrimSpace(req.Database.Host) + req.Database.User = strings.TrimSpace(req.Database.User) + req.Database.DBName = strings.TrimSpace(req.Database.DBName) + req.Redis.Host = strings.TrimSpace(req.Redis.Host) + + cfg := &SetupConfig{ + Database: req.Database, + Redis: req.Redis, + Admin: req.Admin, + Server: req.Server, + JWT: JWTConfig{ + ExpireHour: 24, + }, + } + + if err := Install(cfg); err != nil { + response.Error(c, http.StatusInternalServerError, "Installation failed: "+err.Error()) + return + } + + // Schedule service restart in background after sending response + // This ensures the client receives the success response before the service restarts + go func() { + // Wait a moment to ensure the response is sent + time.Sleep(500 * time.Millisecond) + sysutil.RestartServiceAsync() + }() + + response.Success(c, gin.H{ + "message": "Installation completed successfully. Service will restart automatically.", + "restart": true, + }) +} diff --git a/backend/internal/setup/setup.go b/backend/internal/setup/setup.go new file mode 100644 index 00000000..65118161 --- /dev/null +++ b/backend/internal/setup/setup.go @@ -0,0 +1,573 @@ +package setup + +import ( + "context" + "crypto/rand" + "database/sql" + "encoding/hex" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/repository" + "github.com/Wei-Shaw/sub2api/internal/service" + + _ "github.com/lib/pq" + "github.com/redis/go-redis/v9" + "gopkg.in/yaml.v3" +) + +// Config paths +const ( + ConfigFileName = "config.yaml" + InstallLockFile = ".installed" +) + +// GetDataDir returns the data directory for storing config and lock files. +// Priority: DATA_DIR env > /app/data (if exists and writable) > current directory +func GetDataDir() string { + // Check DATA_DIR environment variable first + if dir := os.Getenv("DATA_DIR"); dir != "" { + return dir + } + + // Check if /app/data exists and is writable (Docker environment) + dockerDataDir := "/app/data" + if info, err := os.Stat(dockerDataDir); err == nil && info.IsDir() { + // Try to check if writable by creating a temp file + testFile := dockerDataDir + "/.write_test" + if f, err := os.Create(testFile); err == nil { + _ = f.Close() + _ = os.Remove(testFile) + return dockerDataDir + } + } + + // Default to current directory + return "." +} + +// GetConfigFilePath returns the full path to config.yaml +func GetConfigFilePath() string { + return GetDataDir() + "/" + ConfigFileName +} + +// GetInstallLockPath returns the full path to .installed lock file +func GetInstallLockPath() string { + return GetDataDir() + "/" + InstallLockFile +} + +// SetupConfig holds the setup configuration +type SetupConfig struct { + Database DatabaseConfig `json:"database" yaml:"database"` + Redis RedisConfig `json:"redis" yaml:"redis"` + Admin AdminConfig `json:"admin" yaml:"-"` // Not stored in config file + Server ServerConfig `json:"server" yaml:"server"` + JWT JWTConfig `json:"jwt" yaml:"jwt"` + Timezone string `json:"timezone" yaml:"timezone"` // e.g. 
"Asia/Shanghai", "UTC" +} + +type DatabaseConfig struct { + Host string `json:"host" yaml:"host"` + Port int `json:"port" yaml:"port"` + User string `json:"user" yaml:"user"` + Password string `json:"password" yaml:"password"` + DBName string `json:"dbname" yaml:"dbname"` + SSLMode string `json:"sslmode" yaml:"sslmode"` +} + +type RedisConfig struct { + Host string `json:"host" yaml:"host"` + Port int `json:"port" yaml:"port"` + Password string `json:"password" yaml:"password"` + DB int `json:"db" yaml:"db"` +} + +type AdminConfig struct { + Email string `json:"email"` + Password string `json:"password"` +} + +type ServerConfig struct { + Host string `json:"host" yaml:"host"` + Port int `json:"port" yaml:"port"` + Mode string `json:"mode" yaml:"mode"` +} + +type JWTConfig struct { + Secret string `json:"secret" yaml:"secret"` + ExpireHour int `json:"expire_hour" yaml:"expire_hour"` +} + +// NeedsSetup checks if the system needs initial setup +// Uses multiple checks to prevent attackers from forcing re-setup by deleting config +func NeedsSetup() bool { + // Check 1: Config file must not exist + if _, err := os.Stat(GetConfigFilePath()); !os.IsNotExist(err) { + return false // Config exists, no setup needed + } + + // Check 2: Installation lock file (harder to bypass) + if _, err := os.Stat(GetInstallLockPath()); !os.IsNotExist(err) { + return false // Lock file exists, already installed + } + + return true +} + +// TestDatabaseConnection tests the database connection and creates database if not exists +func TestDatabaseConnection(cfg *DatabaseConfig) error { + // First, connect to the default 'postgres' database to check/create target database + defaultDSN := fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=postgres sslmode=%s", + cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.SSLMode, + ) + + db, err := sql.Open("postgres", defaultDSN) + if err != nil { + return fmt.Errorf("failed to connect to PostgreSQL: %w", err) + } + + defer func() { + if db == nil { + return + } + if err := db.Close(); err != nil { + log.Printf("failed to close postgres connection: %v", err) + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := db.PingContext(ctx); err != nil { + return fmt.Errorf("ping failed: %w", err) + } + + // Check if target database exists + var exists bool + row := db.QueryRowContext(ctx, "SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = $1)", cfg.DBName) + if err := row.Scan(&exists); err != nil { + return fmt.Errorf("failed to check database existence: %w", err) + } + + // Create database if not exists + if !exists { + // 注意:数据库名不能参数化,依赖前置输入校验保障安全。 + // Note: Database names cannot be parameterized, but we've already validated cfg.DBName + // in the handler using validateDBName() which only allows [a-zA-Z][a-zA-Z0-9_]* + _, err := db.ExecContext(ctx, fmt.Sprintf("CREATE DATABASE %s", cfg.DBName)) + if err != nil { + return fmt.Errorf("failed to create database '%s': %w", cfg.DBName, err) + } + log.Printf("Database '%s' created successfully", cfg.DBName) + } + + // Now connect to the target database to verify + if err := db.Close(); err != nil { + log.Printf("failed to close postgres connection: %v", err) + } + db = nil + + targetDSN := fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s", + cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.DBName, cfg.SSLMode, + ) + + targetDB, err := sql.Open("postgres", targetDSN) + if err != nil { + return fmt.Errorf("failed to connect to database '%s': 
%w", cfg.DBName, err) + } + + defer func() { + if err := targetDB.Close(); err != nil { + log.Printf("failed to close postgres connection: %v", err) + } + }() + + ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel2() + + if err := targetDB.PingContext(ctx2); err != nil { + return fmt.Errorf("ping target database failed: %w", err) + } + + return nil +} + +// TestRedisConnection tests the Redis connection +func TestRedisConnection(cfg *RedisConfig) error { + rdb := redis.NewClient(&redis.Options{ + Addr: fmt.Sprintf("%s:%d", cfg.Host, cfg.Port), + Password: cfg.Password, + DB: cfg.DB, + }) + defer func() { + if err := rdb.Close(); err != nil { + log.Printf("failed to close redis client: %v", err) + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := rdb.Ping(ctx).Err(); err != nil { + return fmt.Errorf("ping failed: %w", err) + } + + return nil +} + +// Install performs the installation with the given configuration +func Install(cfg *SetupConfig) error { + // Security check: prevent re-installation if already installed + if !NeedsSetup() { + return fmt.Errorf("system is already installed, re-installation is not allowed") + } + + // Generate JWT secret if not provided + if cfg.JWT.Secret == "" { + secret, err := generateSecret(32) + if err != nil { + return fmt.Errorf("failed to generate jwt secret: %w", err) + } + cfg.JWT.Secret = secret + log.Println("Warning: JWT secret auto-generated. Consider setting a fixed secret for production.") + } + + // Test connections + if err := TestDatabaseConnection(&cfg.Database); err != nil { + return fmt.Errorf("database connection failed: %w", err) + } + + if err := TestRedisConnection(&cfg.Redis); err != nil { + return fmt.Errorf("redis connection failed: %w", err) + } + + // Initialize database + if err := initializeDatabase(cfg); err != nil { + return fmt.Errorf("database initialization failed: %w", err) + } + + // Create admin user + if err := createAdminUser(cfg); err != nil { + return fmt.Errorf("admin user creation failed: %w", err) + } + + // Write config file + if err := writeConfigFile(cfg); err != nil { + return fmt.Errorf("config file creation failed: %w", err) + } + + // Create installation lock file to prevent re-setup attacks + if err := createInstallLock(); err != nil { + return fmt.Errorf("failed to create install lock: %w", err) + } + + return nil +} + +// createInstallLock creates a lock file to prevent re-installation attacks +func createInstallLock() error { + content := fmt.Sprintf("installed_at=%s\n", time.Now().UTC().Format(time.RFC3339)) + return os.WriteFile(GetInstallLockPath(), []byte(content), 0400) // Read-only for owner +} + +func initializeDatabase(cfg *SetupConfig) error { + dsn := fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s", + cfg.Database.Host, cfg.Database.Port, cfg.Database.User, + cfg.Database.Password, cfg.Database.DBName, cfg.Database.SSLMode, + ) + + db, err := sql.Open("postgres", dsn) + if err != nil { + return err + } + + defer func() { + if err := db.Close(); err != nil { + log.Printf("failed to close postgres connection: %v", err) + } + }() + + migrationCtx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + return repository.ApplyMigrations(migrationCtx, db) +} + +func createAdminUser(cfg *SetupConfig) error { + dsn := fmt.Sprintf( + "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s", + cfg.Database.Host, cfg.Database.Port, 
cfg.Database.User,
+		cfg.Database.Password, cfg.Database.DBName, cfg.Database.SSLMode,
+	)
+
+	db, err := sql.Open("postgres", dsn)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err := db.Close(); err != nil {
+			log.Printf("failed to close postgres connection: %v", err)
+		}
+	}()
+
+	// Use a timeout context so the install flow cannot block for long on a misbehaving database.
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	// Check if an admin already exists
+	var count int64
+	if err := db.QueryRowContext(ctx, "SELECT COUNT(1) FROM users WHERE role = $1", service.RoleAdmin).Scan(&count); err != nil {
+		return err
+	}
+	if count > 0 {
+		return nil // Admin already exists
+	}
+
+	admin := &service.User{
+		Email:       cfg.Admin.Email,
+		Role:        service.RoleAdmin,
+		Status:      service.StatusActive,
+		Balance:     0,
+		Concurrency: 5,
+		CreatedAt:   time.Now(),
+		UpdatedAt:   time.Now(),
+	}
+
+	if err := admin.SetPassword(cfg.Admin.Password); err != nil {
+		return err
+	}
+
+	_, err = db.ExecContext(
+		ctx,
+		`INSERT INTO users (email, password_hash, role, balance, concurrency, status, created_at, updated_at)
+		 VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
+		admin.Email,
+		admin.PasswordHash,
+		admin.Role,
+		admin.Balance,
+		admin.Concurrency,
+		admin.Status,
+		admin.CreatedAt,
+		admin.UpdatedAt,
+	)
+	return err
+}
+
+func writeConfigFile(cfg *SetupConfig) error {
+	// Ensure timezone has a default value
+	tz := cfg.Timezone
+	if tz == "" {
+		tz = "Asia/Shanghai"
+	}
+
+	// Prepare config for YAML (exclude sensitive data and admin config)
+	yamlConfig := struct {
+		Server   ServerConfig   `yaml:"server"`
+		Database DatabaseConfig `yaml:"database"`
+		Redis    RedisConfig    `yaml:"redis"`
+		JWT      struct {
+			Secret     string `yaml:"secret"`
+			ExpireHour int    `yaml:"expire_hour"`
+		} `yaml:"jwt"`
+		Default struct {
+			UserConcurrency int     `yaml:"user_concurrency"`
+			UserBalance     float64 `yaml:"user_balance"`
+			APIKeyPrefix    string  `yaml:"api_key_prefix"`
+			RateMultiplier  float64 `yaml:"rate_multiplier"`
+		} `yaml:"default"`
+		RateLimit struct {
+			RequestsPerMinute int `yaml:"requests_per_minute"`
+			BurstSize         int `yaml:"burst_size"`
+		} `yaml:"rate_limit"`
+		Timezone string `yaml:"timezone"`
+	}{
+		Server:   cfg.Server,
+		Database: cfg.Database,
+		Redis:    cfg.Redis,
+		JWT: struct {
+			Secret     string `yaml:"secret"`
+			ExpireHour int    `yaml:"expire_hour"`
+		}{
+			Secret:     cfg.JWT.Secret,
+			ExpireHour: cfg.JWT.ExpireHour,
+		},
+		Default: struct {
+			UserConcurrency int     `yaml:"user_concurrency"`
+			UserBalance     float64 `yaml:"user_balance"`
+			APIKeyPrefix    string  `yaml:"api_key_prefix"`
+			RateMultiplier  float64 `yaml:"rate_multiplier"`
+		}{
+			UserConcurrency: 5,
+			UserBalance:     0,
+			APIKeyPrefix:    "sk-",
+			RateMultiplier:  1.0,
+		},
+		RateLimit: struct {
+			RequestsPerMinute int `yaml:"requests_per_minute"`
+			BurstSize         int `yaml:"burst_size"`
+		}{
+			RequestsPerMinute: 60,
+			BurstSize:         10,
+		},
+		Timezone: tz,
+	}
+
+	data, err := yaml.Marshal(&yamlConfig)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(GetConfigFilePath(), data, 0600)
+}
+
+func generateSecret(length int) (string, error) {
+	bytes := make([]byte, length)
+	if _, err := rand.Read(bytes); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(bytes), nil
+}
+
+// =============================================================================
+// Auto Setup for Docker Deployment
+// =============================================================================
+
+// AutoSetupEnabled checks if auto setup is enabled via environment variable
+func AutoSetupEnabled() bool {
+	val := 
os.Getenv("AUTO_SETUP") + return val == "true" || val == "1" || val == "yes" +} + +// getEnvOrDefault gets environment variable or returns default value +func getEnvOrDefault(key, defaultValue string) string { + if val := os.Getenv(key); val != "" { + return val + } + return defaultValue +} + +// getEnvIntOrDefault gets environment variable as int or returns default value +func getEnvIntOrDefault(key string, defaultValue int) int { + if val := os.Getenv(key); val != "" { + if i, err := strconv.Atoi(val); err == nil { + return i + } + } + return defaultValue +} + +// AutoSetupFromEnv performs automatic setup using environment variables +// This is designed for Docker deployment where all config is passed via env vars +func AutoSetupFromEnv() error { + log.Println("Auto setup enabled, configuring from environment variables...") + log.Printf("Data directory: %s", GetDataDir()) + + // Get timezone from TZ or TIMEZONE env var (TZ is standard for Docker) + tz := getEnvOrDefault("TZ", "") + if tz == "" { + tz = getEnvOrDefault("TIMEZONE", "Asia/Shanghai") + } + + // Build config from environment variables + cfg := &SetupConfig{ + Database: DatabaseConfig{ + Host: getEnvOrDefault("DATABASE_HOST", "localhost"), + Port: getEnvIntOrDefault("DATABASE_PORT", 5432), + User: getEnvOrDefault("DATABASE_USER", "postgres"), + Password: getEnvOrDefault("DATABASE_PASSWORD", ""), + DBName: getEnvOrDefault("DATABASE_DBNAME", "sub2api"), + SSLMode: getEnvOrDefault("DATABASE_SSLMODE", "disable"), + }, + Redis: RedisConfig{ + Host: getEnvOrDefault("REDIS_HOST", "localhost"), + Port: getEnvIntOrDefault("REDIS_PORT", 6379), + Password: getEnvOrDefault("REDIS_PASSWORD", ""), + DB: getEnvIntOrDefault("REDIS_DB", 0), + }, + Admin: AdminConfig{ + Email: getEnvOrDefault("ADMIN_EMAIL", "admin@sub2api.local"), + Password: getEnvOrDefault("ADMIN_PASSWORD", ""), + }, + Server: ServerConfig{ + Host: getEnvOrDefault("SERVER_HOST", "0.0.0.0"), + Port: getEnvIntOrDefault("SERVER_PORT", 8080), + Mode: getEnvOrDefault("SERVER_MODE", "release"), + }, + JWT: JWTConfig{ + Secret: getEnvOrDefault("JWT_SECRET", ""), + ExpireHour: getEnvIntOrDefault("JWT_EXPIRE_HOUR", 24), + }, + Timezone: tz, + } + + // Generate JWT secret if not provided + if cfg.JWT.Secret == "" { + secret, err := generateSecret(32) + if err != nil { + return fmt.Errorf("failed to generate jwt secret: %w", err) + } + cfg.JWT.Secret = secret + log.Println("Warning: JWT secret auto-generated. Consider setting a fixed secret for production.") + } + + // Generate admin password if not provided + if cfg.Admin.Password == "" { + password, err := generateSecret(16) + if err != nil { + return fmt.Errorf("failed to generate admin password: %w", err) + } + cfg.Admin.Password = password + fmt.Printf("Generated admin password (one-time): %s\n", cfg.Admin.Password) + fmt.Println("IMPORTANT: Save this password! 
It will not be shown again.")
+	}
+
+	// Test database connection
+	log.Println("Testing database connection...")
+	if err := TestDatabaseConnection(&cfg.Database); err != nil {
+		return fmt.Errorf("database connection failed: %w", err)
+	}
+	log.Println("Database connection successful")
+
+	// Test Redis connection
+	log.Println("Testing Redis connection...")
+	if err := TestRedisConnection(&cfg.Redis); err != nil {
+		return fmt.Errorf("redis connection failed: %w", err)
+	}
+	log.Println("Redis connection successful")
+
+	// Initialize database
+	log.Println("Initializing database...")
+	if err := initializeDatabase(cfg); err != nil {
+		return fmt.Errorf("database initialization failed: %w", err)
+	}
+	log.Println("Database initialized successfully")
+
+	// Create admin user
+	log.Println("Creating admin user...")
+	if err := createAdminUser(cfg); err != nil {
+		return fmt.Errorf("admin user creation failed: %w", err)
+	}
+	log.Printf("Admin user created: %s", cfg.Admin.Email)
+
+	// Write config file
+	log.Println("Writing configuration file...")
+	if err := writeConfigFile(cfg); err != nil {
+		return fmt.Errorf("config file creation failed: %w", err)
+	}
+	log.Println("Configuration file created")
+
+	// Create installation lock file
+	if err := createInstallLock(); err != nil {
+		return fmt.Errorf("failed to create install lock: %w", err)
+	}
+	log.Println("Installation lock created")
+
+	log.Println("Auto setup completed successfully!")
+	return nil
+}
diff --git a/backend/internal/util/logredact/redact.go b/backend/internal/util/logredact/redact.go
new file mode 100644
index 00000000..b2d2429f
--- /dev/null
+++ b/backend/internal/util/logredact/redact.go
@@ -0,0 +1,100 @@
+package logredact
+
+import (
+	"encoding/json"
+	"strings"
+)
+
+// maxRedactDepth caps the recursion depth to guard against stack overflow
+const maxRedactDepth = 32
+
+var defaultSensitiveKeys = map[string]struct{}{
+	"authorization_code": {},
+	"code":               {},
+	"code_verifier":      {},
+	"access_token":       {},
+	"refresh_token":      {},
+	"id_token":           {},
+	"client_secret":      {},
+	"password":           {},
+}
+
+// RedactMap returns a copy of input in which sensitive values are masked with "***".
+func RedactMap(input map[string]any, extraKeys ...string) map[string]any {
+	if input == nil {
+		return map[string]any{}
+	}
+	keys := buildKeySet(extraKeys)
+	redacted, ok := redactValueWithDepth(input, keys, 0).(map[string]any)
+	if !ok {
+		return map[string]any{}
+	}
+	return redacted
+}
+
+// RedactJSON decodes raw JSON, masks sensitive values, and returns the
+// re-encoded document; it returns "" if raw is empty or cannot be processed.
+func RedactJSON(raw []byte, extraKeys ...string) string {
+	if len(raw) == 0 {
+		return ""
+	}
+	var value any
+	if err := json.Unmarshal(raw, &value); err != nil {
+		return ""
+	}
+	keys := buildKeySet(extraKeys)
+	redacted := redactValueWithDepth(value, keys, 0)
+	encoded, err := json.Marshal(redacted)
+	if err != nil {
+		return ""
+	}
+	return string(encoded)
+}
+
+func buildKeySet(extraKeys []string) map[string]struct{} {
+	keys := make(map[string]struct{}, len(defaultSensitiveKeys)+len(extraKeys))
+	for k := range defaultSensitiveKeys {
+		keys[k] = struct{}{}
+	}
+	for _, key := range extraKeys {
+		normalized := normalizeKey(key)
+		if normalized == "" {
+			continue
+		}
+		keys[normalized] = struct{}{}
+	}
+	return keys
+}
+
+func redactValueWithDepth(value any, keys map[string]struct{}, depth int) any {
+	if depth > maxRedactDepth {
+		return ""
+	}
+
+	switch v := value.(type) {
+	case map[string]any:
+		out := make(map[string]any, len(v))
+		for k, val := range v {
+			if isSensitiveKey(k, keys) {
+				out[k] = "***"
+				continue
+			}
+			out[k] = redactValueWithDepth(val, keys, depth+1)
+		}
+		return out
+	case []any:
+		out := make([]any, len(v))
+		for i, item := range v {
+			out[i] = redactValueWithDepth(item, keys, depth+1)
+		}
+		return out
+	default:
+		return value
+	}
+}
+
+func isSensitiveKey(key string, keys map[string]struct{}) bool {
+	_, ok := keys[normalizeKey(key)]
+	return ok
+}
+
+func normalizeKey(key string) string {
+	return strings.ToLower(strings.TrimSpace(key))
+}
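A quick sketch of the resulting behavior (the JSON literal below is hypothetical; the key set and the "***" mask come from the code above):

package main

import (
	"fmt"

	"github.com/Wei-Shaw/sub2api/internal/util/logredact"
)

func main() {
	raw := []byte(`{"access_token":"sk-live-123","model":"claude","oauth":{"password":"hunter2"}}`)
	fmt.Println(logredact.RedactJSON(raw))
	// Output (keys re-ordered alphabetically by json.Marshal):
	// {"access_token":"***","model":"claude","oauth":{"password":"***"}}
}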
diff --git a/backend/internal/util/responseheaders/responseheaders.go b/backend/internal/util/responseheaders/responseheaders.go
new file mode 100644
index 00000000..86c3f624
--- /dev/null
+++ b/backend/internal/util/responseheaders/responseheaders.go
@@ -0,0 +1,99 @@
+package responseheaders
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+)
+
+// defaultAllowed is the allowlist of response headers that may be passed through.
+// Note: the following headers are handled automatically by Go's HTTP package and
+// must not be set manually:
+// - content-length: set by the ResponseWriter based on what is actually written
+// - transfer-encoding: added/removed by the HTTP library as needed
+// - connection: connection reuse is managed by the HTTP library
+var defaultAllowed = map[string]struct{}{
+	"content-type":                   {},
+	"content-encoding":               {},
+	"content-language":               {},
+	"cache-control":                  {},
+	"etag":                           {},
+	"last-modified":                  {},
+	"expires":                        {},
+	"vary":                           {},
+	"date":                           {},
+	"x-request-id":                   {},
+	"x-ratelimit-limit-requests":     {},
+	"x-ratelimit-limit-tokens":       {},
+	"x-ratelimit-remaining-requests": {},
+	"x-ratelimit-remaining-tokens":   {},
+	"x-ratelimit-reset-requests":     {},
+	"x-ratelimit-reset-tokens":       {},
+	"retry-after":                    {},
+	"location":                       {},
+	"www-authenticate":               {},
+}
+
+// hopByHopHeaders are the hop-by-hop headers we skip; the HTTP library manages them itself.
+var hopByHopHeaders = map[string]struct{}{
+	"content-length":    {},
+	"transfer-encoding": {},
+	"connection":        {},
+}
+
+// FilterHeaders returns a copy of src containing only headers that pass the allowlist rules.
+func FilterHeaders(src http.Header, cfg config.ResponseHeaderConfig) http.Header {
+	allowed := make(map[string]struct{}, len(defaultAllowed)+len(cfg.AdditionalAllowed))
+	for key := range defaultAllowed {
+		allowed[key] = struct{}{}
+	}
+	// When disabled, only the default allowlist applies; additional allowed and
+	// force-remove entries have no effect.
+	if cfg.Enabled {
+		for _, key := range cfg.AdditionalAllowed {
+			normalized := strings.ToLower(strings.TrimSpace(key))
+			if normalized == "" {
+				continue
+			}
+			allowed[normalized] = struct{}{}
+		}
+	}
+
+	forceRemove := map[string]struct{}{}
+	if cfg.Enabled {
+		forceRemove = make(map[string]struct{}, len(cfg.ForceRemove))
+		for _, key := range cfg.ForceRemove {
+			normalized := strings.ToLower(strings.TrimSpace(key))
+			if normalized == "" {
+				continue
+			}
+			forceRemove[normalized] = struct{}{}
+		}
+	}
+
+	filtered := make(http.Header, len(src))
+	for key, values := range src {
+		lower := strings.ToLower(key)
+		if _, blocked := forceRemove[lower]; blocked {
+			continue
+		}
+		if _, ok := allowed[lower]; !ok {
+			continue
+		}
+		// Skip hop-by-hop headers; the HTTP library handles these itself
+		if _, isHopByHop := hopByHopHeaders[lower]; isHopByHop {
+			continue
+		}
+		for _, value := range values {
+			filtered.Add(key, value)
+		}
+	}
+	return filtered
+}
+
+// WriteFilteredHeaders copies the filtered subset of src onto dst.
+func WriteFilteredHeaders(dst http.Header, src http.Header, cfg config.ResponseHeaderConfig) {
+	filtered := FilterHeaders(src, cfg)
+	for key, values := range filtered {
+		for _, value := range values {
+			dst.Add(key, value)
+		}
+	}
+}
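A sketch of how a proxy handler might apply this filter before streaming an upstream response (relay and its arguments are hypothetical; only WriteFilteredHeaders and the config type come from this patch):

package example

import (
	"io"
	"net/http"

	"github.com/Wei-Shaw/sub2api/internal/config"
	"github.com/Wei-Shaw/sub2api/internal/util/responseheaders"
	"github.com/gin-gonic/gin"
)

func relay(c *gin.Context, upstream *http.Response, cfg config.ResponseHeaderConfig) {
	// Copy only the allowlisted upstream headers; Content-Length and
	// Transfer-Encoding are left for net/http to manage.
	responseheaders.WriteFilteredHeaders(c.Writer.Header(), upstream.Header, cfg)
	c.Status(upstream.StatusCode)
	_, _ = io.Copy(c.Writer, upstream.Body)
}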
src.Add("Content-Type", "application/json") + src.Add("X-Request-Id", "req-123") + src.Add("X-Test", "ok") + src.Add("Connection", "keep-alive") + src.Add("Content-Length", "123") + + cfg := config.ResponseHeaderConfig{ + Enabled: false, + ForceRemove: []string{"x-request-id"}, + } + + filtered := FilterHeaders(src, cfg) + if filtered.Get("Content-Type") != "application/json" { + t.Fatalf("expected Content-Type passthrough, got %q", filtered.Get("Content-Type")) + } + if filtered.Get("X-Request-Id") != "req-123" { + t.Fatalf("expected X-Request-Id allowed, got %q", filtered.Get("X-Request-Id")) + } + if filtered.Get("X-Test") != "" { + t.Fatalf("expected X-Test removed, got %q", filtered.Get("X-Test")) + } + if filtered.Get("Connection") != "" { + t.Fatalf("expected Connection to be removed, got %q", filtered.Get("Connection")) + } + if filtered.Get("Content-Length") != "" { + t.Fatalf("expected Content-Length to be removed, got %q", filtered.Get("Content-Length")) + } +} + +func TestFilterHeadersEnabledUsesAllowlist(t *testing.T) { + src := http.Header{} + src.Add("Content-Type", "application/json") + src.Add("X-Extra", "ok") + src.Add("X-Remove", "nope") + src.Add("X-Blocked", "nope") + + cfg := config.ResponseHeaderConfig{ + Enabled: true, + AdditionalAllowed: []string{"x-extra"}, + ForceRemove: []string{"x-remove"}, + } + + filtered := FilterHeaders(src, cfg) + if filtered.Get("Content-Type") != "application/json" { + t.Fatalf("expected Content-Type allowed, got %q", filtered.Get("Content-Type")) + } + if filtered.Get("X-Extra") != "ok" { + t.Fatalf("expected X-Extra allowed, got %q", filtered.Get("X-Extra")) + } + if filtered.Get("X-Remove") != "" { + t.Fatalf("expected X-Remove removed, got %q", filtered.Get("X-Remove")) + } + if filtered.Get("X-Blocked") != "" { + t.Fatalf("expected X-Blocked removed, got %q", filtered.Get("X-Blocked")) + } +} diff --git a/backend/internal/util/urlvalidator/validator.go b/backend/internal/util/urlvalidator/validator.go new file mode 100644 index 00000000..56a888b9 --- /dev/null +++ b/backend/internal/util/urlvalidator/validator.go @@ -0,0 +1,154 @@ +package urlvalidator + +import ( + "context" + "errors" + "fmt" + "net" + "net/url" + "strconv" + "strings" + "time" +) + +type ValidationOptions struct { + AllowedHosts []string + RequireAllowlist bool + AllowPrivate bool +} + +func ValidateURLFormat(raw string, allowInsecureHTTP bool) (string, error) { + // 最小格式校验:仅保证 URL 可解析且 scheme 合规,不做白名单/私网/SSRF 校验 + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return "", errors.New("url is required") + } + + parsed, err := url.Parse(trimmed) + if err != nil || parsed.Scheme == "" || parsed.Host == "" { + return "", fmt.Errorf("invalid url: %s", trimmed) + } + + scheme := strings.ToLower(parsed.Scheme) + if scheme != "https" && (!allowInsecureHTTP || scheme != "http") { + return "", fmt.Errorf("invalid url scheme: %s", parsed.Scheme) + } + + host := strings.TrimSpace(parsed.Hostname()) + if host == "" { + return "", errors.New("invalid host") + } + + if port := parsed.Port(); port != "" { + num, err := strconv.Atoi(port) + if err != nil || num <= 0 || num > 65535 { + return "", fmt.Errorf("invalid port: %s", port) + } + } + + return trimmed, nil +} + +func ValidateHTTPSURL(raw string, opts ValidationOptions) (string, error) { + trimmed := strings.TrimSpace(raw) + if trimmed == "" { + return "", errors.New("url is required") + } + + parsed, err := url.Parse(trimmed) + if err != nil || parsed.Scheme == "" || parsed.Host == "" { + return "", 
fmt.Errorf("invalid url: %s", trimmed) + } + if !strings.EqualFold(parsed.Scheme, "https") { + return "", fmt.Errorf("invalid url scheme: %s", parsed.Scheme) + } + + host := strings.ToLower(strings.TrimSpace(parsed.Hostname())) + if host == "" { + return "", errors.New("invalid host") + } + if !opts.AllowPrivate && isBlockedHost(host) { + return "", fmt.Errorf("host is not allowed: %s", host) + } + + allowlist := normalizeAllowlist(opts.AllowedHosts) + if opts.RequireAllowlist && len(allowlist) == 0 { + return "", errors.New("allowlist is not configured") + } + if len(allowlist) > 0 && !isAllowedHost(host, allowlist) { + return "", fmt.Errorf("host is not allowed: %s", host) + } + + parsed.Path = strings.TrimRight(parsed.Path, "/") + parsed.RawPath = "" + return strings.TrimRight(parsed.String(), "/"), nil +} + +// ValidateResolvedIP 验证 DNS 解析后的 IP 地址是否安全 +// 用于防止 DNS Rebinding 攻击:在实际 HTTP 请求时调用此函数验证解析后的 IP +func ValidateResolvedIP(host string) error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + ips, err := net.DefaultResolver.LookupIP(ctx, "ip", host) + if err != nil { + return fmt.Errorf("dns resolution failed: %w", err) + } + + for _, ip := range ips { + if ip.IsLoopback() || ip.IsPrivate() || ip.IsLinkLocalUnicast() || + ip.IsLinkLocalMulticast() || ip.IsUnspecified() { + return fmt.Errorf("resolved ip %s is not allowed", ip.String()) + } + } + return nil +} + +func normalizeAllowlist(values []string) []string { + if len(values) == 0 { + return nil + } + normalized := make([]string, 0, len(values)) + for _, v := range values { + entry := strings.ToLower(strings.TrimSpace(v)) + if entry == "" { + continue + } + if host, _, err := net.SplitHostPort(entry); err == nil { + entry = host + } + normalized = append(normalized, entry) + } + return normalized +} + +func isAllowedHost(host string, allowlist []string) bool { + for _, entry := range allowlist { + if entry == "" { + continue + } + if strings.HasPrefix(entry, "*.") { + suffix := strings.TrimPrefix(entry, "*.") + if host == suffix || strings.HasSuffix(host, "."+suffix) { + return true + } + continue + } + if host == entry { + return true + } + } + return false +} + +func isBlockedHost(host string) bool { + if host == "localhost" || strings.HasSuffix(host, ".localhost") { + return true + } + if ip := net.ParseIP(host); ip != nil { + if ip.IsLoopback() || ip.IsPrivate() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsUnspecified() { + return true + } + } + return false +} diff --git a/backend/internal/util/urlvalidator/validator_test.go b/backend/internal/util/urlvalidator/validator_test.go new file mode 100644 index 00000000..b7f9ffed --- /dev/null +++ b/backend/internal/util/urlvalidator/validator_test.go @@ -0,0 +1,24 @@ +package urlvalidator + +import "testing" + +func TestValidateURLFormat(t *testing.T) { + if _, err := ValidateURLFormat("", false); err == nil { + t.Fatalf("expected empty url to fail") + } + if _, err := ValidateURLFormat("://bad", false); err == nil { + t.Fatalf("expected invalid url to fail") + } + if _, err := ValidateURLFormat("http://example.com", false); err == nil { + t.Fatalf("expected http to fail when allow_insecure_http is false") + } + if _, err := ValidateURLFormat("https://example.com", false); err != nil { + t.Fatalf("expected https to pass, got %v", err) + } + if _, err := ValidateURLFormat("http://example.com", true); err != nil { + t.Fatalf("expected http to pass when allow_insecure_http is true, got %v", err) + } + if _, err := 
ValidateURLFormat("https://example.com:bad", true); err == nil { + t.Fatalf("expected invalid port to fail") + } +} diff --git a/backend/internal/web/embed_off.go b/backend/internal/web/embed_off.go new file mode 100644 index 00000000..346c31e9 --- /dev/null +++ b/backend/internal/web/embed_off.go @@ -0,0 +1,48 @@ +//go:build !embed + +// Package web provides embedded web assets for the application. +package web + +import ( + "context" + "errors" + "net/http" + + "github.com/gin-gonic/gin" +) + +// PublicSettingsProvider is an interface to fetch public settings +// This stub is needed for compilation when frontend is not embedded +type PublicSettingsProvider interface { + GetPublicSettingsForInjection(ctx context.Context) (any, error) +} + +// FrontendServer is a stub for non-embed builds +type FrontendServer struct{} + +// NewFrontendServer returns an error when frontend is not embedded +func NewFrontendServer(settingsProvider PublicSettingsProvider) (*FrontendServer, error) { + return nil, errors.New("frontend not embedded") +} + +// InvalidateCache is a no-op for non-embed builds +func (s *FrontendServer) InvalidateCache() {} + +// Middleware returns a handler that returns 404 for non-embed builds +func (s *FrontendServer) Middleware() gin.HandlerFunc { + return func(c *gin.Context) { + c.String(http.StatusNotFound, "Frontend not embedded. Build with -tags embed to include frontend.") + c.Abort() + } +} + +func ServeEmbeddedFrontend() gin.HandlerFunc { + return func(c *gin.Context) { + c.String(http.StatusNotFound, "Frontend not embedded. Build with -tags embed to include frontend.") + c.Abort() + } +} + +func HasEmbeddedFrontend() bool { + return false +} diff --git a/backend/internal/web/embed_on.go b/backend/internal/web/embed_on.go new file mode 100644 index 00000000..35697fbb --- /dev/null +++ b/backend/internal/web/embed_on.go @@ -0,0 +1,238 @@ +//go:build embed + +package web + +import ( + "bytes" + "context" + "embed" + "encoding/json" + "io" + "io/fs" + "net/http" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +//go:embed all:dist +var frontendFS embed.FS + +// PublicSettingsProvider is an interface to fetch public settings +type PublicSettingsProvider interface { + GetPublicSettingsForInjection(ctx context.Context) (any, error) +} + +// FrontendServer serves the embedded frontend with settings injection +type FrontendServer struct { + distFS fs.FS + fileServer http.Handler + baseHTML []byte + cache *HTMLCache + settings PublicSettingsProvider +} + +// NewFrontendServer creates a new frontend server with settings injection +func NewFrontendServer(settingsProvider PublicSettingsProvider) (*FrontendServer, error) { + distFS, err := fs.Sub(frontendFS, "dist") + if err != nil { + return nil, err + } + + // Read base HTML once + file, err := distFS.Open("index.html") + if err != nil { + return nil, err + } + defer func() { _ = file.Close() }() + + baseHTML, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + cache := NewHTMLCache() + cache.SetBaseHTML(baseHTML) + + return &FrontendServer{ + distFS: distFS, + fileServer: http.FileServer(http.FS(distFS)), + baseHTML: baseHTML, + cache: cache, + settings: settingsProvider, + }, nil +} + +// InvalidateCache invalidates the HTML cache (call when settings change) +func (s *FrontendServer) InvalidateCache() { + if s != nil && s.cache != nil { + s.cache.Invalidate() + } +} + +// Middleware returns the Gin middleware handler +func (s *FrontendServer) Middleware() gin.HandlerFunc { + return func(c *gin.Context) { + 
path := c.Request.URL.Path
+
+		// Skip API routes
+		if strings.HasPrefix(path, "/api/") ||
+			strings.HasPrefix(path, "/v1/") ||
+			strings.HasPrefix(path, "/v1beta/") ||
+			strings.HasPrefix(path, "/antigravity/") ||
+			strings.HasPrefix(path, "/setup/") ||
+			path == "/health" ||
+			path == "/responses" {
+			c.Next()
+			return
+		}
+
+		cleanPath := strings.TrimPrefix(path, "/")
+		if cleanPath == "" {
+			cleanPath = "index.html"
+		}
+
+		// For index.html or SPA routes, serve with injected settings
+		if cleanPath == "index.html" || !s.fileExists(cleanPath) {
+			s.serveIndexHTML(c)
+			return
+		}
+
+		// Serve static files normally
+		s.fileServer.ServeHTTP(c.Writer, c.Request)
+		c.Abort()
+	}
+}
+
+func (s *FrontendServer) fileExists(path string) bool {
+	file, err := s.distFS.Open(path)
+	if err != nil {
+		return false
+	}
+	_ = file.Close()
+	return true
+}
+
+func (s *FrontendServer) serveIndexHTML(c *gin.Context) {
+	// Check cache first
+	cached := s.cache.Get()
+	if cached != nil {
+		// Check If-None-Match for 304 response
+		if match := c.GetHeader("If-None-Match"); match == cached.ETag {
+			c.Status(http.StatusNotModified)
+			c.Abort()
+			return
+		}
+
+		c.Header("ETag", cached.ETag)
+		c.Header("Cache-Control", "no-cache") // Must revalidate
+		c.Data(http.StatusOK, "text/html; charset=utf-8", cached.Content)
+		c.Abort()
+		return
+	}
+
+	// Cache miss - fetch settings and render
+	ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
+	defer cancel()
+
+	settings, err := s.settings.GetPublicSettingsForInjection(ctx)
+	if err != nil {
+		// Fallback: serve without injection
+		c.Data(http.StatusOK, "text/html; charset=utf-8", s.baseHTML)
+		c.Abort()
+		return
+	}
+
+	settingsJSON, err := json.Marshal(settings)
+	if err != nil {
+		// Fallback: serve without injection
+		c.Data(http.StatusOK, "text/html; charset=utf-8", s.baseHTML)
+		c.Abort()
+		return
+	}
+
+	rendered := s.injectSettings(settingsJSON)
+	s.cache.Set(rendered, settingsJSON)
+
+	cached = s.cache.Get()
+	if cached != nil {
+		c.Header("ETag", cached.ETag)
+	}
+	c.Header("Cache-Control", "no-cache")
+	c.Data(http.StatusOK, "text/html; charset=utf-8", rendered)
+	c.Abort()
+}
+
+func (s *FrontendServer) injectSettings(settingsJSON []byte) []byte {
+	// Create the script tag to inject.
+	// NOTE: window.__PUBLIC_SETTINGS__ is an assumed global name here; adjust it
+	// to whatever key the frontend actually reads.
+	script := []byte(`<script>window.__PUBLIC_SETTINGS__ = ` + string(settingsJSON) + `</script>`)
+
+	// Inject before </head>
+	headClose := []byte("</head>")
+	return bytes.Replace(s.baseHTML, headClose, append(script, headClose...), 1)
+}
+
+// ServeEmbeddedFrontend returns a middleware for serving the embedded frontend.
+// This is the legacy function for backward compatibility when no settings provider is available.
+func ServeEmbeddedFrontend() gin.HandlerFunc {
+	distFS, err := fs.Sub(frontendFS, "dist")
+	if err != nil {
+		panic("failed to get dist subdirectory: " + err.Error())
+	}
+	fileServer := http.FileServer(http.FS(distFS))
+
+	return func(c *gin.Context) {
+		path := c.Request.URL.Path
+
+		if strings.HasPrefix(path, "/api/") ||
+			strings.HasPrefix(path, "/v1/") ||
+			strings.HasPrefix(path, "/v1beta/") ||
+			strings.HasPrefix(path, "/antigravity/") ||
+			strings.HasPrefix(path, "/setup/") ||
+			path == "/health" ||
+			path == "/responses" {
+			c.Next()
+			return
+		}
+
+		cleanPath := strings.TrimPrefix(path, "/")
+		if cleanPath == "" {
+			cleanPath = "index.html"
+		}
+
+		if file, err := distFS.Open(cleanPath); err == nil {
+			_ = file.Close()
+			fileServer.ServeHTTP(c.Writer, c.Request)
+			c.Abort()
+			return
+		}
+
+		serveIndexHTML(c, distFS)
+	}
+}
+
+func serveIndexHTML(c *gin.Context, fsys fs.FS) {
+	file, err := fsys.Open("index.html")
+	if err != nil {
+		c.String(http.StatusNotFound, "Frontend not found")
+		c.Abort()
+		return
+	}
+	defer func() { _ = file.Close() }()
+
+	content, err := io.ReadAll(file)
+	if err != nil {
+		c.String(http.StatusInternalServerError, "Failed to read index.html")
+		c.Abort()
+		return
+	}
+
+	c.Data(http.StatusOK, "text/html; charset=utf-8", content)
+	c.Abort()
+}
+
+func HasEmbeddedFrontend() bool {
+	_, err := frontendFS.ReadFile("dist/index.html")
+	return err == nil
+}
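A minimal wiring sketch for the embed build (mountFrontend and its arguments are hypothetical; the actual wiring lives elsewhere in this patch):

package example

import (
	"log"

	"github.com/Wei-Shaw/sub2api/internal/web"
	"github.com/gin-gonic/gin"
)

func mountFrontend(r *gin.Engine, settings web.PublicSettingsProvider) {
	fs, err := web.NewFrontendServer(settings)
	if err != nil {
		// Non-embed builds return "frontend not embedded" here.
		log.Printf("frontend disabled: %v", err)
		return
	}
	r.Use(fs.Middleware()) // API prefixes fall through; other paths serve the SPA
}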
diff --git a/backend/internal/web/html_cache.go b/backend/internal/web/html_cache.go
new file mode 100644
index 00000000..28269c89
--- /dev/null
+++ b/backend/internal/web/html_cache.go
@@ -0,0 +1,77 @@
+//go:build embed
+
+package web
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"sync"
+)
+
+// HTMLCache manages the cached index.html with injected settings
+type HTMLCache struct {
+	mu              sync.RWMutex
+	cachedHTML      []byte
+	etag            string
+	baseHTMLHash    string // Hash of the original index.html (immutable after build)
+	settingsVersion uint64 // Incremented when settings change
+}
+
+// CachedHTML represents the cache state
+type CachedHTML struct {
+	Content []byte
+	ETag    string
+}
+
+// NewHTMLCache creates a new HTML cache instance
+func NewHTMLCache() *HTMLCache {
+	return &HTMLCache{}
+}
+
+// SetBaseHTML initializes the cache with the base HTML template
+func (c *HTMLCache) SetBaseHTML(baseHTML []byte) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	hash := sha256.Sum256(baseHTML)
+	c.baseHTMLHash = hex.EncodeToString(hash[:8]) // First 8 bytes for brevity
+}
+
+// Invalidate marks the cache as stale
+func (c *HTMLCache) Invalidate() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.settingsVersion++
+	c.cachedHTML = nil
+	c.etag = ""
+}
+
+// Get returns the cached HTML or nil if the cache is stale
+func (c *HTMLCache) Get() *CachedHTML {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if c.cachedHTML == nil {
+		return nil
+	}
+	return &CachedHTML{
+		Content: c.cachedHTML,
+		ETag:    c.etag,
+	}
+}
+
+// Set updates the cache with newly rendered HTML
+func (c *HTMLCache) Set(html []byte, settingsJSON []byte) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	c.cachedHTML = html
+	c.etag = c.generateETag(settingsJSON)
+}
+
+// generateETag creates an ETag from the base HTML hash + settings hash
+func (c *HTMLCache) generateETag(settingsJSON []byte) string {
+	settingsHash := sha256.Sum256(settingsJSON)
+	return `"` + c.baseHTMLHash + "-" + hex.EncodeToString(settingsHash[:8]) + `"`
+}
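The cache's intended lifecycle, sketched (baseHTML, renderedHTML, and settingsJSON are illustrative; FrontendServer drives this internally, and the package requires the embed build tag):

package example

import "github.com/Wei-Shaw/sub2api/internal/web"

func renderFlow(baseHTML, renderedHTML, settingsJSON []byte) {
	cache := web.NewHTMLCache()
	cache.SetBaseHTML(baseHTML) // hash of the build's index.html feeds the ETag
	if cache.Get() == nil {     // miss: render with injected settings, then store
		cache.Set(renderedHTML, settingsJSON)
	}
	cache.Invalidate() // settings changed: the next Get() returns nil again
}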
diff --git a/backend/migrations/001_init.sql b/backend/migrations/001_init.sql
new file mode 100644
index 00000000..64078c42
--- /dev/null
+++ b/backend/migrations/001_init.sql
@@ -0,0 +1,172 @@
+-- Sub2API initial database migration
+-- PostgreSQL 15+
+
+-- 1. proxies: proxy IP table (no FK dependencies)
+CREATE TABLE IF NOT EXISTS proxies (
+    id BIGSERIAL PRIMARY KEY,
+    name VARCHAR(100) NOT NULL,
+    protocol VARCHAR(20) NOT NULL, -- http/https/socks5
+    host VARCHAR(255) NOT NULL,
+    port INT NOT NULL,
+    username VARCHAR(100),
+    password VARCHAR(100),
+    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    deleted_at TIMESTAMPTZ
+);
+
+CREATE INDEX IF NOT EXISTS idx_proxies_status ON proxies(status);
+CREATE INDEX IF NOT EXISTS idx_proxies_deleted_at ON proxies(deleted_at);
+
+-- 2. groups: group table (no FK dependencies)
+CREATE TABLE IF NOT EXISTS groups (
+    id BIGSERIAL PRIMARY KEY,
+    name VARCHAR(100) NOT NULL UNIQUE,
+    description TEXT,
+    rate_multiplier DECIMAL(10, 4) NOT NULL DEFAULT 1.0, -- rate multiplier
+    is_exclusive BOOLEAN NOT NULL DEFAULT FALSE,         -- whether the group is exclusive
+    status VARCHAR(20) NOT NULL DEFAULT 'active',        -- active/disabled
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    deleted_at TIMESTAMPTZ
+);
+
+CREATE INDEX IF NOT EXISTS idx_groups_name ON groups(name);
+CREATE INDEX IF NOT EXISTS idx_groups_status ON groups(status);
+CREATE INDEX IF NOT EXISTS idx_groups_is_exclusive ON groups(is_exclusive);
+CREATE INDEX IF NOT EXISTS idx_groups_deleted_at ON groups(deleted_at);
+
+-- 3. users: user table (no FK dependencies)
+CREATE TABLE IF NOT EXISTS users (
+    id BIGSERIAL PRIMARY KEY,
+    email VARCHAR(255) NOT NULL UNIQUE,
+    password_hash VARCHAR(255) NOT NULL,
+    role VARCHAR(20) NOT NULL DEFAULT 'user',     -- admin/user
+    balance DECIMAL(20, 8) NOT NULL DEFAULT 0,    -- balance (may go negative)
+    concurrency INT NOT NULL DEFAULT 5,           -- concurrency limit
+    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
+    allowed_groups BIGINT[] DEFAULT NULL,         -- IDs of groups the user may bind to
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    deleted_at TIMESTAMPTZ
+);
+
+CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
+CREATE INDEX IF NOT EXISTS idx_users_status ON users(status);
+CREATE INDEX IF NOT EXISTS idx_users_deleted_at ON users(deleted_at);
+
+-- 4. accounts: upstream account table (depends on proxies)
+CREATE TABLE IF NOT EXISTS accounts (
+    id BIGSERIAL PRIMARY KEY,
+    name VARCHAR(100) NOT NULL,
+    platform VARCHAR(50) NOT NULL,           -- anthropic/openai/gemini
+    type VARCHAR(20) NOT NULL,               -- oauth/apikey
+    credentials JSONB NOT NULL DEFAULT '{}', -- credential data (stored encrypted)
+    extra JSONB NOT NULL DEFAULT '{}',       -- extended metadata
+    proxy_id BIGINT REFERENCES proxies(id) ON DELETE SET NULL,
+    concurrency INT NOT NULL DEFAULT 3,      -- per-account concurrency limit
+    priority INT NOT NULL DEFAULT 50,        -- scheduling priority (1-100, lower is higher)
+    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled/error
+    error_message TEXT,
+    last_used_at TIMESTAMPTZ,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    deleted_at TIMESTAMPTZ
+);
+
+CREATE INDEX IF NOT EXISTS idx_accounts_platform ON accounts(platform);
+CREATE INDEX IF NOT EXISTS idx_accounts_type ON accounts(type);
+CREATE INDEX IF NOT EXISTS idx_accounts_status ON accounts(status);
+CREATE INDEX IF NOT EXISTS idx_accounts_proxy_id ON accounts(proxy_id);
+CREATE INDEX IF NOT EXISTS idx_accounts_priority ON accounts(priority);
+CREATE INDEX IF NOT EXISTS idx_accounts_last_used_at ON accounts(last_used_at);
+CREATE INDEX IF NOT EXISTS idx_accounts_deleted_at ON accounts(deleted_at);
+
+-- 5. api_keys: API key table (depends on users, groups)
+CREATE TABLE IF NOT EXISTS api_keys (
+    id BIGSERIAL PRIMARY KEY,
+    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    key VARCHAR(64) NOT NULL UNIQUE, -- sk-xxx format
+    name VARCHAR(100) NOT NULL,
+    group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL,
+    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/disabled
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    deleted_at TIMESTAMPTZ
+);
+
+CREATE INDEX IF NOT EXISTS idx_api_keys_key ON api_keys(key);
+CREATE INDEX IF NOT EXISTS idx_api_keys_user_id ON api_keys(user_id);
+CREATE INDEX IF NOT EXISTS idx_api_keys_group_id ON api_keys(group_id);
+CREATE INDEX IF NOT EXISTS idx_api_keys_status ON api_keys(status);
+CREATE INDEX IF NOT EXISTS idx_api_keys_deleted_at ON api_keys(deleted_at);
+
+-- 6. account_groups: account-group join table (depends on accounts, groups)
+CREATE TABLE IF NOT EXISTS account_groups (
+    account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
+    group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
+    priority INT NOT NULL DEFAULT 50, -- priority within the group
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    PRIMARY KEY (account_id, group_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_account_groups_group_id ON account_groups(group_id);
+CREATE INDEX IF NOT EXISTS idx_account_groups_priority ON account_groups(priority);
+
+-- 7. redeem_codes: redeem code table (depends on users)
+CREATE TABLE IF NOT EXISTS redeem_codes (
+    id BIGSERIAL PRIMARY KEY,
+    code VARCHAR(32) NOT NULL UNIQUE,             -- the redemption code
+    type VARCHAR(20) NOT NULL DEFAULT 'balance',  -- balance
+    value DECIMAL(20, 8) NOT NULL,                -- face value (USD)
+    status VARCHAR(20) NOT NULL DEFAULT 'unused', -- unused/used
+    used_by BIGINT REFERENCES users(id) ON DELETE SET NULL,
+    used_at TIMESTAMPTZ,
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX IF NOT EXISTS idx_redeem_codes_code ON redeem_codes(code);
+CREATE INDEX IF NOT EXISTS idx_redeem_codes_status ON redeem_codes(status);
+CREATE INDEX IF NOT EXISTS idx_redeem_codes_used_by ON redeem_codes(used_by);
+-- 8. usage_logs: usage log table (depends on users, api_keys, accounts)
+CREATE TABLE IF NOT EXISTS usage_logs (
+    id BIGSERIAL PRIMARY KEY,
+    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    api_key_id BIGINT NOT NULL REFERENCES api_keys(id) ON DELETE CASCADE,
+    account_id BIGINT NOT NULL REFERENCES accounts(id) ON DELETE CASCADE,
+    request_id VARCHAR(64),
+    model VARCHAR(100) NOT NULL,
+
+    -- Token usage (4 classes)
+    input_tokens INT NOT NULL DEFAULT 0,
+    output_tokens INT NOT NULL DEFAULT 0,
+    cache_creation_tokens INT NOT NULL DEFAULT 0,
+    cache_read_tokens INT NOT NULL DEFAULT 0,
+
+    -- Detailed cache-creation breakdown
+    cache_creation_5m_tokens INT NOT NULL DEFAULT 0,
+    cache_creation_1h_tokens INT NOT NULL DEFAULT 0,
+
+    -- Costs (USD)
+    input_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    output_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    cache_creation_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    cache_read_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0,  -- raw total cost
+    actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, -- cost actually charged
+
+    -- Metadata
+    stream BOOLEAN NOT NULL DEFAULT FALSE,
+    duration_ms INT,
+
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX IF NOT EXISTS idx_usage_logs_user_id ON usage_logs(user_id);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_api_key_id ON usage_logs(api_key_id);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_account_id ON usage_logs(account_id);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_model ON usage_logs(model);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_created_at ON usage_logs(created_at);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_user_created ON usage_logs(user_id, created_at);
diff --git a/backend/migrations/002_account_type_migration.sql b/backend/migrations/002_account_type_migration.sql
new file mode 100644
index 00000000..b1c955ef
--- /dev/null
+++ b/backend/migrations/002_account_type_migration.sql
@@ -0,0 +1,33 @@
+-- Sub2API account type migration
+-- Migrates 'official' accounts to 'oauth' or 'setup-token',
+-- based on the credentials->>'scope' field:
+-- - scopes containing 'user:profile' mean 'oauth'
+-- - inference-only scopes ('user:inference') mean 'setup-token'
+
+-- 1. Migrate official accounts whose scope includes profile to oauth
+UPDATE accounts
+SET type = 'oauth',
+    updated_at = NOW()
+WHERE type = 'official'
+  AND credentials->>'scope' LIKE '%user:profile%';
+
+-- 2. Migrate official accounts with inference-only scope to setup-token
+UPDATE accounts
+SET type = 'setup-token',
+    updated_at = NOW()
+WHERE type = 'official'
+  AND (
+    credentials->>'scope' = 'user:inference'
+    OR credentials->>'scope' NOT LIKE '%user:profile%'
+  );
+
+-- 3. Handle legacy accounts without a scope field (default to oauth)
+UPDATE accounts
+SET type = 'oauth',
+    updated_at = NOW()
+WHERE type = 'official'
+  AND (credentials->>'scope' IS NULL OR credentials->>'scope' = '');
+
+-- 4. Verify the migration (check whether any 'official' accounts remain)
+-- SELECT COUNT(*) FROM accounts WHERE type = 'official';
+-- A result of 0 means the migration succeeded
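The intent of the three UPDATEs, condensed into a Go sketch (accountType is a hypothetical helper, not part of this patch; it follows the stated intent that rows without a recorded scope default to oauth):

package migration

import "strings"

// accountType mirrors migration 002: profile-capable scopes are full OAuth
// accounts, inference-only scopes are setup tokens, and legacy rows with no
// recorded scope default to oauth (step 3).
func accountType(scope string) string {
	if scope == "" || strings.Contains(scope, "user:profile") {
		return "oauth"
	}
	return "setup-token" // e.g. scope == "user:inference"
}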
diff --git a/backend/migrations/003_subscription.sql b/backend/migrations/003_subscription.sql
new file mode 100644
index 00000000..d9c54a32
--- /dev/null
+++ b/backend/migrations/003_subscription.sql
@@ -0,0 +1,65 @@
+-- Sub2API subscription feature migration
+-- Adds subscription groups and per-user subscriptions
+
+-- 1. Extend groups with subscription-related fields
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS platform VARCHAR(50) NOT NULL DEFAULT 'anthropic';
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS subscription_type VARCHAR(20) NOT NULL DEFAULT 'standard';
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS daily_limit_usd DECIMAL(20, 8) DEFAULT NULL;
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS weekly_limit_usd DECIMAL(20, 8) DEFAULT NULL;
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS monthly_limit_usd DECIMAL(20, 8) DEFAULT NULL;
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS default_validity_days INT NOT NULL DEFAULT 30;
+
+-- Indexes
+CREATE INDEX IF NOT EXISTS idx_groups_platform ON groups(platform);
+CREATE INDEX IF NOT EXISTS idx_groups_subscription_type ON groups(subscription_type);
+
+-- 2. Create the user_subscriptions table
+CREATE TABLE IF NOT EXISTS user_subscriptions (
+    id BIGSERIAL PRIMARY KEY,
+    user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+    group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
+
+    -- Subscription validity window
+    starts_at TIMESTAMPTZ NOT NULL,
+    expires_at TIMESTAMPTZ NOT NULL,
+    status VARCHAR(20) NOT NULL DEFAULT 'active', -- active/expired/suspended
+
+    -- Sliding-window start times (NULL = not yet activated)
+    daily_window_start TIMESTAMPTZ,
+    weekly_window_start TIMESTAMPTZ,
+    monthly_window_start TIMESTAMPTZ,
+
+    -- Quota consumed in the current window (USD, computed from total_cost)
+    daily_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    weekly_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
+    monthly_usage_usd DECIMAL(20, 10) NOT NULL DEFAULT 0,
+
+    -- Admin assignment info
+    assigned_by BIGINT REFERENCES users(id) ON DELETE SET NULL,
+    assigned_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    notes TEXT,
+
+    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+
+    -- Unique constraint: one subscription per user per group
+    UNIQUE(user_id, group_id)
+);
+
+-- user_subscriptions indexes
+CREATE INDEX IF NOT EXISTS idx_user_subscriptions_user_id ON user_subscriptions(user_id);
+CREATE INDEX IF NOT EXISTS idx_user_subscriptions_group_id ON user_subscriptions(group_id);
+CREATE INDEX IF NOT EXISTS idx_user_subscriptions_status ON user_subscriptions(status);
+CREATE INDEX IF NOT EXISTS idx_user_subscriptions_expires_at ON user_subscriptions(expires_at);
+CREATE INDEX IF NOT EXISTS idx_user_subscriptions_assigned_by ON user_subscriptions(assigned_by);
+
+-- 3. Extend usage_logs with group and subscription references
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL;
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS subscription_id BIGINT REFERENCES user_subscriptions(id) ON DELETE SET NULL;
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10, 4) NOT NULL DEFAULT 1;
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS first_token_ms INT;
+
+-- New usage_logs indexes
+CREATE INDEX IF NOT EXISTS idx_usage_logs_group_id ON usage_logs(group_id);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_subscription_id ON usage_logs(subscription_id);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_sub_created ON usage_logs(subscription_id, created_at);
diff --git a/backend/migrations/004_add_redeem_code_notes.sql b/backend/migrations/004_add_redeem_code_notes.sql
new file mode 100644
index 00000000..eeb37b10
--- /dev/null
+++ b/backend/migrations/004_add_redeem_code_notes.sql
@@ -0,0 +1,6 @@
+-- Add a notes field to redeem_codes
+
+ALTER TABLE redeem_codes
+ADD COLUMN IF NOT EXISTS notes TEXT DEFAULT NULL;
+
+COMMENT ON COLUMN redeem_codes.notes IS 'Notes (the reason recorded when an admin makes an adjustment)';
diff --git a/backend/migrations/005_schema_parity.sql b/backend/migrations/005_schema_parity.sql
new file mode 100644
index 00000000..0ee3f121
--- /dev/null
+++ b/backend/migrations/005_schema_parity.sql
@@ -0,0 +1,42 @@
+-- Align SQL migrations with current GORM persistence models.
+-- This file is designed to be safe on both fresh installs and existing databases.
+
+-- users: add fields added after the initial migration
+ALTER TABLE users ADD COLUMN IF NOT EXISTS username VARCHAR(100) NOT NULL DEFAULT '';
+ALTER TABLE users ADD COLUMN IF NOT EXISTS wechat VARCHAR(100) NOT NULL DEFAULT '';
+ALTER TABLE users ADD COLUMN IF NOT EXISTS notes TEXT NOT NULL DEFAULT '';
+
+-- api_keys: allow longer keys (GORM model uses size:128)
+ALTER TABLE api_keys ALTER COLUMN key TYPE VARCHAR(128);
+
+-- accounts: scheduling and rate-limit fields used by repository queries
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS schedulable BOOLEAN NOT NULL DEFAULT TRUE;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limited_at TIMESTAMPTZ;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS rate_limit_reset_at TIMESTAMPTZ;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS overload_until TIMESTAMPTZ;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_start TIMESTAMPTZ;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_end TIMESTAMPTZ;
+ALTER TABLE accounts ADD COLUMN IF NOT EXISTS session_window_status VARCHAR(20);
+
+CREATE INDEX IF NOT EXISTS idx_accounts_schedulable ON accounts(schedulable);
+CREATE INDEX IF NOT EXISTS idx_accounts_rate_limited_at ON accounts(rate_limited_at);
+CREATE INDEX IF NOT EXISTS idx_accounts_rate_limit_reset_at ON accounts(rate_limit_reset_at);
+CREATE INDEX IF NOT EXISTS idx_accounts_overload_until ON accounts(overload_until);
+
+-- redeem_codes: subscription redeem fields
+ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL;
+ALTER TABLE redeem_codes ADD COLUMN IF NOT EXISTS validity_days INT NOT NULL DEFAULT 30;
+CREATE INDEX IF NOT EXISTS idx_redeem_codes_group_id ON redeem_codes(group_id);
+
+-- usage_logs: billing type used by filters and stats
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS billing_type SMALLINT NOT NULL DEFAULT 0;
+CREATE INDEX IF NOT EXISTS idx_usage_logs_billing_type ON usage_logs(billing_type);
+
+-- settings: key-value store
+CREATE TABLE IF NOT EXISTS settings (
+    id BIGSERIAL 
PRIMARY KEY, + key VARCHAR(100) NOT NULL UNIQUE, + value TEXT NOT NULL, + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + diff --git a/backend/migrations/006_fix_invalid_subscription_expires_at.sql b/backend/migrations/006_fix_invalid_subscription_expires_at.sql new file mode 100644 index 00000000..7a0c2642 --- /dev/null +++ b/backend/migrations/006_fix_invalid_subscription_expires_at.sql @@ -0,0 +1,10 @@ +-- Fix legacy subscription records with invalid expires_at (year > 2099). +DO $$ +BEGIN + IF to_regclass('public.user_subscriptions') IS NOT NULL THEN + UPDATE user_subscriptions + SET expires_at = TIMESTAMPTZ '2099-12-31 23:59:59+00' + WHERE expires_at > TIMESTAMPTZ '2099-12-31 23:59:59+00'; + END IF; +END $$; + diff --git a/backend/migrations/007_add_user_allowed_groups.sql b/backend/migrations/007_add_user_allowed_groups.sql new file mode 100644 index 00000000..a61400d2 --- /dev/null +++ b/backend/migrations/007_add_user_allowed_groups.sql @@ -0,0 +1,20 @@ +-- Add user_allowed_groups join table to replace users.allowed_groups (BIGINT[]). +-- Phase 1: create table + backfill from the legacy array column. + +CREATE TABLE IF NOT EXISTS user_allowed_groups ( + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + group_id BIGINT NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + PRIMARY KEY (user_id, group_id) +); + +CREATE INDEX IF NOT EXISTS idx_user_allowed_groups_group_id ON user_allowed_groups(group_id); + +-- Backfill from the legacy users.allowed_groups array. +INSERT INTO user_allowed_groups (user_id, group_id) +SELECT u.id, x.group_id +FROM users u +CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) +JOIN groups g ON g.id = x.group_id +WHERE u.allowed_groups IS NOT NULL +ON CONFLICT DO NOTHING; diff --git a/backend/migrations/008_seed_default_group.sql b/backend/migrations/008_seed_default_group.sql new file mode 100644 index 00000000..cfe2640f --- /dev/null +++ b/backend/migrations/008_seed_default_group.sql @@ -0,0 +1,4 @@ +-- Seed a default group for fresh installs. +INSERT INTO groups (name, description, created_at, updated_at) +SELECT 'default', 'Default group', NOW(), NOW() +WHERE NOT EXISTS (SELECT 1 FROM groups); diff --git a/backend/migrations/009_fix_usage_logs_cache_columns.sql b/backend/migrations/009_fix_usage_logs_cache_columns.sql new file mode 100644 index 00000000..979405af --- /dev/null +++ b/backend/migrations/009_fix_usage_logs_cache_columns.sql @@ -0,0 +1,37 @@ +-- Ensure usage_logs cache token columns use the underscored names expected by code. +-- Backfill from legacy column names if they exist. 
+
+ALTER TABLE usage_logs
+    ADD COLUMN IF NOT EXISTS cache_creation_5m_tokens INT NOT NULL DEFAULT 0;
+
+ALTER TABLE usage_logs
+    ADD COLUMN IF NOT EXISTS cache_creation_1h_tokens INT NOT NULL DEFAULT 0;
+
+DO $$
+BEGIN
+    IF EXISTS (
+        SELECT 1
+        FROM information_schema.columns
+        WHERE table_schema = 'public'
+          AND table_name = 'usage_logs'
+          AND column_name = 'cache_creation5m_tokens'
+    ) THEN
+        UPDATE usage_logs
+        SET cache_creation_5m_tokens = cache_creation5m_tokens
+        WHERE cache_creation_5m_tokens = 0
+          AND cache_creation5m_tokens <> 0;
+    END IF;
+
+    IF EXISTS (
+        SELECT 1
+        FROM information_schema.columns
+        WHERE table_schema = 'public'
+          AND table_name = 'usage_logs'
+          AND column_name = 'cache_creation1h_tokens'
+    ) THEN
+        UPDATE usage_logs
+        SET cache_creation_1h_tokens = cache_creation1h_tokens
+        WHERE cache_creation_1h_tokens = 0
+          AND cache_creation1h_tokens <> 0;
+    END IF;
+END $$;
diff --git a/backend/migrations/010_add_usage_logs_aggregated_indexes.sql b/backend/migrations/010_add_usage_logs_aggregated_indexes.sql
new file mode 100644
index 00000000..ab2dbbc1
--- /dev/null
+++ b/backend/migrations/010_add_usage_logs_aggregated_indexes.sql
@@ -0,0 +1,4 @@
+-- Composite indexes to support aggregate queries
+CREATE INDEX IF NOT EXISTS idx_usage_logs_account_created_at ON usage_logs(account_id, created_at);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_api_key_created_at ON usage_logs(api_key_id, created_at);
+CREATE INDEX IF NOT EXISTS idx_usage_logs_model_created_at ON usage_logs(model, created_at);
diff --git a/backend/migrations/011_remove_duplicate_unique_indexes.sql b/backend/migrations/011_remove_duplicate_unique_indexes.sql
new file mode 100644
index 00000000..8fd62710
--- /dev/null
+++ b/backend/migrations/011_remove_duplicate_unique_indexes.sql
@@ -0,0 +1,39 @@
+-- 011_remove_duplicate_unique_indexes.sql
+-- Remove duplicate unique indexes.
+-- These fields already declare .Unique() in the ent schema's Fields(), so
+-- declaring index.Fields("x").Unique() again in Indexes() creates a duplicate
+-- index. This migration cleans up those redundant indexes.
+
+-- Naming conventions for the duplicates (auto-generated by Ent / left over from
+-- earlier migrations):
+-- - index from field-level Unique():             <table>_<column>_key
+-- - index from Unique() inside Indexes():        <entity>_<field>
+-- - non-unique index from the initial migration: idx_<table>_<column>
+
+-- Drop only when the index exists (idempotent)
+
+-- api_keys: key column
+DROP INDEX IF EXISTS apikey_key;
+DROP INDEX IF EXISTS api_keys_key;
+DROP INDEX IF EXISTS idx_api_keys_key;
+
+-- users: email column
+DROP INDEX IF EXISTS user_email;
+DROP INDEX IF EXISTS users_email;
+DROP INDEX IF EXISTS idx_users_email;
+
+-- settings: key column
+DROP INDEX IF EXISTS settings_key;
+DROP INDEX IF EXISTS idx_settings_key;
+
+-- redeem_codes: code column
+DROP INDEX IF EXISTS redeemcode_code;
+DROP INDEX IF EXISTS redeem_codes_code;
+DROP INDEX IF EXISTS idx_redeem_codes_code;
+
+-- groups: name column
+DROP INDEX IF EXISTS group_name;
+DROP INDEX IF EXISTS groups_name;
+DROP INDEX IF EXISTS idx_groups_name;
+
+-- Note: each field's uniqueness is still enforced by the constraint created by
+-- field-level Unique(), e.g. api_keys_key_key and users_email_key.
diff --git a/backend/migrations/012_add_user_subscription_soft_delete.sql b/backend/migrations/012_add_user_subscription_soft_delete.sql
new file mode 100644
index 00000000..b6cb7366
--- /dev/null
+++ b/backend/migrations/012_add_user_subscription_soft_delete.sql
@@ -0,0 +1,13 @@
+-- 012: add soft-delete support to user_subscriptions
+-- Task: fix-medium-data-hygiene 1.1
+
+-- Add the deleted_at column
+ALTER TABLE user_subscriptions
+ADD COLUMN IF NOT EXISTS deleted_at TIMESTAMPTZ DEFAULT NULL;
+
+-- Index deleted_at to speed up soft-delete queries
+CREATE INDEX IF NOT EXISTS usersubscription_deleted_at
+ON user_subscriptions (deleted_at);
+
+-- Note: keeps this entity consistent with the other soft-deleted entities
+COMMENT ON COLUMN user_subscriptions.deleted_at IS 'Soft-delete timestamp; NULL means the row is not deleted';
diff --git a/backend/migrations/013_log_orphan_allowed_groups.sql b/backend/migrations/013_log_orphan_allowed_groups.sql
new file mode 100644
index 00000000..976c0aca
--- /dev/null
+++ b/backend/migrations/013_log_orphan_allowed_groups.sql
@@ -0,0 +1,32 @@
+-- 013: record orphan group_ids referenced by users.allowed_groups
+-- Task: fix-medium-data-hygiene 3.1
+--
+-- Purpose: before dropping the legacy allowed_groups column, record every orphan
+-- entry that references a group which no longer exists.
+-- These records can be used for auditing or later data repair.
+
+-- Audit table for orphan allowed_groups entries
+CREATE TABLE IF NOT EXISTS orphan_allowed_groups_audit (
+    id BIGSERIAL PRIMARY KEY,
+    user_id BIGINT NOT NULL,
+    group_id BIGINT NOT NULL,
+    recorded_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
+    UNIQUE (user_id, group_id)
+);
+
+-- Record orphan group_ids (present in users.allowed_groups but missing from groups)
+INSERT INTO orphan_allowed_groups_audit (user_id, group_id)
+SELECT u.id, x.group_id
+FROM users u
+CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id)
+LEFT JOIN groups g ON g.id = x.group_id
+WHERE u.allowed_groups IS NOT NULL
+  AND g.id IS NULL
+ON CONFLICT (user_id, group_id) DO NOTHING;
+
+-- Index for convenient querying
+CREATE INDEX IF NOT EXISTS idx_orphan_allowed_groups_audit_user_id
+ON orphan_allowed_groups_audit(user_id);
+
+-- Document the table's purpose
+COMMENT ON TABLE orphan_allowed_groups_audit IS
+'Audit table: records group_ids referenced by users.allowed_groups that do not exist, for auditing before data cleanup';
diff --git a/backend/migrations/014_drop_legacy_allowed_groups.sql b/backend/migrations/014_drop_legacy_allowed_groups.sql
new file mode 100644
index 00000000..2c2a3d45
--- /dev/null
+++ b/backend/migrations/014_drop_legacy_allowed_groups.sql
@@ -0,0 +1,15 @@
+-- 014: drop the legacy users.allowed_groups column
+-- Task: fix-medium-data-hygiene 3.3
+--
+-- Preconditions:
+-- - migration 007 backfilled the data into the user_allowed_groups join table
+-- - migration 013 recorded all orphan group_ids in the audit table
+-- - application code no longer writes to this column (3.2 done)
+--
+-- The column is now obsolete; all reads and writes go through the
+-- user_allowed_groups join table.
+
+-- Drop the allowed_groups column
+ALTER TABLE users DROP COLUMN IF EXISTS allowed_groups;
+
+-- Record why it was dropped
+COMMENT ON TABLE users IS 'User table. Note: the former allowed_groups BIGINT[] column was migrated to the user_allowed_groups join table';
diff --git a/backend/migrations/015_fix_settings_unique_constraint.sql 
b/backend/migrations/015_fix_settings_unique_constraint.sql
new file mode 100644
index 00000000..60f8fcad
--- /dev/null
+++ b/backend/migrations/015_fix_settings_unique_constraint.sql
@@ -0,0 +1,19 @@
+-- 015_fix_settings_unique_constraint.sql
+-- Fix the missing unique constraint on settings.key.
+-- The constraint is required by ON CONFLICT ("key") DO UPDATE statements.
+
+-- Add the unique constraint if it does not already exist
+DO $$
+BEGIN
+    -- Check whether the unique constraint already exists
+    IF NOT EXISTS (
+        SELECT 1 FROM pg_constraint
+        WHERE conrelid = 'settings'::regclass
+          AND contype = 'u'
+          AND conname = 'settings_key_key'
+    ) THEN
+        -- Add the unique constraint
+        ALTER TABLE settings ADD CONSTRAINT settings_key_key UNIQUE (key);
+    END IF;
+END
+$$;
diff --git a/backend/migrations/016_soft_delete_partial_unique_indexes.sql b/backend/migrations/016_soft_delete_partial_unique_indexes.sql
new file mode 100644
index 00000000..b006b775
--- /dev/null
+++ b/backend/migrations/016_soft_delete_partial_unique_indexes.sql
@@ -0,0 +1,51 @@
+-- 016_soft_delete_partial_unique_indexes.sql
+-- Fix the conflict between soft deletes and unique constraints.
+-- Replaces plain unique constraints with partial unique indexes
+-- (WHERE deleted_at IS NULL), so soft-deleted rows no longer occupy the unique
+-- slot and the same name/email/subscription pair can be recreated after deletion.
+
+-- ============================================================================
+-- 1. users: email column
+-- ============================================================================
+
+-- Drop the old unique constraint (under its possible names)
+ALTER TABLE users DROP CONSTRAINT IF EXISTS users_email_key;
+DROP INDEX IF EXISTS users_email_key;
+DROP INDEX IF EXISTS user_email_key;
+
+-- Partial unique index: uniqueness applies only to rows that are not deleted
+CREATE UNIQUE INDEX IF NOT EXISTS users_email_unique_active
+    ON users(email)
+    WHERE deleted_at IS NULL;
+
+-- ============================================================================
+-- 2. groups: name column
+-- ============================================================================
+
+-- Drop the old unique constraint
+ALTER TABLE groups DROP CONSTRAINT IF EXISTS groups_name_key;
+DROP INDEX IF EXISTS groups_name_key;
+DROP INDEX IF EXISTS group_name_key;
+
+-- Partial unique index
+CREATE UNIQUE INDEX IF NOT EXISTS groups_name_unique_active
+    ON groups(name)
+    WHERE deleted_at IS NULL;
+
+-- ============================================================================
+-- 3. user_subscriptions: (user_id, group_id) composite
+-- ============================================================================
+
+-- Drop the old unique constraint/index
+ALTER TABLE user_subscriptions DROP CONSTRAINT IF EXISTS user_subscriptions_user_id_group_id_key;
+DROP INDEX IF EXISTS user_subscriptions_user_id_group_id_key;
+DROP INDEX IF EXISTS usersubscription_user_id_group_id;
+
+-- Partial unique index
+CREATE UNIQUE INDEX IF NOT EXISTS user_subscriptions_user_group_unique_active
+    ON user_subscriptions(user_id, group_id)
+    WHERE deleted_at IS NULL;
+
+-- ============================================================================
+-- Note: api_keys.key keeps its plain unique constraint.
+-- An API key must never be reused, even after soft deletion (security).
+-- ============================================================================
diff --git a/backend/migrations/018_user_attributes.sql b/backend/migrations/018_user_attributes.sql
new file mode 100644
index 00000000..d2dad80d
--- /dev/null
+++ b/backend/migrations/018_user_attributes.sql
@@ -0,0 +1,48 @@
+-- Add user attribute definitions and values tables for custom user attributes. 
+ +-- User Attribute Definitions table (with soft delete support) +CREATE TABLE IF NOT EXISTS user_attribute_definitions ( + id BIGSERIAL PRIMARY KEY, + key VARCHAR(100) NOT NULL, + name VARCHAR(255) NOT NULL, + description TEXT DEFAULT '', + type VARCHAR(20) NOT NULL, + options JSONB DEFAULT '[]'::jsonb, + required BOOLEAN NOT NULL DEFAULT FALSE, + validation JSONB DEFAULT '{}'::jsonb, + placeholder VARCHAR(255) DEFAULT '', + display_order INT NOT NULL DEFAULT 0, + enabled BOOLEAN NOT NULL DEFAULT TRUE, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ +); + +-- Partial unique index for key (only for non-deleted records) +-- Allows reusing keys after soft delete +CREATE UNIQUE INDEX IF NOT EXISTS idx_user_attribute_definitions_key_unique + ON user_attribute_definitions(key) WHERE deleted_at IS NULL; + +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_enabled + ON user_attribute_definitions(enabled); +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_display_order + ON user_attribute_definitions(display_order); +CREATE INDEX IF NOT EXISTS idx_user_attribute_definitions_deleted_at + ON user_attribute_definitions(deleted_at); + +-- User Attribute Values table (hard delete only, no deleted_at) +CREATE TABLE IF NOT EXISTS user_attribute_values ( + id BIGSERIAL PRIMARY KEY, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + attribute_id BIGINT NOT NULL REFERENCES user_attribute_definitions(id) ON DELETE CASCADE, + value TEXT DEFAULT '', + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + UNIQUE(user_id, attribute_id) +); + +CREATE INDEX IF NOT EXISTS idx_user_attribute_values_user_id + ON user_attribute_values(user_id); +CREATE INDEX IF NOT EXISTS idx_user_attribute_values_attribute_id + ON user_attribute_values(attribute_id); diff --git a/backend/migrations/019_migrate_wechat_to_attributes.sql b/backend/migrations/019_migrate_wechat_to_attributes.sql new file mode 100644 index 00000000..765ca498 --- /dev/null +++ b/backend/migrations/019_migrate_wechat_to_attributes.sql @@ -0,0 +1,83 @@ +-- Migration: Move wechat field from users table to user_attribute_values +-- This migration: +-- 1. Creates a "wechat" attribute definition +-- 2. Migrates existing wechat data to user_attribute_values +-- 3. 
Drops the now-redundant wechat column from users (the Down migration restores it) + +-- +goose Up +-- +goose StatementBegin + +-- Step 1: Insert wechat attribute definition if not exists +INSERT INTO user_attribute_definitions (key, name, description, type, options, required, validation, placeholder, display_order, enabled, created_at, updated_at) +SELECT 'wechat', '微信', '用户微信号', 'text', '[]'::jsonb, false, '{}'::jsonb, '请输入微信号', 0, true, NOW(), NOW() +WHERE NOT EXISTS ( + SELECT 1 FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL +); + +-- Step 2: Migrate existing wechat values to user_attribute_values +-- Only migrate non-empty values +INSERT INTO user_attribute_values (user_id, attribute_id, value, created_at, updated_at) +SELECT + u.id, + (SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL LIMIT 1), + u.wechat, + NOW(), + NOW() +FROM users u +WHERE u.wechat IS NOT NULL + AND u.wechat != '' + AND u.deleted_at IS NULL + AND NOT EXISTS ( + SELECT 1 FROM user_attribute_values uav + WHERE uav.user_id = u.id + AND uav.attribute_id = (SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL LIMIT 1) + ); + +-- Step 3: Update display_order to ensure wechat appears first +UPDATE user_attribute_definitions +SET display_order = -1 +WHERE key = 'wechat' AND deleted_at IS NULL; + +-- Reorder all attributes starting from 0 +WITH ordered AS ( + SELECT id, ROW_NUMBER() OVER (ORDER BY display_order, id) - 1 as new_order + FROM user_attribute_definitions + WHERE deleted_at IS NULL +) +UPDATE user_attribute_definitions +SET display_order = ordered.new_order +FROM ordered +WHERE user_attribute_definitions.id = ordered.id; + +-- Step 4: Drop the redundant wechat column from users table +ALTER TABLE users DROP COLUMN IF EXISTS wechat; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- Restore wechat column +ALTER TABLE users ADD COLUMN IF NOT EXISTS wechat VARCHAR(100) DEFAULT ''; + +-- Copy attribute values back to users.wechat column +UPDATE users u +SET wechat = uav.value +FROM user_attribute_values uav +JOIN user_attribute_definitions uad ON uav.attribute_id = uad.id +WHERE uav.user_id = u.id + AND uad.key = 'wechat' + AND uad.deleted_at IS NULL; + +-- Delete migrated attribute values +DELETE FROM user_attribute_values +WHERE attribute_id IN ( + SELECT id FROM user_attribute_definitions WHERE key = 'wechat' AND deleted_at IS NULL +); + +-- Soft-delete the wechat attribute definition +UPDATE user_attribute_definitions +SET deleted_at = NOW() +WHERE key = 'wechat' AND deleted_at IS NULL; + +-- +goose StatementEnd diff --git a/backend/migrations/020_add_temp_unschedulable.sql b/backend/migrations/020_add_temp_unschedulable.sql new file mode 100644 index 00000000..5e1d78ac --- /dev/null +++ b/backend/migrations/020_add_temp_unschedulable.sql @@ -0,0 +1,15 @@ +-- 020_add_temp_unschedulable.sql +-- Add fields for the temporary-unschedulable feature + +-- Add the time at which the temporary-unschedulable state is lifted +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS temp_unschedulable_until timestamptz; + +-- Add the reason the account became temporarily unschedulable (for troubleshooting and auditing) +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS temp_unschedulable_reason text; + +-- Index to speed up scheduling queries +CREATE INDEX IF NOT EXISTS idx_accounts_temp_unschedulable_until ON accounts(temp_unschedulable_until) WHERE deleted_at IS NULL; + +-- Document the columns +COMMENT ON COLUMN accounts.temp_unschedulable_until IS 'Time at which the temporary-unschedulable state is lifted; set when a temporary-unschedulable rule fires (based on error codes or error-description keywords)'; +COMMENT ON COLUMN accounts.temp_unschedulable_reason IS 'Reason the account became temporarily unschedulable (for troubleshooting and auditing)'; diff --git
a/backend/migrations/024_add_gemini_tier_id.sql b/backend/migrations/024_add_gemini_tier_id.sql new file mode 100644 index 00000000..d9ac7afe --- /dev/null +++ b/backend/migrations/024_add_gemini_tier_id.sql @@ -0,0 +1,30 @@ +-- +goose Up +-- +goose StatementBegin +-- Add a default tier_id for Gemini Code Assist OAuth accounts +-- Covers accounts explicitly marked code_assist, plus legacy accounts (oauth_type empty but project_id present) +UPDATE accounts +SET credentials = jsonb_set( + credentials, + '{tier_id}', + '"LEGACY"', + true +) +WHERE platform = 'gemini' + AND type = 'oauth' + AND jsonb_typeof(credentials) = 'object' + AND credentials->>'tier_id' IS NULL + AND ( + credentials->>'oauth_type' = 'code_assist' + OR (credentials->>'oauth_type' IS NULL AND credentials->>'project_id' IS NOT NULL) + ); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +-- Rollback: remove the tier_id field +UPDATE accounts +SET credentials = credentials - 'tier_id' +WHERE platform = 'gemini' + AND type = 'oauth' + AND credentials ? 'tier_id'; +-- +goose StatementEnd diff --git a/backend/migrations/026_ops_metrics_aggregation_tables.sql b/backend/migrations/026_ops_metrics_aggregation_tables.sql new file mode 100644 index 00000000..e0e47265 --- /dev/null +++ b/backend/migrations/026_ops_metrics_aggregation_tables.sql @@ -0,0 +1,104 @@ +-- Ops monitoring: pre-aggregation tables for dashboard queries +-- +-- Problem: +-- The ops dashboard currently runs percentile_cont + GROUP BY queries over large raw tables +-- (usage_logs, ops_error_logs). These will get slower as data grows. +-- +-- This migration adds schema-only aggregation tables that can be populated by a future background job. +-- No triggers/functions/jobs are created here (schema only). + +-- ============================================ +-- Hourly aggregates (per provider/platform) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_hourly ( + -- Start of the hour bucket (recommended: UTC). + bucket_start TIMESTAMPTZ NOT NULL, + + -- Provider/platform label (e.g. anthropic/openai/gemini). Mirrors ops_* queries that GROUP BY platform. + platform VARCHAR(50) NOT NULL, + + -- Traffic counts (use these to compute rates reliably across ranges). + request_count BIGINT NOT NULL DEFAULT 0, + success_count BIGINT NOT NULL DEFAULT 0, + error_count BIGINT NOT NULL DEFAULT 0, + + -- Error breakdown used by provider health UI. + error_4xx_count BIGINT NOT NULL DEFAULT 0, + error_5xx_count BIGINT NOT NULL DEFAULT 0, + timeout_count BIGINT NOT NULL DEFAULT 0, + + -- Latency aggregates (ms). + avg_latency_ms DOUBLE PRECISION, + p99_latency_ms DOUBLE PRECISION, + + -- Convenience rate (percentage, 0-100). Still keep counts as source of truth. + error_rate DOUBLE PRECISION NOT NULL DEFAULT 0, + + -- When this row was last (re)computed by the background job. + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + PRIMARY KEY (bucket_start, platform) +); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_platform_bucket_start + ON ops_metrics_hourly (platform, bucket_start DESC); + +COMMENT ON TABLE ops_metrics_hourly IS 'Pre-aggregated hourly ops metrics by provider/platform to speed up dashboard queries.'; +COMMENT ON COLUMN ops_metrics_hourly.bucket_start IS 'Start timestamp of the hour bucket (recommended UTC).'; +COMMENT ON COLUMN ops_metrics_hourly.platform IS 'Provider/platform label (anthropic/openai/gemini, etc).'; +COMMENT ON COLUMN ops_metrics_hourly.error_rate IS 'Error rate percentage for the bucket (0-100).
Counts remain the source of truth.'; +COMMENT ON COLUMN ops_metrics_hourly.computed_at IS 'When the row was last computed/refreshed.'; + +-- ============================================ +-- Daily aggregates (per provider/platform) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_daily ( + -- Day bucket (recommended: UTC date). + bucket_date DATE NOT NULL, + platform VARCHAR(50) NOT NULL, + + request_count BIGINT NOT NULL DEFAULT 0, + success_count BIGINT NOT NULL DEFAULT 0, + error_count BIGINT NOT NULL DEFAULT 0, + + error_4xx_count BIGINT NOT NULL DEFAULT 0, + error_5xx_count BIGINT NOT NULL DEFAULT 0, + timeout_count BIGINT NOT NULL DEFAULT 0, + + avg_latency_ms DOUBLE PRECISION, + p99_latency_ms DOUBLE PRECISION, + + error_rate DOUBLE PRECISION NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + PRIMARY KEY (bucket_date, platform) +); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_platform_bucket_date + ON ops_metrics_daily (platform, bucket_date DESC); + +COMMENT ON TABLE ops_metrics_daily IS 'Pre-aggregated daily ops metrics by provider/platform for longer-term trends.'; +COMMENT ON COLUMN ops_metrics_daily.bucket_date IS 'UTC date of the day bucket (recommended).'; + +-- ============================================ +-- Population strategy (future background job) +-- ============================================ +-- +-- Suggested approach: +-- 1) Compute hourly buckets from raw logs using UTC time-bucketing, then UPSERT into ops_metrics_hourly. +-- 2) Compute daily buckets either directly from raw logs or by rolling up ops_metrics_hourly. +-- +-- Notes: +-- - Ensure the job uses a consistent timezone (recommended: SET TIME ZONE ''UTC'') to avoid bucket drift. +-- - Derive the provider/platform similarly to existing dashboard queries: +-- usage_logs: COALESCE(NULLIF(groups.platform, ''), accounts.platform, '') +-- ops_error_logs: COALESCE(NULLIF(ops_error_logs.platform, ''), groups.platform, accounts.platform, '') +-- - Keep request_count/success_count/error_count as the authoritative values; compute error_rate from counts. +-- +-- Example (hourly) shape (pseudo-SQL): +-- INSERT INTO ops_metrics_hourly (...) +-- SELECT date_trunc('hour', created_at) AS bucket_start, platform, ... +-- FROM (/* aggregate usage_logs + ops_error_logs */) s +-- ON CONFLICT (bucket_start, platform) DO UPDATE SET ...; diff --git a/backend/migrations/027_usage_billing_consistency.sql b/backend/migrations/027_usage_billing_consistency.sql new file mode 100644 index 00000000..eba68512 --- /dev/null +++ b/backend/migrations/027_usage_billing_consistency.sql @@ -0,0 +1,58 @@ +-- 027_usage_billing_consistency.sql +-- Ensure usage_logs idempotency (request_id, api_key_id) and add reconciliation infrastructure. + +-- ----------------------------------------------------------------------------- +-- 1) Normalize legacy request_id values +-- ----------------------------------------------------------------------------- +-- Historically request_id may be inserted as empty string. Convert it to NULL so +-- the upcoming unique index does not break on repeated "" values. +UPDATE usage_logs +SET request_id = NULL +WHERE request_id = ''; + +-- If duplicates already exist for the same (request_id, api_key_id), keep the +-- first row and NULL-out request_id for the rest so the unique index can be +-- created without deleting historical logs. 
+WITH ranked AS ( + SELECT + id, + ROW_NUMBER() OVER (PARTITION BY api_key_id, request_id ORDER BY id) AS rn + FROM usage_logs + WHERE request_id IS NOT NULL +) +UPDATE usage_logs ul +SET request_id = NULL +FROM ranked r +WHERE ul.id = r.id + AND r.rn > 1; + +-- ----------------------------------------------------------------------------- +-- 2) Idempotency constraint for usage_logs +-- ----------------------------------------------------------------------------- +CREATE UNIQUE INDEX IF NOT EXISTS idx_usage_logs_request_id_api_key_unique + ON usage_logs (request_id, api_key_id); + +-- ----------------------------------------------------------------------------- +-- 3) Reconciliation infrastructure: billing ledger for usage charges +-- ----------------------------------------------------------------------------- +CREATE TABLE IF NOT EXISTS billing_usage_entries ( + id BIGSERIAL PRIMARY KEY, + usage_log_id BIGINT NOT NULL REFERENCES usage_logs(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + api_key_id BIGINT NOT NULL REFERENCES api_keys(id) ON DELETE CASCADE, + subscription_id BIGINT REFERENCES user_subscriptions(id) ON DELETE SET NULL, + billing_type SMALLINT NOT NULL, + applied BOOLEAN NOT NULL DEFAULT TRUE, + delta_usd DECIMAL(20, 10) NOT NULL DEFAULT 0, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS billing_usage_entries_usage_log_id_unique + ON billing_usage_entries (usage_log_id); + +CREATE INDEX IF NOT EXISTS idx_billing_usage_entries_user_time + ON billing_usage_entries (user_id, created_at); + +CREATE INDEX IF NOT EXISTS idx_billing_usage_entries_created_at + ON billing_usage_entries (created_at); + diff --git a/backend/migrations/028_add_account_notes.sql b/backend/migrations/028_add_account_notes.sql new file mode 100644 index 00000000..0715ec01 --- /dev/null +++ b/backend/migrations/028_add_account_notes.sql @@ -0,0 +1,7 @@ +-- 028_add_account_notes.sql +-- Add optional admin notes for accounts. 
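The unique index added in 027 is what makes usage-log writes idempotent; a sketch of the insert shape it enables (column names beyond `request_id`/`api_key_id` are hypothetical, and the real writer lives in the Go repository layer):

```sql
INSERT INTO usage_logs (request_id, api_key_id, user_id, model)
VALUES ('req_abc123', 42, 7, 'claude-sonnet-4')
ON CONFLICT (request_id, api_key_id) DO NOTHING;
```

Note that rows whose `request_id` is NULL never conflict with each other, because Postgres treats NULLs as distinct in unique indexes; that is exactly why the dedup step above NULLs out the request_id of duplicates rather than deleting historical rows.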
+ +ALTER TABLE accounts +ADD COLUMN IF NOT EXISTS notes TEXT; + +COMMENT ON COLUMN accounts.notes IS 'Admin-only notes for account'; diff --git a/backend/migrations/028_add_usage_logs_user_agent.sql b/backend/migrations/028_add_usage_logs_user_agent.sql new file mode 100644 index 00000000..e7e1a581 --- /dev/null +++ b/backend/migrations/028_add_usage_logs_user_agent.sql @@ -0,0 +1,10 @@ +-- Add user_agent column to usage_logs table +-- Records the User-Agent header from API requests for analytics and debugging + +ALTER TABLE usage_logs + ADD COLUMN IF NOT EXISTS user_agent VARCHAR(512); + +-- Optional: Add index for user_agent queries (uncomment if needed for analytics) +-- CREATE INDEX IF NOT EXISTS idx_usage_logs_user_agent ON usage_logs(user_agent); + +COMMENT ON COLUMN usage_logs.user_agent IS 'User-Agent header from the API request'; diff --git a/backend/migrations/028_group_image_pricing.sql b/backend/migrations/028_group_image_pricing.sql new file mode 100644 index 00000000..19961d1c --- /dev/null +++ b/backend/migrations/028_group_image_pricing.sql @@ -0,0 +1,10 @@ +-- Add image-generation billing configuration for Antigravity groups +-- Supports per-request billing at 1K/2K/4K resolutions for the gemini-3-pro-image model + +ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_1k DECIMAL(20,8); +ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_2k DECIMAL(20,8); +ALTER TABLE groups ADD COLUMN IF NOT EXISTS image_price_4k DECIMAL(20,8); + +COMMENT ON COLUMN groups.image_price_1k IS 'Per-image price (USD) at 1K resolution; used only by the antigravity platform'; +COMMENT ON COLUMN groups.image_price_2k IS 'Per-image price (USD) at 2K resolution; used only by the antigravity platform'; +COMMENT ON COLUMN groups.image_price_4k IS 'Per-image price (USD) at 4K resolution; used only by the antigravity platform'; diff --git a/backend/migrations/029_add_group_claude_code_restriction.sql b/backend/migrations/029_add_group_claude_code_restriction.sql new file mode 100644 index 00000000..6185704d --- /dev/null +++ b/backend/migrations/029_add_group_claude_code_restriction.sql @@ -0,0 +1,21 @@ +-- 029_add_group_claude_code_restriction.sql +-- Add group-level Claude Code client restriction + +-- Add claude_code_only: whether only Claude Code clients are allowed +ALTER TABLE groups +ADD COLUMN IF NOT EXISTS claude_code_only BOOLEAN NOT NULL DEFAULT FALSE; + +-- Add fallback_group_id: the group that non-Claude-Code requests fall back to +ALTER TABLE groups +ADD COLUMN IF NOT EXISTS fallback_group_id BIGINT REFERENCES groups(id) ON DELETE SET NULL; + +-- Indexes to speed up lookups +CREATE INDEX IF NOT EXISTS idx_groups_claude_code_only +ON groups(claude_code_only) WHERE deleted_at IS NULL; + +CREATE INDEX IF NOT EXISTS idx_groups_fallback_group_id +ON groups(fallback_group_id) WHERE deleted_at IS NULL AND fallback_group_id IS NOT NULL; + +-- Column comments +COMMENT ON COLUMN groups.claude_code_only IS 'Whether only Claude Code clients may access this group'; +COMMENT ON COLUMN groups.fallback_group_id IS 'ID of the group used as fallback for non-Claude-Code requests'; diff --git a/backend/migrations/029_usage_log_image_fields.sql b/backend/migrations/029_usage_log_image_fields.sql new file mode 100644 index 00000000..16304d24 --- /dev/null +++ b/backend/migrations/029_usage_log_image_fields.sql @@ -0,0 +1,5 @@ +-- Add image-generation statistics fields to usage logs +-- Records usage of image-generation models such as gemini-3-pro-image + +ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS image_count INT DEFAULT 0; +ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS image_size VARCHAR(10); diff --git a/backend/migrations/030_add_account_expires_at.sql b/backend/migrations/030_add_account_expires_at.sql new file mode 100644 index 00000000..905220e9 --- /dev/null +++ b/backend/migrations/030_add_account_expires_at.sql @@ -0,0 +1,10 @@ +-- Add expires_at for account expiration configuration +ALTER TABLE accounts ADD
COLUMN IF NOT EXISTS expires_at timestamptz; +-- Document expires_at meaning +COMMENT ON COLUMN accounts.expires_at IS 'Account expiration time (NULL means no expiration).'; +-- Add auto_pause_on_expired for account expiration scheduling control +ALTER TABLE accounts ADD COLUMN IF NOT EXISTS auto_pause_on_expired boolean NOT NULL DEFAULT true; +-- Document auto_pause_on_expired meaning +COMMENT ON COLUMN accounts.auto_pause_on_expired IS 'Auto pause scheduling when account expires.'; +-- Ensure existing accounts are enabled by default +UPDATE accounts SET auto_pause_on_expired = true; diff --git a/backend/migrations/031_add_ip_address.sql b/backend/migrations/031_add_ip_address.sql new file mode 100644 index 00000000..7f557830 --- /dev/null +++ b/backend/migrations/031_add_ip_address.sql @@ -0,0 +1,5 @@ +-- Add IP address field to usage_logs table for request tracking (admin-only visibility) +ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS ip_address VARCHAR(45); + +-- Create index for IP address queries +CREATE INDEX IF NOT EXISTS idx_usage_logs_ip_address ON usage_logs(ip_address); diff --git a/backend/migrations/032_add_api_key_ip_restriction.sql b/backend/migrations/032_add_api_key_ip_restriction.sql new file mode 100644 index 00000000..2dfe2c92 --- /dev/null +++ b/backend/migrations/032_add_api_key_ip_restriction.sql @@ -0,0 +1,9 @@ +-- Add IP restriction fields to api_keys table +-- ip_whitelist: JSON array of allowed IPs/CIDRs (if set, only these IPs can use the key) +-- ip_blacklist: JSON array of blocked IPs/CIDRs (these IPs are always blocked) + +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_whitelist JSONB DEFAULT NULL; +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_blacklist JSONB DEFAULT NULL; + +COMMENT ON COLUMN api_keys.ip_whitelist IS 'JSON array of allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]'; +COMMENT ON COLUMN api_keys.ip_blacklist IS 'JSON array of blocked IPs/CIDRs, e.g. 
["1.2.3.4", "5.6.0.0/16"]'; diff --git a/backend/migrations/033_add_promo_codes.sql b/backend/migrations/033_add_promo_codes.sql new file mode 100644 index 00000000..7f6ae9a0 --- /dev/null +++ b/backend/migrations/033_add_promo_codes.sql @@ -0,0 +1,34 @@ +-- 创建注册优惠码表 +CREATE TABLE IF NOT EXISTS promo_codes ( + id BIGSERIAL PRIMARY KEY, + code VARCHAR(32) NOT NULL UNIQUE, + bonus_amount DECIMAL(20,8) NOT NULL DEFAULT 0, + max_uses INT NOT NULL DEFAULT 0, + used_count INT NOT NULL DEFAULT 0, + status VARCHAR(20) NOT NULL DEFAULT 'active', + expires_at TIMESTAMPTZ DEFAULT NULL, + notes TEXT DEFAULT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- 创建优惠码使用记录表 +CREATE TABLE IF NOT EXISTS promo_code_usages ( + id BIGSERIAL PRIMARY KEY, + promo_code_id BIGINT NOT NULL REFERENCES promo_codes(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + bonus_amount DECIMAL(20,8) NOT NULL, + used_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(promo_code_id, user_id) +); + +-- 索引 +CREATE INDEX IF NOT EXISTS idx_promo_codes_status ON promo_codes(status); +CREATE INDEX IF NOT EXISTS idx_promo_codes_expires_at ON promo_codes(expires_at); +CREATE INDEX IF NOT EXISTS idx_promo_code_usages_promo_code_id ON promo_code_usages(promo_code_id); +CREATE INDEX IF NOT EXISTS idx_promo_code_usages_user_id ON promo_code_usages(user_id); + +COMMENT ON TABLE promo_codes IS '注册优惠码'; +COMMENT ON TABLE promo_code_usages IS '优惠码使用记录'; +COMMENT ON COLUMN promo_codes.max_uses IS '最大使用次数,0表示无限制'; +COMMENT ON COLUMN promo_codes.status IS '状态: active, disabled'; diff --git a/backend/migrations/033_ops_monitoring_vnext.sql b/backend/migrations/033_ops_monitoring_vnext.sql new file mode 100644 index 00000000..a18c061d --- /dev/null +++ b/backend/migrations/033_ops_monitoring_vnext.sql @@ -0,0 +1,717 @@ +-- Ops Monitoring (vNext): squashed migration (030) +-- +-- This repository originally planned Ops vNext as migrations 030-036: +-- 030 drop legacy ops tables +-- 031 core schema +-- 032 pre-aggregation tables +-- 033 indexes + optional extensions +-- 034 add avg/max to preagg +-- 035 add notify_email to alert rules +-- 036 seed default alert rules +-- +-- Since these migrations have NOT been applied to any environment yet, we squash them +-- into a single 030 migration for easier review and a cleaner migration history. +-- +-- Notes: +-- - This is intentionally destructive for ops_* data (error logs / metrics / alerts). +-- - It is idempotent (DROP/CREATE/ALTER IF EXISTS/IF NOT EXISTS), but will wipe ops_* data if re-run. 
+ +-- ===================================================================== +-- 030_ops_drop_legacy_ops_tables.sql +-- ===================================================================== + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- Legacy pre-aggregation tables (from 026 and/or previous branches) +DROP TABLE IF EXISTS ops_metrics_daily CASCADE; +DROP TABLE IF EXISTS ops_metrics_hourly CASCADE; + +-- Core ops tables that may exist in some deployments / branches +DROP TABLE IF EXISTS ops_system_metrics CASCADE; +DROP TABLE IF EXISTS ops_error_logs CASCADE; +DROP TABLE IF EXISTS ops_alert_events CASCADE; +DROP TABLE IF EXISTS ops_alert_rules CASCADE; +DROP TABLE IF EXISTS ops_job_heartbeats CASCADE; +DROP TABLE IF EXISTS ops_retry_attempts CASCADE; + +-- Optional legacy tables (best-effort cleanup) +DROP TABLE IF EXISTS ops_scheduled_reports CASCADE; +DROP TABLE IF EXISTS ops_group_availability_configs CASCADE; +DROP TABLE IF EXISTS ops_group_availability_events CASCADE; + +-- Optional legacy views/indexes +DROP VIEW IF EXISTS ops_latest_metrics CASCADE; + +-- ===================================================================== +-- 031_ops_core_schema.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): core schema (errors / retries / metrics / jobs / alerts) +-- +-- Design goals: +-- - Support global filtering (time/platform/group) across all ops modules. +-- - Persist enough context for two retry modes (client retry / pinned upstream retry). +-- - Make ops background jobs observable via job heartbeats. +-- - Keep schema stable and indexes targeted (high-write tables). +-- +-- Notes: +-- - This migration is idempotent. +-- - ops_* tables intentionally avoid strict foreign keys to reduce write amplification/locks. 
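To make the global-filtering goal concrete, a typical dashboard-style query over the `ops_error_logs` table defined below might look like this (illustrative only; the real queries live in the ops handlers):

```sql
-- Error breakdown for one platform over the last hour.
SELECT error_phase, error_type, COUNT(*) AS errors
FROM ops_error_logs
WHERE created_at >= NOW() - INTERVAL '1 hour'
  AND platform = 'anthropic'
GROUP BY error_phase, error_type
ORDER BY errors DESC;
```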
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_error_logs: error log details (high-write) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_error_logs ( + id BIGSERIAL PRIMARY KEY, + + -- Correlation / identities + request_id VARCHAR(64), + client_request_id VARCHAR(64), + user_id BIGINT, + api_key_id BIGINT, + account_id BIGINT, + group_id BIGINT, + client_ip inet, + + -- Dimensions for global filtering + platform VARCHAR(32), + + -- Request metadata + model VARCHAR(100), + request_path VARCHAR(256), + stream BOOLEAN NOT NULL DEFAULT false, + user_agent TEXT, + + -- Core error classification + error_phase VARCHAR(32) NOT NULL, + error_type VARCHAR(64) NOT NULL, + severity VARCHAR(8) NOT NULL DEFAULT 'P2', + status_code INT, + + -- vNext metric semantics + is_business_limited BOOLEAN NOT NULL DEFAULT false, + + -- Error details (sanitized/truncated at ingest time) + error_message TEXT, + error_body TEXT, + + -- Provider/upstream details (optional; useful for trends & account health) + error_source VARCHAR(64), + error_owner VARCHAR(32), + account_status VARCHAR(50), + upstream_status_code INT, + upstream_error_message TEXT, + upstream_error_detail TEXT, + provider_error_code VARCHAR(64), + provider_error_type VARCHAR(64), + network_error_type VARCHAR(50), + retry_after_seconds INT, + + -- Timings (ms) - optional + duration_ms INT, + time_to_first_token_ms BIGINT, + auth_latency_ms BIGINT, + routing_latency_ms BIGINT, + upstream_latency_ms BIGINT, + response_latency_ms BIGINT, + + -- Retry context (only stored for error requests) + request_body JSONB, + request_headers JSONB, + request_body_truncated BOOLEAN NOT NULL DEFAULT false, + request_body_bytes INT, + + -- Retryability flags (best-effort classification) + is_retryable BOOLEAN NOT NULL DEFAULT false, + retry_count INT NOT NULL DEFAULT 0, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +COMMENT ON TABLE ops_error_logs IS 'Ops error logs (vNext). 
Stores sanitized error details and request_body for retries (errors only).'; + +-- ============================================ +-- 2) ops_retry_attempts: audit log for retries +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_retry_attempts ( + id BIGSERIAL PRIMARY KEY, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + requested_by_user_id BIGINT, + source_error_id BIGINT, + + -- client|upstream + mode VARCHAR(16) NOT NULL, + pinned_account_id BIGINT, + + -- queued|running|succeeded|failed + status VARCHAR(16) NOT NULL DEFAULT 'queued', + started_at TIMESTAMPTZ, + finished_at TIMESTAMPTZ, + duration_ms BIGINT, + + -- Optional result correlation + result_request_id VARCHAR(64), + result_error_id BIGINT, + result_usage_request_id VARCHAR(64), + + error_message TEXT +); + +COMMENT ON TABLE ops_retry_attempts IS 'Audit table for ops retries (client retry / pinned upstream retry).'; + +-- ============================================ +-- 3) ops_system_metrics: system + request window snapshots +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_system_metrics ( + id BIGSERIAL PRIMARY KEY, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + window_minutes INT NOT NULL DEFAULT 1, + + -- Optional dimensions (only if collector chooses to write per-dimension snapshots) + platform VARCHAR(32), + group_id BIGINT, + + -- Core counts + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + -- Rates + qps DOUBLE PRECISION, + tps DOUBLE PRECISION, + + -- Duration percentiles (ms) - success requests + duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + duration_avg_ms DOUBLE PRECISION, + duration_max_ms INT, + + -- TTFT percentiles (ms) - success requests (streaming) + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + ttft_avg_ms DOUBLE PRECISION, + ttft_max_ms INT, + + -- System resources + cpu_usage_percent DOUBLE PRECISION, + memory_used_mb BIGINT, + memory_total_mb BIGINT, + memory_usage_percent DOUBLE PRECISION, + + -- Dependency health (best-effort) + db_ok BOOLEAN, + redis_ok BOOLEAN, + + -- DB pool & runtime + db_conn_active INT, + db_conn_idle INT, + db_conn_waiting INT, + goroutine_count INT, + + -- Queue / concurrency + concurrency_queue_depth INT +); + +COMMENT ON TABLE ops_system_metrics IS 'Ops system/request metrics snapshots (vNext). 
Used for dashboard overview and realtime rates.'; + +-- ============================================ +-- 4) ops_job_heartbeats: background jobs health +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_job_heartbeats ( + job_name VARCHAR(64) PRIMARY KEY, + + last_run_at TIMESTAMPTZ, + last_success_at TIMESTAMPTZ, + last_error_at TIMESTAMPTZ, + last_error TEXT, + last_duration_ms BIGINT, + + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +COMMENT ON TABLE ops_job_heartbeats IS 'Ops background jobs heartbeats (vNext).'; + +-- ============================================ +-- 5) ops_alert_rules / ops_alert_events +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_alert_rules ( + id BIGSERIAL PRIMARY KEY, + + name VARCHAR(128) NOT NULL, + description TEXT, + enabled BOOLEAN NOT NULL DEFAULT true, + + severity VARCHAR(16) NOT NULL DEFAULT 'warning', + + -- Metric definition + metric_type VARCHAR(64) NOT NULL, + operator VARCHAR(8) NOT NULL, + threshold DOUBLE PRECISION NOT NULL, + + window_minutes INT NOT NULL DEFAULT 5, + sustained_minutes INT NOT NULL DEFAULT 5, + cooldown_minutes INT NOT NULL DEFAULT 10, + + -- Optional scoping: platform/group filters etc. + filters JSONB, + + last_triggered_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_alert_rules_name_unique + ON ops_alert_rules (name); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_enabled + ON ops_alert_rules (enabled); + +CREATE TABLE IF NOT EXISTS ops_alert_events ( + id BIGSERIAL PRIMARY KEY, + + rule_id BIGINT, + severity VARCHAR(16) NOT NULL, + status VARCHAR(16) NOT NULL DEFAULT 'firing', + + title VARCHAR(200), + description TEXT, + + metric_value DOUBLE PRECISION, + threshold_value DOUBLE PRECISION, + dimensions JSONB, + + fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + resolved_at TIMESTAMPTZ, + + email_sent BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_events_rule_status + ON ops_alert_events (rule_id, status); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_events_fired_at + ON ops_alert_events (fired_at DESC); + +-- ===================================================================== +-- 032_ops_preaggregation_tables.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): pre-aggregation tables +-- +-- Purpose: +-- - Provide stable query performance for 1–24h windows (and beyond), avoiding expensive +-- percentile_cont scans on raw logs for every dashboard refresh. +-- - Support global filter dimensions: overall / platform / group. +-- +-- Design note: +-- - We keep a single table with nullable platform/group_id, and enforce uniqueness via a +-- COALESCE-based unique index (because UNIQUE with NULLs allows duplicates in Postgres).
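One practical consequence of the COALESCE-based unique index created below: an aggregation job cannot use a plain column list as its conflict target, it has to repeat the index expressions. A sketch of the upsert shape (hypothetical job SQL; no job is created by this migration):

```sql
INSERT INTO ops_metrics_hourly (bucket_start, platform, group_id, success_count, error_count_total)
VALUES (date_trunc('hour', NOW()), 'anthropic', NULL, 120, 3)
ON CONFLICT (bucket_start, COALESCE(platform, ''), COALESCE(group_id, 0))
DO UPDATE SET
  success_count     = EXCLUDED.success_count,
  error_count_total = EXCLUDED.error_count_total,
  computed_at       = NOW();
```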
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_metrics_hourly +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_hourly ( + id BIGSERIAL PRIMARY KEY, + + bucket_start TIMESTAMPTZ NOT NULL, + platform VARCHAR(32), + group_id BIGINT, + + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + -- Duration percentiles (ms) + duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + + -- TTFT percentiles (ms) + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Uniqueness across three “dimension modes” (overall / platform / group). +-- Postgres UNIQUE treats NULLs as distinct, so we enforce uniqueness via COALESCE. +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_hourly_unique_dim + ON ops_metrics_hourly ( + bucket_start, + COALESCE(platform, ''), + COALESCE(group_id, 0) + ); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_bucket + ON ops_metrics_hourly (bucket_start DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_platform_bucket + ON ops_metrics_hourly (platform, bucket_start DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_group_bucket + ON ops_metrics_hourly (group_id, bucket_start DESC) + WHERE group_id IS NOT NULL AND group_id <> 0; + +COMMENT ON TABLE ops_metrics_hourly IS 'vNext hourly pre-aggregated ops metrics (overall/platform/group).'; + +-- ============================================ +-- 2) ops_metrics_daily (optional; for longer windows) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_daily ( + id BIGSERIAL PRIMARY KEY, + + bucket_date DATE NOT NULL, + platform VARCHAR(32), + group_id BIGINT, + + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_daily_unique_dim + ON ops_metrics_daily ( + bucket_date, + COALESCE(platform, ''), + COALESCE(group_id, 0) + ); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_bucket + ON ops_metrics_daily (bucket_date DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_platform_bucket + ON ops_metrics_daily (platform, bucket_date DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_group_bucket + ON ops_metrics_daily (group_id, bucket_date DESC) + 
WHERE group_id IS NOT NULL AND group_id <> 0; + +COMMENT ON TABLE ops_metrics_daily IS 'vNext daily pre-aggregated ops metrics (overall/platform/group).'; + +-- ===================================================================== +-- 033_ops_indexes_and_extensions.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): indexes and optional extensions +-- +-- This migration intentionally keeps "optional" objects (like pg_trgm) best-effort, +-- so environments without extension privileges won't fail the whole migration chain. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) Core btree indexes (always safe) +-- ============================================ + +-- ops_error_logs +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_created_at + ON ops_error_logs (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_platform_time + ON ops_error_logs (platform, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_group_time + ON ops_error_logs (group_id, created_at DESC) + WHERE group_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_account_time + ON ops_error_logs (account_id, created_at DESC) + WHERE account_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_status_time + ON ops_error_logs (status_code, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase_time + ON ops_error_logs (error_phase, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_type_time + ON ops_error_logs (error_type, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_request_id + ON ops_error_logs (request_id); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id + ON ops_error_logs (client_request_id); + +-- ops_system_metrics +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_created_at + ON ops_system_metrics (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_window_time + ON ops_system_metrics (window_minutes, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_platform_time + ON ops_system_metrics (platform, created_at DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_group_time + ON ops_system_metrics (group_id, created_at DESC) + WHERE group_id IS NOT NULL; + +-- ops_retry_attempts +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_created_at + ON ops_retry_attempts (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_source_error + ON ops_retry_attempts (source_error_id, created_at DESC) + WHERE source_error_id IS NOT NULL; + +-- Prevent concurrent retries for the same ops_error_logs row (race-free, multi-instance safe). +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_retry_attempts_unique_active + ON ops_retry_attempts (source_error_id) + WHERE source_error_id IS NOT NULL AND status IN ('queued', 'running'); + +-- ============================================ +-- 2) Optional: pg_trgm + trigram indexes for fuzzy search +-- ============================================ + +DO $$ +BEGIN + BEGIN + CREATE EXTENSION IF NOT EXISTS pg_trgm; + EXCEPTION WHEN OTHERS THEN + -- Missing privileges or extension package should not block migrations. 
+ RAISE NOTICE 'pg_trgm extension not created: %', SQLERRM; + END; + + IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_trgm') THEN + -- request_id / client_request_id fuzzy search + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_request_id_trgm + ON ops_error_logs USING gin (request_id gin_trgm_ops)'; + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id_trgm + ON ops_error_logs USING gin (client_request_id gin_trgm_ops)'; + + -- error_message fuzzy search + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_error_message_trgm + ON ops_error_logs USING gin (error_message gin_trgm_ops)'; + END IF; +END $$; + +-- ===================================================================== +-- 034_ops_preaggregation_add_avg_max.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): extend pre-aggregation tables with avg/max latency fields +-- +-- Why: +-- - The dashboard overview returns avg/max for duration/TTFT. +-- - Hourly/daily pre-aggregation tables originally stored only p50/p90/p95/p99, which makes +-- it impossible to answer avg/max in preagg mode without falling back to raw scans. +-- +-- This migration is idempotent and safe to run multiple times. +-- +-- NOTE: We keep the existing p50/p90/p95/p99 columns as-is; these are still used for +-- approximate long-window summaries. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- Hourly table +ALTER TABLE ops_metrics_hourly + ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS duration_max_ms INT, + ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS ttft_max_ms INT; + +-- Daily table +ALTER TABLE ops_metrics_daily + ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS duration_max_ms INT, + ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS ttft_max_ms INT; + +-- ===================================================================== +-- 035_ops_alert_rules_notify_email.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): alert rule notify settings +-- +-- Adds notify_email flag to ops_alert_rules to keep UI parity with the backup Ops dashboard. +-- Migration is idempotent. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +ALTER TABLE ops_alert_rules + ADD COLUMN IF NOT EXISTS notify_email BOOLEAN NOT NULL DEFAULT true; + +-- ===================================================================== +-- 036_ops_seed_default_alert_rules.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): seed default alert rules (idempotent) +-- +-- Goal: +-- - Provide "out of the box" alert rules so the Ops dashboard can immediately show alert events. +-- - Keep inserts idempotent via ON CONFLICT (name) DO NOTHING. +-- +-- Notes: +-- - Thresholds are intentionally conservative defaults and should be tuned per deployment. +-- - Metric semantics follow vNext: +-- - success_rate / error_rate are based on SLA-scope counts (exclude is_business_limited). +-- - upstream_error_rate excludes 429/529. 
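Because every seed below ends with ON CONFLICT (name) DO NOTHING, re-running the migration never overwrites thresholds an operator has already tuned. Per-deployment tuning is therefore a plain UPDATE keyed by rule name, for example (hypothetical tuning, not part of the migration):

```sql
-- Relax the high-error-rate rule for a noisy deployment.
UPDATE ops_alert_rules
SET threshold = 10.0,
    cooldown_minutes = 30,
    updated_at = NOW()
WHERE name = '错误率过高';
```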
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- 1) High error rate (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '错误率过高', + '当错误率超过 5% 且持续 5 分钟时触发告警', + true, 'error_rate', '>', 5.0, 5, 5, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 2) Low success rate (P0) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '成功率过低', + '当成功率低于 95% 且持续 5 分钟时触发告警(服务可用性下降)', + true, 'success_rate', '<', 95.0, 5, 5, 'P0', true, 15, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 3) P99 latency too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'P99延迟过高', + '当 P99 延迟超过 3000ms 且持续 10 分钟时触发告警', + true, 'p99_latency_ms', '>', 3000.0, 5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 4) P95 latency too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'P95延迟过高', + '当 P95 延迟超过 2000ms 且持续 10 分钟时触发告警', + true, 'p95_latency_ms', '>', 2000.0, 5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 5) CPU usage too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'CPU使用率过高', + '当 CPU 使用率超过 85% 且持续 10 分钟时触发告警', + true, 'cpu_usage_percent', '>', 85.0, 5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 6) Memory usage too high (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '内存使用率过高', + '当内存使用率超过 90% 且持续 10 分钟时触发告警(可能导致 OOM)', + true, 'memory_usage_percent', '>', 90.0, 5, 10, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 7) Concurrency queue buildup (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '并发队列积压', + '当并发队列深度超过 100 且持续 5 分钟时触发告警(系统处理能力不足)', + true, 'concurrency_queue_depth', '>', 100.0, 5, 5, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 8) Extremely high error rate (P0) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '错误率极高', + '当错误率超过 20% 且持续 1 分钟时触发告警(服务严重异常)', + true, 'error_rate', '>', 20.0, 1, 1, 'P0', true, 15, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- Ops Monitoring vNext: add Redis pool stats fields to system metrics snapshots. +-- This migration is intentionally idempotent. 
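Once the two pool columns below exist, pool saturation can be read straight off recent snapshots. A sketch, assuming the collector populates both fields:

```sql
SELECT created_at,
       redis_conn_total,
       redis_conn_idle,
       ROUND(100.0 * (redis_conn_total - redis_conn_idle) / NULLIF(redis_conn_total, 0), 1) AS busy_pct
FROM ops_system_metrics
WHERE created_at >= NOW() - INTERVAL '30 minutes'
ORDER BY created_at DESC;
```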
+ +ALTER TABLE ops_system_metrics + ADD COLUMN IF NOT EXISTS redis_conn_total INT, + ADD COLUMN IF NOT EXISTS redis_conn_idle INT; + +COMMENT ON COLUMN ops_system_metrics.redis_conn_total IS 'Redis pool total connections (go-redis PoolStats.TotalConns).'; +COMMENT ON COLUMN ops_system_metrics.redis_conn_idle IS 'Redis pool idle connections (go-redis PoolStats.IdleConns).'; diff --git a/backend/migrations/034_ops_upstream_error_events.sql b/backend/migrations/034_ops_upstream_error_events.sql new file mode 100644 index 00000000..f8bfa5e2 --- /dev/null +++ b/backend/migrations/034_ops_upstream_error_events.sql @@ -0,0 +1,9 @@ +-- Add upstream error events list (JSONB) to ops_error_logs for per-request correlation. +-- +-- This is intentionally idempotent. + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS upstream_errors JSONB; + +COMMENT ON COLUMN ops_error_logs.upstream_errors IS + 'Sanitized upstream error events list (JSON array), correlated per gateway request (request_id/client_request_id); used for per-request upstream debugging.'; diff --git a/backend/migrations/034_usage_dashboard_aggregation_tables.sql b/backend/migrations/034_usage_dashboard_aggregation_tables.sql new file mode 100644 index 00000000..64b383d4 --- /dev/null +++ b/backend/migrations/034_usage_dashboard_aggregation_tables.sql @@ -0,0 +1,77 @@ +-- Usage dashboard aggregation tables (hourly/daily) + active-user dedup + watermark. +-- These tables support Admin Dashboard statistics without full-table scans on usage_logs. + +-- Hourly aggregates (UTC buckets). +CREATE TABLE IF NOT EXISTS usage_dashboard_hourly ( + bucket_start TIMESTAMPTZ PRIMARY KEY, + total_requests BIGINT NOT NULL DEFAULT 0, + input_tokens BIGINT NOT NULL DEFAULT 0, + output_tokens BIGINT NOT NULL DEFAULT 0, + cache_creation_tokens BIGINT NOT NULL DEFAULT 0, + cache_read_tokens BIGINT NOT NULL DEFAULT 0, + total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + total_duration_ms BIGINT NOT NULL DEFAULT 0, + active_users BIGINT NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_bucket_start + ON usage_dashboard_hourly (bucket_start DESC); + +COMMENT ON TABLE usage_dashboard_hourly IS 'Pre-aggregated hourly usage metrics for admin dashboard (UTC buckets).'; +COMMENT ON COLUMN usage_dashboard_hourly.bucket_start IS 'UTC start timestamp of the hour bucket.'; +COMMENT ON COLUMN usage_dashboard_hourly.computed_at IS 'When the hourly row was last computed/refreshed.'; + +-- Daily aggregates (UTC dates). 
+CREATE TABLE IF NOT EXISTS usage_dashboard_daily ( + bucket_date DATE PRIMARY KEY, + total_requests BIGINT NOT NULL DEFAULT 0, + input_tokens BIGINT NOT NULL DEFAULT 0, + output_tokens BIGINT NOT NULL DEFAULT 0, + cache_creation_tokens BIGINT NOT NULL DEFAULT 0, + cache_read_tokens BIGINT NOT NULL DEFAULT 0, + total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + total_duration_ms BIGINT NOT NULL DEFAULT 0, + active_users BIGINT NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_bucket_date + ON usage_dashboard_daily (bucket_date DESC); + +COMMENT ON TABLE usage_dashboard_daily IS 'Pre-aggregated daily usage metrics for admin dashboard (UTC dates).'; +COMMENT ON COLUMN usage_dashboard_daily.bucket_date IS 'UTC date of the day bucket.'; +COMMENT ON COLUMN usage_dashboard_daily.computed_at IS 'When the daily row was last computed/refreshed.'; + +-- Hourly active user dedup table. +CREATE TABLE IF NOT EXISTS usage_dashboard_hourly_users ( + bucket_start TIMESTAMPTZ NOT NULL, + user_id BIGINT NOT NULL, + PRIMARY KEY (bucket_start, user_id) +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_users_bucket_start + ON usage_dashboard_hourly_users (bucket_start); + +-- Daily active user dedup table. +CREATE TABLE IF NOT EXISTS usage_dashboard_daily_users ( + bucket_date DATE NOT NULL, + user_id BIGINT NOT NULL, + PRIMARY KEY (bucket_date, user_id) +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_users_bucket_date + ON usage_dashboard_daily_users (bucket_date); + +-- Aggregation watermark table (single row). +CREATE TABLE IF NOT EXISTS usage_dashboard_aggregation_watermark ( + id INT PRIMARY KEY, + last_aggregated_at TIMESTAMPTZ NOT NULL DEFAULT TIMESTAMPTZ '1970-01-01 00:00:00+00', + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +INSERT INTO usage_dashboard_aggregation_watermark (id) +VALUES (1) +ON CONFLICT (id) DO NOTHING; diff --git a/backend/migrations/035_usage_logs_partitioning.sql b/backend/migrations/035_usage_logs_partitioning.sql new file mode 100644 index 00000000..e25a105e --- /dev/null +++ b/backend/migrations/035_usage_logs_partitioning.sql @@ -0,0 +1,54 @@ +-- usage_logs monthly partition bootstrap. +-- Only creates partitions when usage_logs is already partitioned. +-- Converting usage_logs to a partitioned table requires a manual migration plan. + +DO $$ +DECLARE + is_partitioned BOOLEAN := FALSE; + has_data BOOLEAN := FALSE; + month_start DATE; + prev_month DATE; + next_month DATE; +BEGIN + SELECT EXISTS( + SELECT 1 + FROM pg_partitioned_table pt + JOIN pg_class c ON c.oid = pt.partrelid + WHERE c.relname = 'usage_logs' + ) INTO is_partitioned; + + IF NOT is_partitioned THEN + SELECT EXISTS(SELECT 1 FROM usage_logs LIMIT 1) INTO has_data; + IF NOT has_data THEN + -- Automatic conversion is intentionally skipped; see manual migration plan. 
RAISE NOTICE 'usage_logs is not partitioned; skip automatic partitioning'; + END IF; + END IF; + + IF is_partitioned THEN + month_start := date_trunc('month', now() AT TIME ZONE 'UTC')::date; + prev_month := (month_start - INTERVAL '1 month')::date; + next_month := (month_start + INTERVAL '1 month')::date; + + EXECUTE format( + 'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)', + to_char(prev_month, 'YYYYMM'), + prev_month, + month_start + ); + + EXECUTE format( + 'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)', + to_char(month_start, 'YYYYMM'), + month_start, + next_month + ); + + EXECUTE format( + 'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)', + to_char(next_month, 'YYYYMM'), + next_month, + (next_month + INTERVAL '1 month')::date + ); + END IF; +END $$; diff --git a/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql b/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql new file mode 100644 index 00000000..dedb1154 --- /dev/null +++ b/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql @@ -0,0 +1,16 @@ +-- Migration: add the is_count_tokens column to the ops_error_logs table +-- Purpose: flag errors from count_tokens requests so statistics and alerting can filter them dynamically per configuration +-- Author: System +-- Date: 2026-01-12 + +-- Add is_count_tokens column to ops_error_logs table +ALTER TABLE ops_error_logs +ADD COLUMN IF NOT EXISTS is_count_tokens BOOLEAN NOT NULL DEFAULT FALSE; + +-- Add comment +COMMENT ON COLUMN ops_error_logs.is_count_tokens IS 'Whether the error came from a count_tokens request (used for statistics filtering)'; + +-- Create index for filtering (optional, improves query performance) +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_is_count_tokens +ON ops_error_logs(is_count_tokens) +WHERE is_count_tokens = TRUE; diff --git a/backend/migrations/036_scheduler_outbox.sql b/backend/migrations/036_scheduler_outbox.sql new file mode 100644 index 00000000..a548841c --- /dev/null +++ b/backend/migrations/036_scheduler_outbox.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS scheduler_outbox ( + id BIGSERIAL PRIMARY KEY, + event_type TEXT NOT NULL, + account_id BIGINT NULL, + group_id BIGINT NULL, + payload JSONB NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_scheduler_outbox_created_at ON scheduler_outbox (created_at); diff --git a/backend/migrations/037_add_account_rate_multiplier.sql b/backend/migrations/037_add_account_rate_multiplier.sql new file mode 100644 index 00000000..06f5b090 --- /dev/null +++ b/backend/migrations/037_add_account_rate_multiplier.sql @@ -0,0 +1,14 @@ +-- Add account billing rate multiplier and per-usage snapshot.
+-- +-- accounts.rate_multiplier: per-account billing rate multiplier (>= 0; 0 means the account bills at zero). +-- usage_logs.account_rate_multiplier: per-usage-log snapshot of the account multiplier, so that +-- multiplier changes only affect subsequent requests, and weighted statistics work when the multiplier changes mid-day. +-- +-- Note: usage_logs.account_rate_multiplier is neither backfilled nor NOT NULL. +-- Legacy rows with NULL are treated as 1.0 in statistics (COALESCE). + +ALTER TABLE IF EXISTS accounts + ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10,4) NOT NULL DEFAULT 1.0; + +ALTER TABLE IF EXISTS usage_logs + ADD COLUMN IF NOT EXISTS account_rate_multiplier DECIMAL(10,4); diff --git a/backend/migrations/037_ops_alert_silences.sql b/backend/migrations/037_ops_alert_silences.sql new file mode 100644 index 00000000..95b61a09 --- /dev/null +++ b/backend/migrations/037_ops_alert_silences.sql @@ -0,0 +1,28 @@ +-- +goose Up +-- +goose StatementBegin +-- Ops alert silences: scoped (rule_id + platform + group_id + region) + +CREATE TABLE IF NOT EXISTS ops_alert_silences ( + id BIGSERIAL PRIMARY KEY, + + rule_id BIGINT NOT NULL, + platform VARCHAR(64) NOT NULL, + group_id BIGINT, + region VARCHAR(64), + + until TIMESTAMPTZ NOT NULL, + reason TEXT, + + created_by BIGINT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_silences_lookup + ON ops_alert_silences (rule_id, platform, group_id, region, until); + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE IF EXISTS ops_alert_silences; +-- +goose StatementEnd diff --git a/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql new file mode 100644 index 00000000..adaacf1c --- /dev/null +++ b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql @@ -0,0 +1,111 @@ +-- Add resolution tracking to ops_error_logs, persist retry results, and standardize error classification enums. +-- +-- This migration is intentionally idempotent.
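As an aside on the rate-multiplier snapshot added in 037 above: the NULL-means-1.0 convention makes weighted cost rollups a one-line COALESCE. A sketch, assuming usage_logs carries a per-request cost column named total_cost (the actual column name may differ):

```sql
SELECT date_trunc('day', created_at) AS day,
       SUM(total_cost * COALESCE(account_rate_multiplier, 1.0)) AS weighted_cost
FROM usage_logs
GROUP BY 1
ORDER BY 1;
```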
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_error_logs: resolution fields +-- ============================================ + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved BOOLEAN NOT NULL DEFAULT false; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_at TIMESTAMPTZ; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_by_user_id BIGINT; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_retry_id BIGINT; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_resolved_time + ON ops_error_logs (resolved, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_unresolved_time + ON ops_error_logs (created_at DESC) + WHERE resolved = false; + +-- ============================================ +-- 2) ops_retry_attempts: persist execution results +-- ============================================ + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS success BOOLEAN; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS http_status_code INT; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS upstream_request_id VARCHAR(128); + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS used_account_id BIGINT; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS response_preview TEXT; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS response_truncated BOOLEAN NOT NULL DEFAULT false; + +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_success_time + ON ops_retry_attempts (success, created_at DESC); + +-- Backfill best-effort fields for existing rows. +UPDATE ops_retry_attempts +SET success = (LOWER(COALESCE(status, '')) = 'succeeded') +WHERE success IS NULL; + +UPDATE ops_retry_attempts +SET upstream_request_id = result_request_id +WHERE upstream_request_id IS NULL AND result_request_id IS NOT NULL; + +-- ============================================ +-- 3) Standardize classification enums in ops_error_logs +-- +-- New enums: +-- error_phase: request|auth|routing|upstream|network|internal +-- error_owner: client|provider|platform +-- error_source: client_request|upstream_http|gateway +-- ============================================ + +-- Owner: legacy sub2api => platform. +UPDATE ops_error_logs +SET error_owner = 'platform' +WHERE LOWER(COALESCE(error_owner, '')) = 'sub2api'; + +-- Owner: normalize empty/null to platform (best-effort). +UPDATE ops_error_logs +SET error_owner = 'platform' +WHERE COALESCE(TRIM(error_owner), '') = ''; + +-- Phase: map legacy phases. +UPDATE ops_error_logs +SET error_phase = CASE + WHEN COALESCE(TRIM(error_phase), '') = '' THEN 'internal' + WHEN LOWER(error_phase) IN ('billing', 'concurrency', 'response') THEN 'request' + WHEN LOWER(error_phase) IN ('scheduling') THEN 'routing' + WHEN LOWER(error_phase) IN ('request', 'auth', 'routing', 'upstream', 'network', 'internal') THEN LOWER(error_phase) + ELSE 'internal' +END; + +-- Source: map legacy sources. 
+UPDATE ops_error_logs
+SET error_source = CASE
+    WHEN COALESCE(TRIM(error_source), '') = '' THEN 'gateway'
+    WHEN LOWER(error_source) IN ('billing', 'concurrency') THEN 'client_request'
+    WHEN LOWER(error_source) IN ('upstream_network', 'internal') THEN 'gateway'
+    WHEN LOWER(error_source) IN ('client_request', 'upstream_http', 'gateway') THEN LOWER(error_source)
+    ELSE 'gateway'
+END;
+
+-- Auto-resolve recovered upstream errors (client status < 400).
+UPDATE ops_error_logs
+SET
+    resolved = true,
+    resolved_at = COALESCE(resolved_at, created_at)
+WHERE resolved = false AND COALESCE(status_code, 0) > 0 AND COALESCE(status_code, 0) < 400;
diff --git a/backend/migrations/README.md b/backend/migrations/README.md
new file mode 100644
index 00000000..3fe328e6
--- /dev/null
+++ b/backend/migrations/README.md
@@ -0,0 +1,178 @@
+# Database Migrations
+
+## Overview
+
+This directory contains SQL migration files for database schema changes. The migration system uses SHA256 checksums to ensure migration immutability and consistency across environments.
+
+## Migration File Naming
+
+Format: `NNN_description.sql`
+- `NNN`: Sequential number (e.g., 001, 002, 003)
+- `description`: Brief description in snake_case
+
+Example: `017_add_gemini_tier_id.sql`
+
+## Migration File Structure
+
+```sql
+-- +goose Up
+-- +goose StatementBegin
+-- Your forward migration SQL here
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+-- Your rollback migration SQL here
+-- +goose StatementEnd
+```
+
+## Important Rules
+
+### ⚠️ Immutability Principle
+
+**Once a migration is applied to ANY environment (dev, staging, production), it MUST NOT be modified.**
+
+Why?
+- Each migration has a SHA256 checksum stored in the `schema_migrations` table
+- Modifying an applied migration causes checksum mismatch errors
+- Different environments would end up with inconsistent database states
+- It breaks the audit trail and reproducibility
+
+### ✅ Correct Workflow
+
+1. **Create new migration**
+   ```bash
+   # Create new file with next sequential number
+   touch migrations/018_your_change.sql
+   ```
+
+2. **Write Up and Down migrations**
+   - Up: Apply the change
+   - Down: Revert the change (should be symmetric with Up)
+
+3. **Test locally**
+   ```bash
+   # Apply migration
+   make migrate-up
+
+   # Test rollback
+   make migrate-down
+   ```
+
+4. **Commit and deploy**
+   ```bash
+   git add migrations/018_your_change.sql
+   git commit -m "feat(db): add your change"
+   ```
+
+### ❌ What NOT to Do
+
+- ❌ Modify an already-applied migration file
+- ❌ Delete migration files
+- ❌ Change migration file names
+- ❌ Reorder migration numbers
+
+### 🔧 If You Accidentally Modified an Applied Migration
+
+**Error message:**
+```
+migration 017_add_gemini_tier_id.sql checksum mismatch (db=abc123... file=def456...)
+```
+
+**Solution:**
+```bash
+# 1. Find the commit where the original version was first applied
+git log --oneline -- migrations/017_add_gemini_tier_id.sql
+
+# 2. Restore the file from that commit (replace <commit> with the hash from step 1)
+git checkout <commit> -- migrations/017_add_gemini_tier_id.sql
+
+# 3. Create a NEW migration for your changes
+touch migrations/018_your_new_change.sql
+```
+
+## Migration System Details
+
+- **Checksum Algorithm**: SHA256 of the trimmed file content (see the sketch below)
+- **Tracking Table**: `schema_migrations` (filename, checksum, applied_at)
+- **Runner**: `internal/repository/migrations_runner.go`
+- **Auto-run**: Migrations run automatically on service startup
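+
+For reference, the sketch below shows how such a checksum can be computed. It
+assumes "SHA256 of trimmed file content" means a hex-encoded SHA-256 over the
+file bytes with leading/trailing whitespace trimmed; the authoritative logic
+lives in `internal/repository/migrations_runner.go`, and the file path is only
+an example.
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// migrationChecksum hashes the whitespace-trimmed file content with SHA-256
+// and returns the hex encoding, matching the documented scheme.
+func migrationChecksum(path string) (string, error) {
+	b, err := os.ReadFile(path)
+	if err != nil {
+		return "", err
+	}
+	sum := sha256.Sum256([]byte(strings.TrimSpace(string(b))))
+	return hex.EncodeToString(sum[:]), nil
+}
+
+func main() {
+	sum, err := migrationChecksum("migrations/017_add_gemini_tier_id.sql")
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	fmt.Println(sum) // compare against schema_migrations.checksum
+}
+```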
+## Best Practices
+
+1. **Keep migrations small and focused**
+   - One logical change per migration
+   - Easier to review and rollback
+
+2. **Write reversible migrations**
+   - Always provide a working Down migration
+   - Test rollback before committing
+
+3. **Use transactions**
+   - Wrap DDL statements in transactions when possible
+   - Ensures atomicity
+
+4. **Add comments**
+   - Explain WHY the change is needed
+   - Document any special considerations
+
+5. **Test in development first**
+   - Apply migration locally
+   - Verify data integrity
+   - Test rollback
+
+## Example Migration
+
+```sql
+-- +goose Up
+-- +goose StatementBegin
+-- Add tier_id field to Gemini OAuth accounts for quota tracking
+UPDATE accounts
+SET credentials = jsonb_set(
+    credentials,
+    '{tier_id}',
+    '"LEGACY"',
+    true
+)
+WHERE platform = 'gemini'
+  AND type = 'oauth'
+  AND credentials->>'tier_id' IS NULL;
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+-- Remove tier_id field
+UPDATE accounts
+SET credentials = credentials - 'tier_id'
+WHERE platform = 'gemini'
+  AND type = 'oauth'
+  AND credentials->>'tier_id' = 'LEGACY';
+-- +goose StatementEnd
+```
+
+## Troubleshooting
+
+### Checksum Mismatch
+See "If You Accidentally Modified an Applied Migration" above.
+
+### Migration Failed
+```bash
+# Check migration status
+psql -d sub2api -c "SELECT * FROM schema_migrations ORDER BY applied_at DESC;"
+
+# Manually rollback if needed (use with caution)
+# Better to fix the migration and create a new one
+```
+
+### Need to Skip a Migration (Emergency Only)
+```sql
+-- DANGEROUS: Only use in development or with extreme caution
+INSERT INTO schema_migrations (filename, checksum, applied_at)
+VALUES ('NNN_migration.sql', 'calculated_checksum', NOW());
+```
+
+## References
+
+- Migration runner: `internal/repository/migrations_runner.go`
+- Goose syntax: https://github.com/pressly/goose
+- PostgreSQL docs: https://www.postgresql.org/docs/
diff --git a/backend/migrations/migrations.go b/backend/migrations/migrations.go
new file mode 100644
index 00000000..3cab7b03
--- /dev/null
+++ b/backend/migrations/migrations.go
@@ -0,0 +1,34 @@
+// Package migrations contains the embedded SQL database migration files.
+//
+// The package uses the embed feature from Go 1.16+ to compile the SQL files
+// into the resulting binary. Advantages of this approach:
+//   - no extra migration files are needed at deploy time
+//   - migration files always match the code version
+//   - easy to version-control and code-review
+package migrations
+
+import "embed"
+
+// FS contains all embedded SQL migration files in this directory.
+//
+// Migration naming convention:
+//   - use a zero-padded numeric prefix to guarantee execution order
+//   - format: NNN_description.sql (e.g. 001_init.sql, 002_add_users.sql)
+//   - the description uses lowercase words separated by underscores
+//
+// Migration file requirements:
+//   - must be idempotent (safe to re-run without errors)
+//   - prefer IF NOT EXISTS / IF EXISTS syntax
+//   - once applied, a migration file must not be modified (enforced via checksum)
+//
+// Example migration file:
+//
+//	-- 001_init.sql
+//	CREATE TABLE IF NOT EXISTS users (
+//	    id BIGSERIAL PRIMARY KEY,
+//	    email VARCHAR(255) NOT NULL UNIQUE,
+//	    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+//	);
+//
+//go:embed *.sql
+var FS embed.FS
diff --git a/backend/resources/model-pricing/README.md b/backend/resources/model-pricing/README.md
new file mode 100644
index 00000000..d755de73
--- /dev/null
+++ b/backend/resources/model-pricing/README.md
@@ -0,0 +1,37 @@
+# Model Pricing Data
+
+This directory contains a local copy of
the mirrored model pricing data as a fallback mechanism. + +## Source +The original file is maintained by the LiteLLM project and mirrored into the `price-mirror` branch of this repository via GitHub Actions: +- Mirror branch (configurable via `PRICE_MIRROR_REPO`): https://raw.githubusercontent.com//price-mirror/model_prices_and_context_window.json +- Upstream source: https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json + +## Purpose +This local copy serves as a fallback when the remote file cannot be downloaded due to: +- Network restrictions +- Firewall rules +- DNS resolution issues +- GitHub being blocked in certain regions +- Docker container network limitations + +## Update Process +The pricingService will: +1. First attempt to download the latest version from GitHub +2. If download fails, use this local copy as fallback +3. Log a warning when using the fallback file + +## Manual Update +To manually update this file with the latest pricing data (if automation is unavailable): +```bash +curl -s https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json -o model_prices_and_context_window.json +``` + +## File Format +The file contains JSON data with model pricing information including: +- Model names and identifiers +- Input/output token costs +- Context window sizes +- Model capabilities + +Last updated: 2025-08-10 diff --git a/backend/resources/model-pricing/model_prices_and_context_window.json b/backend/resources/model-pricing/model_prices_and_context_window.json new file mode 100644 index 00000000..ad2861df --- /dev/null +++ b/backend/resources/model-pricing/model_prices_and_context_window.json @@ -0,0 +1,31356 @@ +{ + "sample_spec": { + "code_interpreter_cost_per_session": 0.0, + "computer_use_input_cost_per_1k_tokens": 0.0, + "computer_use_output_cost_per_1k_tokens": 0.0, + "deprecation_date": "date when the model becomes deprecated in the format YYYY-MM-DD", + "file_search_cost_per_1k_calls": 0.0, + "file_search_cost_per_gb_per_day": 0.0, + "input_cost_per_audio_token": 0.0, + "input_cost_per_token": 0.0, + "litellm_provider": "one of https://docs.litellm.ai/docs/providers", + "max_input_tokens": "max input tokens, if the provider specifies it. if not default to max_tokens", + "max_output_tokens": "max output tokens, if the provider specifies it. if not default to max_tokens", + "max_tokens": "LEGACY parameter. set to max_output_tokens if provider specifies it. 
IF not set to max_input_tokens, if provider specifies it.", + "mode": "one of: chat, embedding, completion, image_generation, audio_transcription, audio_speech, image_generation, moderation, rerank, search", + "output_cost_per_reasoning_token": 0.0, + "output_cost_per_token": 0.0, + "search_context_cost_per_query": { + "search_context_size_high": 0.0, + "search_context_size_low": 0.0, + "search_context_size_medium": 0.0 + }, + "supported_regions": [ + "global", + "us-west-2", + "eu-west-1", + "ap-southeast-1", + "ap-northeast-1" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_vision": true, + "supports_web_search": true, + "vector_store_cost_per_gb_per_day": 0.0 + }, + "1024-x-1024/50-steps/bedrock/amazon.nova-canvas-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 2600, + "mode": "image_generation", + "output_cost_per_image": 0.06 + }, + "1024-x-1024/50-steps/stability.stable-diffusion-xl-v1": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.04 + }, + "1024-x-1024/dall-e-2": { + "input_cost_per_pixel": 1.9e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "1024-x-1024/max-steps/stability.stable-diffusion-xl-v1": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.08 + }, + "256-x-256/dall-e-2": { + "input_cost_per_pixel": 2.4414e-07, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "512-x-512/50-steps/stability.stable-diffusion-xl-v0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.018 + }, + "512-x-512/dall-e-2": { + "input_cost_per_pixel": 6.86e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "512-x-512/max-steps/stability.stable-diffusion-xl-v0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.036 + }, + "ai21.j2-mid-v1": { + "input_cost_per_token": 1.25e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 8191, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.25e-05 + }, + "ai21.j2-ultra-v1": { + "input_cost_per_token": 1.88e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 8191, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.88e-05 + }, + "ai21.jamba-1-5-large-v1:0": { + "input_cost_per_token": 2e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06 + }, + "ai21.jamba-1-5-mini-v1:0": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07 + }, + "ai21.jamba-instruct-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 70000, + "max_output_tokens": 4096, + "max_tokens": 4096, + 
"mode": "chat", + "output_cost_per_token": 7e-07, + "supports_system_messages": true + }, + "aiml/dall-e-2": { + "litellm_provider": "aiml", + "metadata": { + "notes": "DALL-E 2 via AI/ML API - Reliable text-to-image generation" + }, + "mode": "image_generation", + "output_cost_per_image": 0.021, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/dall-e-3": { + "litellm_provider": "aiml", + "metadata": { + "notes": "DALL-E 3 via AI/ML API - High-quality text-to-image generation" + }, + "mode": "image_generation", + "output_cost_per_image": 0.042, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux-pro": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Dev - Development version optimized for experimentation" + }, + "mode": "image_generation", + "output_cost_per_image": 0.053, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux-pro/v1.1": { + "litellm_provider": "aiml", + "mode": "image_generation", + "output_cost_per_image": 0.042, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux-pro/v1.1-ultra": { + "litellm_provider": "aiml", + "mode": "image_generation", + "output_cost_per_image": 0.063, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux-realism": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Pro - Professional-grade image generation model" + }, + "mode": "image_generation", + "output_cost_per_image": 0.037, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux/dev": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Dev - Development version optimized for experimentation" + }, + "mode": "image_generation", + "output_cost_per_image": 0.026, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux/kontext-max/text-to-image": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Pro v1.1 - Enhanced version with improved capabilities and 6x faster inference speed" + }, + "mode": "image_generation", + "output_cost_per_image": 0.084, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux/kontext-pro/text-to-image": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Pro v1.1 - Enhanced version with improved capabilities and 6x faster inference speed" + }, + "mode": "image_generation", + "output_cost_per_image": 0.042, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "aiml/flux/schnell": { + "litellm_provider": "aiml", + "metadata": { + "notes": "Flux Schnell - Fast generation model optimized for speed" + }, + "mode": "image_generation", + "output_cost_per_image": 0.003, + "source": "https://docs.aimlapi.com/", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "amazon.nova-canvas-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 2600, + "mode": "image_generation", + "output_cost_per_image": 0.06 + }, + "us.writer.palmyra-x4-v1:0": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_pdf_input": true + }, + 
"us.writer.palmyra-x5-v1:0": { + "input_cost_per_token": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_pdf_input": true + }, + "writer.palmyra-x4-v1:0": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_pdf_input": true + }, + "writer.palmyra-x5-v1:0": { + "input_cost_per_token": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_pdf_input": true + }, + "amazon.nova-lite-v1:0": { + "input_cost_per_token": 6e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 2.4e-07, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "amazon.nova-2-lite-v1:0": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, + "apac.amazon.nova-2-lite-v1:0": { + "cache_read_input_token_cost": 8.25e-08, + "input_cost_per_token": 3.3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, + "eu.amazon.nova-2-lite-v1:0": { + "cache_read_input_token_cost": 8.25e-08, + "input_cost_per_token": 3.3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, + "us.amazon.nova-2-lite-v1:0": { + "cache_read_input_token_cost": 8.25e-08, + "input_cost_per_token": 3.3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, + + "amazon.nova-micro-v1:0": { + "input_cost_per_token": 3.5e-08, + "litellm_provider": "bedrock_converse", + 
"max_input_tokens": 128000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.4e-07, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, + "amazon.nova-pro-v1:0": { + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.2e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "amazon.rerank-v1:0": { + "input_cost_per_query": 0.001, + "input_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "max_document_chunks_per_query": 100, + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_query_tokens": 32000, + "max_tokens": 32000, + "max_tokens_per_document_chunk": 512, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "amazon.titan-embed-image-v1": { + "input_cost_per_image": 6e-05, + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128, + "max_tokens": 128, + "metadata": { + "notes": "'supports_image_input' is a deprecated field. Use 'supports_embedding_image_input' instead." + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "source": "https://us-east-1.console.aws.amazon.com/bedrock/home?region=us-east-1#/providers?model=amazon.titan-image-generator-v1", + "supports_embedding_image_input": true, + "supports_image_input": true + }, + "amazon.titan-embed-text-v1": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536 + }, + "amazon.titan-embed-text-v2:0": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024 + }, + "amazon.titan-image-generator-v1": { + "input_cost_per_image": 0.0, + "output_cost_per_image": 0.008, + "output_cost_per_image_premium_image": 0.01, + "output_cost_per_image_above_512_and_512_pixels": 0.01, + "output_cost_per_image_above_512_and_512_pixels_and_premium_image": 0.012, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, + "amazon.titan-image-generator-v2": { + "input_cost_per_image": 0.0, + "output_cost_per_image": 0.008, + "output_cost_per_image_premium_image": 0.01, + "output_cost_per_image_above_1024_and_1024_pixels": 0.01, + "output_cost_per_image_above_1024_and_1024_pixels_and_premium_image": 0.012, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, + "amazon.titan-image-generator-v2:0": { + "input_cost_per_image": 0.0, + "output_cost_per_image": 0.008, + "output_cost_per_image_premium_image": 0.01, + "output_cost_per_image_above_1024_and_1024_pixels": 0.01, + "output_cost_per_image_above_1024_and_1024_pixels_and_premium_image": 0.012, + "litellm_provider": "bedrock", + "mode": "image_generation" + }, + "twelvelabs.marengo-embed-2-7-v1:0": { + "input_cost_per_token": 7e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "supports_embedding_image_input": true, + "supports_image_input": true + }, + "us.twelvelabs.marengo-embed-2-7-v1:0": { + 
"input_cost_per_token": 7e-05, + "input_cost_per_video_per_second": 0.0007, + "input_cost_per_audio_per_second": 0.00014, + "input_cost_per_image": 0.0001, + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "supports_embedding_image_input": true, + "supports_image_input": true + }, + "eu.twelvelabs.marengo-embed-2-7-v1:0": { + "input_cost_per_token": 7e-05, + "input_cost_per_video_per_second": 0.0007, + "input_cost_per_audio_per_second": 0.00014, + "input_cost_per_image": 0.0001, + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "supports_embedding_image_input": true, + "supports_image_input": true + }, + "twelvelabs.pegasus-1-2-v1:0": { + "input_cost_per_video_per_second": 0.00049, + "output_cost_per_token": 7.5e-06, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_video_input": true + }, + "us.twelvelabs.pegasus-1-2-v1:0": { + "input_cost_per_video_per_second": 0.00049, + "output_cost_per_token": 7.5e-06, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_video_input": true + }, + "eu.twelvelabs.pegasus-1-2-v1:0": { + "input_cost_per_video_per_second": 0.00049, + "output_cost_per_token": 7.5e-06, + "litellm_provider": "bedrock", + "mode": "chat", + "supports_video_input": true + }, + "amazon.titan-text-express-v1": { + "input_cost_per_token": 1.3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 8000, + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 1.7e-06 + }, + "amazon.titan-text-lite-v1": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 4000, + "max_tokens": 4000, + "mode": "chat", + "output_cost_per_token": 4e-07 + }, + "amazon.titan-text-premier-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.5e-06 + }, + "anthropic.claude-3-5-haiku-20241022-v1:0": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 8e-08, + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + 
"anthropic.claude-haiku-4-5@20251001": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-3-5-sonnet-20241022-v2:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-3-7-sonnet-20240620-v1:0": { + "cache_creation_input_token_cost": 4.5e-06, + "cache_read_input_token_cost": 3.6e-07, + "input_cost_per_token": 3.6e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.8e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-3-7-sonnet-20250219-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + 
"anthropic.claude-3-opus-20240229-v1:0": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-3-sonnet-20240229-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "anthropic.claude-instant-v1": { + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-06, + "supports_tool_choice": true + }, + "anthropic.claude-opus-4-1-20250805-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "anthropic.claude-opus-4-20250514-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "anthropic.claude-opus-4-5-20251101-v1:0": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + 
"tool_use_system_prompt_tokens": 159 + }, + "anthropic.claude-sonnet-4-20250514-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "anthropic.claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "anthropic.claude-v1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05 + }, + "anthropic.claude-v2:1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "anyscale/HuggingFaceH4/zephyr-7b-beta": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.5e-07 + }, + "anyscale/codellama/CodeLlama-34b-Instruct-hf": { + "input_cost_per_token": 1e-06, + "litellm_provider": "anyscale", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "anyscale/codellama/CodeLlama-70b-Instruct-hf": { + "input_cost_per_token": 1e-06, + "litellm_provider": "anyscale", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-06, + "source": 
"https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/codellama-CodeLlama-70b-Instruct-hf" + }, + "anyscale/google/gemma-7b-it": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/google-gemma-7b-it" + }, + "anyscale/meta-llama/Llama-2-13b-chat-hf": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.5e-07 + }, + "anyscale/meta-llama/Llama-2-70b-chat-hf": { + "input_cost_per_token": 1e-06, + "litellm_provider": "anyscale", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "anyscale/meta-llama/Llama-2-7b-chat-hf": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-07 + }, + "anyscale/meta-llama/Meta-Llama-3-70B-Instruct": { + "input_cost_per_token": 1e-06, + "litellm_provider": "anyscale", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1e-06, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/meta-llama-Meta-Llama-3-70B-Instruct" + }, + "anyscale/meta-llama/Meta-Llama-3-8B-Instruct": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/meta-llama-Meta-Llama-3-8B-Instruct" + }, + "anyscale/mistralai/Mistral-7B-Instruct-v0.1": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/mistralai-Mistral-7B-Instruct-v0.1", + "supports_function_calling": true + }, + "anyscale/mistralai/Mixtral-8x22B-Instruct-v0.1": { + "input_cost_per_token": 9e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/mistralai-Mixtral-8x22B-Instruct-v0.1", + "supports_function_calling": true + }, + "anyscale/mistralai/Mixtral-8x7B-Instruct-v0.1": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "anyscale", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://docs.anyscale.com/preview/endpoints/text-generation/supported-models/mistralai-Mixtral-8x7B-Instruct-v0.1", + "supports_function_calling": true + }, + "apac.amazon.nova-lite-v1:0": { + "input_cost_per_token": 6.3e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 2.52e-07, + "supports_function_calling": 
true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "apac.amazon.nova-micro-v1:0": { + "input_cost_per_token": 3.7e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.48e-07, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, + "apac.amazon.nova-pro-v1:0": { + "input_cost_per_token": 8.4e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.36e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "apac.anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "apac.anthropic.claude-3-5-sonnet-20241022-v2:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "apac.anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "apac.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.375e-06, + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5.5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "apac.anthropic.claude-3-sonnet-20240229-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + 
"supports_tool_choice": true, + "supports_vision": true + }, + "apac.anthropic.claude-sonnet-4-20250514-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "assemblyai/best": { + "input_cost_per_second": 3.333e-05, + "litellm_provider": "assemblyai", + "mode": "audio_transcription", + "output_cost_per_second": 0.0 + }, + "assemblyai/nano": { + "input_cost_per_second": 0.00010278, + "litellm_provider": "assemblyai", + "mode": "audio_transcription", + "output_cost_per_second": 0.0 + }, + "au.anthropic.claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 4.125e-06, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_token": 3.3e-06, + "input_cost_per_token_above_200k_tokens": 6.6e-06, + "output_cost_per_token_above_200k_tokens": 2.475e-05, + "cache_creation_input_token_cost_above_200k_tokens": 8.25e-06, + "cache_read_input_token_cost_above_200k_tokens": 6.6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "azure/ada": { + "input_cost_per_token": 1e-07, + "litellm_provider": "azure", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "azure/codex-mini": { + "cache_read_input_token_cost": 3.75e-07, + "input_cost_per_token": 1.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 6e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/command-r-plus": { + "input_cost_per_token": 3e-06, + 
"litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true + }, + "azure_ai/claude-haiku-4-5": { + "input_cost_per_token": 1e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/claude-opus-4-1": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure_ai", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/claude-sonnet-4-5": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/computer-use-preview": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/container": { + "code_interpreter_cost_per_session": 0.03, + "litellm_provider": "azure", + "mode": "chat" + }, + "azure/eu/gpt-4o-2024-08-06": { + "deprecation_date": "2026-02-27", + "cache_read_input_token_cost": 1.375e-06, + "input_cost_per_token": 2.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-4o-2024-11-20": { + "deprecation_date": "2026-03-01", + "cache_creation_input_token_cost": 1.38e-06, + "input_cost_per_token": 2.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + 
"supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-4o-mini-2024-07-18": { + "cache_read_input_token_cost": 8.3e-08, + "input_cost_per_token": 1.65e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-4o-mini-realtime-preview-2024-12-17": { + "cache_creation_input_audio_token_cost": 3.3e-07, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_audio_token": 1.1e-05, + "input_cost_per_token": 6.6e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2.2e-05, + "output_cost_per_token": 2.64e-06, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-realtime-preview-2024-10-01": { + "cache_creation_input_audio_token_cost": 2.2e-05, + "cache_read_input_token_cost": 2.75e-06, + "input_cost_per_audio_token": 0.00011, + "input_cost_per_token": 5.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 0.00022, + "output_cost_per_token": 2.2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-4o-realtime-preview-2024-12-17": { + "cache_read_input_audio_token_cost": 2.5e-06, + "cache_read_input_token_cost": 2.75e-06, + "input_cost_per_audio_token": 4.4e-05, + "input_cost_per_token": 5.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2.2e-05, + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/eu/gpt-5-2025-08-07": { + "cache_read_input_token_cost": 1.375e-07, + "input_cost_per_token": 1.375e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5-mini-2025-08-07": { + 
"cache_read_input_token_cost": 2.75e-08, + "input_cost_per_token": 2.75e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.2e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5.1": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5.1-chat": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5.1-codex": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5.1-codex-mini": { + "cache_read_input_token_cost": 2.8e-08, + "input_cost_per_token": 2.75e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2.2e-06, + "supported_endpoints": [ + 
"/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/gpt-5-nano-2025-08-07": { + "cache_read_input_token_cost": 5.5e-09, + "input_cost_per_token": 5.5e-08, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4.4e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/o1-2024-12-17": { + "cache_read_input_token_cost": 8.25e-06, + "input_cost_per_token": 1.65e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6.6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/eu/o1-mini-2024-09-12": { + "cache_read_input_token_cost": 6.05e-07, + "input_cost_per_token": 1.21e-06, + "input_cost_per_token_batches": 6.05e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + "output_cost_per_token_batches": 2.42e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_vision": false + }, + "azure/eu/o1-preview-2024-09-12": { + "cache_read_input_token_cost": 8.25e-06, + "input_cost_per_token": 1.65e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6.6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_vision": false + }, + "azure/eu/o3-mini-2025-01-31": { + "cache_read_input_token_cost": 6.05e-07, + "input_cost_per_token": 1.21e-06, + "input_cost_per_token_batches": 6.05e-07, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + "output_cost_per_token_batches": 2.42e-06, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/global-standard/gpt-4o-2024-08-06": { + "cache_read_input_token_cost": 1.25e-06, + "deprecation_date": "2026-02-27", + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global-standard/gpt-4o-2024-11-20": { + "cache_read_input_token_cost": 1.25e-06, + "deprecation_date": "2026-03-01", + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global-standard/gpt-4o-mini": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-4o-2024-08-06": { + "deprecation_date": "2026-02-27", + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-4o-2024-11-20": { + "deprecation_date": "2026-03-01", + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-5.1": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-5.1-chat": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + 
"supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-5.1-codex": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/global/gpt-5.1-codex-mini": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-3.5-turbo": { + "input_cost_per_token": 5e-07, + "litellm_provider": "azure", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-3.5-turbo-0125": { + "deprecation_date": "2025-03-31", + "input_cost_per_token": 5e-07, + "litellm_provider": "azure", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-3.5-turbo-instruct-0914": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "azure_text", + "max_input_tokens": 4097, + "max_tokens": 4097, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "azure/gpt-35-turbo": { + "input_cost_per_token": 5e-07, + "litellm_provider": "azure", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-0125": { + "deprecation_date": "2025-05-31", + "input_cost_per_token": 5e-07, + "litellm_provider": "azure", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-0301": { + 
"deprecation_date": "2025-02-13", + "input_cost_per_token": 2e-07, + "litellm_provider": "azure", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4097, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-0613": { + "deprecation_date": "2025-02-13", + "input_cost_per_token": 1.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4097, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-1106": { + "deprecation_date": "2025-03-31", + "input_cost_per_token": 1e-06, + "litellm_provider": "azure", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-16k": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-16k-0613": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-35-turbo-instruct": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "azure_text", + "max_input_tokens": 4097, + "max_tokens": 4097, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "azure/gpt-35-turbo-instruct-0914": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "azure_text", + "max_input_tokens": 4097, + "max_tokens": 4097, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "azure/gpt-4": { + "input_cost_per_token": 3e-05, + "litellm_provider": "azure", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-4-0125-preview": { + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-4-0613": { + "input_cost_per_token": 3e-05, + "litellm_provider": "azure", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-4-1106-preview": { + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-4-32k": { + "input_cost_per_token": 6e-05, + "litellm_provider": "azure", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + 
"max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.00012, + "supports_tool_choice": true + }, + "azure/gpt-4-32k-0613": { + "input_cost_per_token": 6e-05, + "litellm_provider": "azure", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.00012, + "supports_tool_choice": true + }, + "azure/gpt-4-turbo": { + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "azure/gpt-4-turbo-2024-04-09": { + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4-turbo-vision-preview": { + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4.1": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/gpt-4.1-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/gpt-4.1-mini": { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "input_cost_per_token_batches": 2e-07, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "output_cost_per_token_batches": 8e-07, + "supported_endpoints": [ + 
"/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/gpt-4.1-mini-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "input_cost_per_token_batches": 2e-07, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "output_cost_per_token_batches": 8e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/gpt-4.1-nano": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "input_cost_per_token_batches": 5e-08, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_batches": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4.1-nano-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "input_cost_per_token_batches": 5e-08, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_batches": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4.5-preview": { + "cache_read_input_token_cost": 3.75e-05, + "input_cost_per_token": 7.5e-05, + "input_cost_per_token_batches": 3.75e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 0.00015, + "output_cost_per_token_batches": 7.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": 
true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o": { + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o-2024-05-13": { + "input_cost_per_token": 5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o-2024-08-06": { + "deprecation_date": "2026-02-27", + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o-2024-11-20": { + "deprecation_date": "2026-03-01", + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-audio-2025-08-28": { + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/gpt-audio-mini-2025-10-06": { + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 6e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + 
"supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/gpt-4o-audio-preview-2024-12-17": { + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/gpt-4o-mini": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.65e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o-mini-2024-07-18": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.65e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-4o-mini-audio-preview-2024-12-17": { + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": false, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/gpt-4o-mini-realtime-preview-2024-12-17": { + "cache_creation_input_audio_token_cost": 3e-07, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 6e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-realtime-2025-08-28": { + "cache_creation_input_audio_token_cost": 
4e-06, + "cache_read_input_token_cost": 4e-06, + "input_cost_per_audio_token": 3.2e-05, + "input_cost_per_image": 5e-06, + "input_cost_per_token": 4e-06, + "litellm_provider": "azure", + "max_input_tokens": 32000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 6.4e-05, + "output_cost_per_token": 1.6e-05, + "supported_endpoints": [ + "/v1/realtime" + ], + "supported_modalities": [ + "text", + "image", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-realtime-mini-2025-10-06": { + "cache_creation_input_audio_token_cost": 3e-07, + "cache_read_input_token_cost": 6e-08, + "input_cost_per_audio_token": 1e-05, + "input_cost_per_image": 8e-07, + "input_cost_per_token": 6e-07, + "litellm_provider": "azure", + "max_input_tokens": 32000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supported_endpoints": [ + "/v1/realtime" + ], + "supported_modalities": [ + "text", + "image", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o-mini-transcribe": { + "input_cost_per_audio_token": 3e-06, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 5e-06, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "azure/gpt-4o-mini-tts": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "mode": "audio_speech", + "output_cost_per_audio_token": 1.2e-05, + "output_cost_per_second": 0.00025, + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/speech" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "audio" + ] + }, + "azure/gpt-4o-realtime-preview-2024-10-01": { + "cache_creation_input_audio_token_cost": 2e-05, + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 0.0001, + "input_cost_per_token": 5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 0.0002, + "output_cost_per_token": 2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o-realtime-preview-2024-12-17": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2e-05, + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + 
"supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/gpt-4o-transcribe": { + "input_cost_per_audio_token": 6e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "azure/gpt-4o-transcribe-diarize": { + "input_cost_per_audio_token": 6e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "azure/gpt-5.1-2025-11-13": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "azure/gpt-5.1-chat-2025-11-13": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": false, + "supports_native_streaming": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "azure/gpt-5.1-codex-2025-11-13": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": 
true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1-codex-mini-2025-11-13": { + "cache_read_input_token_cost": 2.5e-08, + "cache_read_input_token_cost_priority": 4.5e-08, + "input_cost_per_token": 2.5e-07, + "input_cost_per_token_priority": 4.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2e-06, + "output_cost_per_token_priority": 3.6e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-2025-08-07": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-chat": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "source": "https://azure.microsoft.com/en-us/blog/gpt-5-in-azure-ai-foundry-the-future-of-ai-apps-and-agents-starts-here/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + 
"supports_tool_choice": false, + "supports_vision": true + }, + "azure/gpt-5-chat-latest": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "azure/gpt-5-codex": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-mini": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-mini-2025-08-07": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-nano": { + "cache_read_input_token_cost": 5e-09, + "input_cost_per_token": 5e-08, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 
4e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-nano-2025-08-07": { + "cache_read_input_token_cost": 5e-09, + "input_cost_per_token": 5e-08, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5-pro": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "mode": "responses", + "output_cost_per_token": 0.00012, + "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-models/concepts/models-sold-directly-by-azure?pivots=azure-openai&tabs=global-standard-aoai%2Cstandard-chat-completions%2Cglobal-standard#gpt-5", + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1-chat": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + 
"supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1-codex": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1-codex-max": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "azure", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.1-codex-mini": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.2": { + "cache_read_input_token_cost": 1.75e-07, + "input_cost_per_token": 1.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, 
+ "supports_vision": true + }, + "azure/gpt-5.2-2025-12-11": { + "cache_read_input_token_cost": 1.75e-07, + "cache_read_input_token_cost_priority": 3.5e-07, + "input_cost_per_token": 1.75e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "output_cost_per_token_priority": 2.8e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "azure/gpt-5.2-chat-2025-12-11": { + "cache_read_input_token_cost": 1.75e-07, + "cache_read_input_token_cost_priority": 3.5e-07, + "input_cost_per_token": 1.75e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "output_cost_per_token_priority": 2.8e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/gpt-5.2-pro": { + "input_cost_per_token": 2.1e-05, + "litellm_provider": "azure", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.68e-04, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "azure/gpt-5.2-pro-2025-12-11": { + "input_cost_per_token": 2.1e-05, + "litellm_provider": "azure", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.68e-04, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + 
"supports_web_search": true + }, + "azure/gpt-image-1": { + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/hd/1024-x-1024/dall-e-3": { + "input_cost_per_pixel": 7.629e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/hd/1024-x-1792/dall-e-3": { + "input_cost_per_pixel": 6.539e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/hd/1792-x-1024/dall-e-3": { + "input_cost_per_pixel": 6.539e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/high/1024-x-1024/gpt-image-1": { + "input_cost_per_pixel": 1.59263611e-07, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/high/1024-x-1536/gpt-image-1": { + "input_cost_per_pixel": 1.58945719e-07, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/high/1536-x-1024/gpt-image-1": { + "input_cost_per_pixel": 1.58945719e-07, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1024-x-1024/gpt-image-1": { + "input_cost_per_pixel": 1.0490417e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1024-x-1536/gpt-image-1": { + "input_cost_per_pixel": 1.0172526e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1536-x-1024/gpt-image-1": { + "input_cost_per_pixel": 1.0172526e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1024-x-1024/gpt-image-1": { + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1024-x-1536/gpt-image-1": { + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1536-x-1024/gpt-image-1": { + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/gpt-image-1-mini": { + "input_cost_per_pixel": 8.0566406e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1024-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 2.0751953125e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1024-x-1536/gpt-image-1-mini": { + "input_cost_per_pixel": 2.0751953125e-09, + "litellm_provider": "azure", + "mode": "image_generation", + 
"output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/low/1536-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 2.0345052083e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1024-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 8.056640625e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1024-x-1536/gpt-image-1-mini": { + "input_cost_per_pixel": 8.056640625e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/medium/1536-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 7.9752604167e-09, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/high/1024-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 3.173828125e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/high/1024-x-1536/gpt-image-1-mini": { + "input_cost_per_pixel": 3.173828125e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/high/1536-x-1024/gpt-image-1-mini": { + "input_cost_per_pixel": 3.1575520833e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure/mistral-large-2402": { + "input_cost_per_token": 8e-06, + "litellm_provider": "azure", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_function_calling": true + }, + "azure/mistral-large-latest": { + "input_cost_per_token": 8e-06, + "litellm_provider": "azure", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_function_calling": true + }, + "azure/o1": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o1-2024-12-17": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o1-mini": { + "cache_read_input_token_cost": 6.05e-07, + "input_cost_per_token": 1.21e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": false + }, + "azure/o1-mini-2024-09-12": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": false + }, + "azure/o1-preview": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": false + }, + "azure/o1-preview-2024-09-12": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": false + }, + "azure/o3": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o3-2025-04-16": { + "deprecation_date": "2026-04-16", + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o3-deep-research": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_token": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 4e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + 
"supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "azure/o3-mini": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/o3-mini-2025-01-31": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/o3-pro": { + "input_cost_per_token": 2e-05, + "input_cost_per_token_batches": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 8e-05, + "output_cost_per_token_batches": 4e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o3-pro-2025-06-10": { + "input_cost_per_token": 2e-05, + "input_cost_per_token_batches": 1e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 8e-05, + "output_cost_per_token_batches": 4e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o4-mini": { + "cache_read_input_token_cost": 2.75e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/o4-mini-2025-04-16": { + "cache_read_input_token_cost": 2.75e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + 
"max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/standard/1024-x-1024/dall-e-2": { + "input_cost_per_pixel": 0.0, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/standard/1024-x-1024/dall-e-3": { + "input_cost_per_pixel": 3.81469e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/standard/1024-x-1792/dall-e-3": { + "input_cost_per_pixel": 4.359e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/standard/1792-x-1024/dall-e-3": { + "input_cost_per_pixel": 4.359e-08, + "litellm_provider": "azure", + "mode": "image_generation", + "output_cost_per_token": 0.0 + }, + "azure/text-embedding-3-large": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "azure", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "azure/text-embedding-3-small": { + "deprecation_date": "2026-04-30", + "input_cost_per_token": 2e-08, + "litellm_provider": "azure", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "azure/text-embedding-ada-002": { + "input_cost_per_token": 1e-07, + "litellm_provider": "azure", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "azure/speech/azure-tts": { + "input_cost_per_character": 15e-06, + "litellm_provider": "azure", + "mode": "audio_speech", + "source": "https://azure.microsoft.com/en-us/pricing/calculator/" + }, + "azure/speech/azure-tts-hd": { + "input_cost_per_character": 30e-06, + "litellm_provider": "azure", + "mode": "audio_speech", + "source": "https://azure.microsoft.com/en-us/pricing/calculator/" + }, + "azure/tts-1": { + "input_cost_per_character": 1.5e-05, + "litellm_provider": "azure", + "mode": "audio_speech" + }, + "azure/tts-1-hd": { + "input_cost_per_character": 3e-05, + "litellm_provider": "azure", + "mode": "audio_speech" + }, + "azure/us/gpt-4.1-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 2.2e-06, + "input_cost_per_token_batches": 1.1e-06, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8.8e-06, + "output_cost_per_token_batches": 4.4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/us/gpt-4.1-mini-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 4.4e-07, + "input_cost_per_token_batches": 2.2e-07, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + 
"max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.76e-06, + "output_cost_per_token_batches": 8.8e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": false + }, + "azure/us/gpt-4.1-nano-2025-04-14": { + "deprecation_date": "2026-11-04", + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1.1e-07, + "input_cost_per_token_batches": 6e-08, + "litellm_provider": "azure", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4.4e-07, + "output_cost_per_token_batches": 2.2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-4o-2024-08-06": { + "deprecation_date": "2026-02-27", + "cache_read_input_token_cost": 1.375e-06, + "input_cost_per_token": 2.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-4o-2024-11-20": { + "deprecation_date": "2026-03-01", + "cache_creation_input_token_cost": 1.38e-06, + "input_cost_per_token": 2.75e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-4o-mini-2024-07-18": { + "cache_read_input_token_cost": 8.3e-08, + "input_cost_per_token": 1.65e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-4o-mini-realtime-preview-2024-12-17": { + "cache_creation_input_audio_token_cost": 3.3e-07, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_audio_token": 1.1e-05, + "input_cost_per_token": 6.6e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2.2e-05, + "output_cost_per_token": 2.64e-06, + 
"supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-realtime-preview-2024-10-01": { + "cache_creation_input_audio_token_cost": 2.2e-05, + "cache_read_input_token_cost": 2.75e-06, + "input_cost_per_audio_token": 0.00011, + "input_cost_per_token": 5.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 0.00022, + "output_cost_per_token": 2.2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-4o-realtime-preview-2024-12-17": { + "cache_read_input_audio_token_cost": 2.5e-06, + "cache_read_input_token_cost": 2.75e-06, + "input_cost_per_audio_token": 4.4e-05, + "input_cost_per_token": 5.5e-06, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2.2e-05, + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "azure/us/gpt-5-2025-08-07": { + "cache_read_input_token_cost": 1.375e-07, + "input_cost_per_token": 1.375e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5-mini-2025-08-07": { + "cache_read_input_token_cost": 2.75e-08, + "input_cost_per_token": 2.75e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.2e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5-nano-2025-08-07": { + "cache_read_input_token_cost": 5.5e-09, + "input_cost_per_token": 5.5e-08, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + 
"output_cost_per_token": 4.4e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5.1": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5.1-chat": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5.1-codex": { + "cache_read_input_token_cost": 1.4e-07, + "input_cost_per_token": 1.38e-06, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/gpt-5.1-codex-mini": { + "cache_read_input_token_cost": 2.8e-08, + "input_cost_per_token": 2.75e-07, + "litellm_provider": "azure", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2.2e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + 
"supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/o1-2024-12-17": { + "cache_read_input_token_cost": 8.25e-06, + "input_cost_per_token": 1.65e-05, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6.6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/o1-mini-2024-09-12": { + "cache_read_input_token_cost": 6.05e-07, + "input_cost_per_token": 1.21e-06, + "input_cost_per_token_batches": 6.05e-07, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + "output_cost_per_token_batches": 2.42e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_vision": false + }, + "azure/us/o1-preview-2024-09-12": { + "cache_read_input_token_cost": 8.25e-06, + "input_cost_per_token": 1.65e-05, + "litellm_provider": "azure", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6.6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_vision": false + }, + "azure/us/o3-2025-04-16": { + "deprecation_date": "2026-04-16", + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 2.2e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8.8e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/us/o3-mini-2025-01-31": { + "cache_read_input_token_cost": 6.05e-07, + "input_cost_per_token": 1.21e-06, + "input_cost_per_token_batches": 6.05e-07, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + "output_cost_per_token_batches": 2.42e-06, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure/us/o4-mini-2025-04-16": { + "cache_read_input_token_cost": 3.1e-07, + "input_cost_per_token": 1.21e-06, + "litellm_provider": "azure", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.84e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure/whisper-1": { + "input_cost_per_second": 0.0001, + 
"litellm_provider": "azure", + "mode": "audio_transcription", + "output_cost_per_second": 0.0001 + }, + "azure_ai/Cohere-embed-v3-english": { + "input_cost_per_token": 1e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "source": "https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice", + "supports_embedding_image_input": true + }, + "azure_ai/Cohere-embed-v3-multilingual": { + "input_cost_per_token": 1e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024, + "source": "https://azuremarketplace.microsoft.com/en-us/marketplace/apps/cohere.cohere-embed-v3-english-offer?tab=PlansAndPrice", + "supports_embedding_image_input": true + }, + "azure_ai/FLUX-1.1-pro": { + "litellm_provider": "azure_ai", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/black-forest-labs-flux-1-kontext-pro-and-flux1-1-pro-now-available-in-azure-ai-f/4434659", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure_ai/FLUX.1-Kontext-pro": { + "litellm_provider": "azure_ai", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://azuremarketplace.microsoft.com/pt-br/marketplace/apps/cohere.cohere-embed-4-offer?tab=PlansAndPrice", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "azure_ai/Llama-3.2-11B-Vision-Instruct": { + "input_cost_per_token": 3.7e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 3.7e-07, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-11b-vision-instruct-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/Llama-3.2-90B-Vision-Instruct": { + "input_cost_per_token": 2.04e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 2.04e-06, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.meta-llama-3-2-90b-vision-instruct-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/Llama-3.3-70B-Instruct": { + "input_cost_per_token": 7.1e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 7.1e-07, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/metagenai.llama-3-3-70b-instruct-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/Llama-4-Maverick-17B-128E-Instruct-FP8": { + "input_cost_per_token": 1.41e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 1000000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 3.5e-07, + "source": "https://azure.microsoft.com/en-us/blog/introducing-the-llama-4-herd-in-azure-ai-foundry-and-azure-databricks/", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + 
"azure_ai/Llama-4-Scout-17B-16E-Instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 10000000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 7.8e-07, + "source": "https://azure.microsoft.com/en-us/blog/introducing-the-llama-4-herd-in-azure-ai-foundry-and-azure-databricks/", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/Meta-Llama-3-70B-Instruct": { + "input_cost_per_token": 1.1e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 8192, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 3.7e-07, + "supports_tool_choice": true + }, + "azure_ai/Meta-Llama-3.1-405B-Instruct": { + "input_cost_per_token": 5.33e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 1.6e-05, + "source": "https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-405b-instruct-offer?tab=PlansAndPrice", + "supports_tool_choice": true + }, + "azure_ai/Meta-Llama-3.1-70B-Instruct": { + "input_cost_per_token": 2.68e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 3.54e-06, + "source": "https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-70b-instruct-offer?tab=PlansAndPrice", + "supports_tool_choice": true + }, + "azure_ai/Meta-Llama-3.1-8B-Instruct": { + "input_cost_per_token": 3e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 6.1e-07, + "source": "https://azuremarketplace.microsoft.com/en-us/marketplace/apps/metagenai.meta-llama-3-1-8b-instruct-offer?tab=PlansAndPrice", + "supports_tool_choice": true + }, + "azure_ai/Phi-3-medium-128k-instruct": { + "input_cost_per_token": 1.7e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6.8e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3-medium-4k-instruct": { + "input_cost_per_token": 1.7e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6.8e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3-mini-128k-instruct": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5.2e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3-mini-4k-instruct": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5.2e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + 
"azure_ai/Phi-3-small-128k-instruct": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3-small-8k-instruct": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3.5-MoE-instruct": { + "input_cost_per_token": 1.6e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6.4e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3.5-mini-instruct": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5.2e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-3.5-vision-instruct": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5.2e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/phi-3/", + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/Phi-4": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/affordable-innovation-unveiling-the-pricing-of-phi-3-slms-on-models-as-a-service/4156495", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "azure_ai/Phi-4-mini-instruct": { + "input_cost_per_token": 7.5e-08, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112", + "supports_function_calling": true + }, + "azure_ai/Phi-4-multimodal-instruct": { + "input_cost_per_audio_token": 4e-06, + "input_cost_per_token": 8e-08, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3.2e-07, + "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_vision": true + }, + "azure_ai/Phi-4-mini-reasoning": { + "input_cost_per_token": 8e-08, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "max_tokens": 
4096, + "mode": "chat", + "output_cost_per_token": 3.2e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-foundry-models/microsoft/", + "supports_function_calling": true + }, + "azure_ai/Phi-4-reasoning": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-foundry-models/microsoft/", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true + }, + "azure_ai/mistral-document-ai-2505": { + "litellm_provider": "azure_ai", + "ocr_cost_per_page": 3e-3, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://devblogs.microsoft.com/foundry/whats-new-in-azure-ai-foundry-august-2025/#mistral-document-ai-(ocr)-%E2%80%94-serverless-in-foundry" + }, + "azure_ai/doc-intelligence/prebuilt-read": { + "litellm_provider": "azure_ai", + "ocr_cost_per_page": 1.5e-3, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-document-intelligence/" + }, + "azure_ai/doc-intelligence/prebuilt-layout": { + "litellm_provider": "azure_ai", + "ocr_cost_per_page": 1e-2, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-document-intelligence/" + }, + "azure_ai/doc-intelligence/prebuilt-document": { + "litellm_provider": "azure_ai", + "ocr_cost_per_page": 1e-2, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-document-intelligence/" + }, + "azure_ai/MAI-DS-R1": { + "input_cost_per_token": 1.35e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5.4e-06, + "source": "https://azure.microsoft.com/en-us/pricing/details/ai-foundry-models/microsoft/", + "supports_reasoning": true, + "supports_tool_choice": true + }, + "azure_ai/cohere-rerank-v3-english": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "azure_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "azure_ai/cohere-rerank-v3-multilingual": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "azure_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "azure_ai/cohere-rerank-v3.5": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "azure_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "azure_ai/cohere-rerank-v4.0-pro": { + "input_cost_per_query": 0.0025, + "input_cost_per_token": 0.0, + "litellm_provider": "azure_ai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_query_tokens": 4096, + "max_tokens": 32768, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "azure_ai/cohere-rerank-v4.0-fast": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "azure_ai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + 
"max_query_tokens": 4096, + "max_tokens": 32768, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "azure_ai/deepseek-v3.2": { + "input_cost_per_token": 5.8e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "azure_ai/deepseek-v3.2-speciale": { + "input_cost_per_token": 5.8e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "azure_ai/deepseek-r1": { + "input_cost_per_token": 1.35e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5.4e-06, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/deepseek-r1-improved-performance-higher-limits-and-transparent-pricing/4386367", + "supports_reasoning": true, + "supports_tool_choice": true + }, + "azure_ai/deepseek-v3": { + "input_cost_per_token": 1.14e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4.56e-06, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/announcing-deepseek-v3-on-azure-ai-foundry-and-github/4390438", + "supports_tool_choice": true + }, + "azure_ai/deepseek-v3-0324": { + "input_cost_per_token": 1.14e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4.56e-06, + "source": "https://techcommunity.microsoft.com/blog/machinelearningblog/announcing-deepseek-v3-on-azure-ai-foundry-and-github/4390438", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/embed-v-4-0": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 3072, + "source": "https://azuremarketplace.microsoft.com/pt-br/marketplace/apps/cohere.cohere-embed-4-offer?tab=PlansAndPrice", + "supported_endpoints": [ + "/v1/embeddings" + ], + "supported_modalities": [ + "text", + "image" + ], + "supports_embedding_image_input": true + }, + "azure_ai/global/grok-3": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": "https://devblogs.microsoft.com/foundry/announcing-grok-3-and-grok-3-mini-on-azure-ai-foundry/", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/global/grok-3-mini": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.27e-06, + "source": 
"https://devblogs.microsoft.com/foundry/announcing-grok-3-and-grok-3-mini-on-azure-ai-foundry/", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-3": { + "input_cost_per_token": 3.3e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "source": "https://devblogs.microsoft.com/foundry/announcing-grok-3-and-grok-3-mini-on-azure-ai-foundry/", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-3-mini": { + "input_cost_per_token": 2.75e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.38e-06, + "source": "https://devblogs.microsoft.com/foundry/announcing-grok-3-and-grok-3-mini-on-azure-ai-foundry/", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-4": { + "input_cost_per_token": 5.5e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.75e-05, + "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-4-fast-non-reasoning": { + "input_cost_per_token": 0.43e-06, + "output_cost_per_token": 1.73e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-4-fast-reasoning": { + "input_cost_per_token": 0.43e-06, + "output_cost_per_token": 1.73e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "source": "https://techcommunity.microsoft.com/blog/azure-ai-foundry-blog/announcing-the-grok-4-fast-models-from-xai-now-available-in-azure-ai-foundry/4456701", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/grok-code-fast-1": { + "input_cost_per_token": 3.5e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.75e-05, + "source": "https://azure.microsoft.com/en-us/blog/grok-4-is-now-available-in-azure-ai-foundry-unlock-frontier-intelligence-and-business-ready-capabilities/", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "azure_ai/jais-30b-chat": { + "input_cost_per_token": 0.0032, + "litellm_provider": "azure_ai", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.00971, + "source": 
"https://azure.microsoft.com/en-us/products/ai-services/ai-foundry/models/jais-30b-chat" + }, + "azure_ai/jamba-instruct": { + "input_cost_per_token": 5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 70000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7e-07, + "supports_tool_choice": true + }, + "azure_ai/ministral-3b": { + "input_cost_per_token": 4e-08, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 4e-08, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.ministral-3b-2410-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-large": { + "input_cost_per_token": 4e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-large-2407": { + "input_cost_per_token": 2e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-06, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-large-latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-06, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-ai-large-2407-offer?tab=Overview", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-large-3": { + "input_cost_per_token": 5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 256000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://azure.microsoft.com/en-us/blog/introducing-mistral-large-3-in-microsoft-foundry-open-capable-and-ready-for-production-workloads/", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "azure_ai/mistral-medium-2505": { + "input_cost_per_token": 4e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-nemo": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "azure_ai", + "max_input_tokens": 131072, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://azuremarketplace.microsoft.com/en/marketplace/apps/000-000.mistral-nemo-12b-2407?tab=PlansAndPrice", + "supports_function_calling": true + }, + "azure_ai/mistral-small": { + "input_cost_per_token": 1e-06, + "litellm_provider": "azure_ai", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "azure_ai/mistral-small-2503": { + "input_cost_per_token": 1e-06, + 
"litellm_provider": "azure_ai", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "babbage-002": { + "input_cost_per_token": 4e-07, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 16384, + "mode": "completion", + "output_cost_per_token": 4e-07 + }, + "bedrock/*/1-month-commitment/cohere.command-light-text-v14": { + "input_cost_per_second": 0.001902, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_second": 0.001902, + "supports_tool_choice": true + }, + "bedrock/*/1-month-commitment/cohere.command-text-v14": { + "input_cost_per_second": 0.011, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_second": 0.011, + "supports_tool_choice": true + }, + "bedrock/*/6-month-commitment/cohere.command-light-text-v14": { + "input_cost_per_second": 0.0011416, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_second": 0.0011416, + "supports_tool_choice": true + }, + "bedrock/*/6-month-commitment/cohere.command-text-v14": { + "input_cost_per_second": 0.0066027, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_second": 0.0066027, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/1-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.01475, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.01475, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/1-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.0455, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0455 + }, + "bedrock/ap-northeast-1/1-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.0455, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0455, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/6-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.008194, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.008194, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/6-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.02527, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.02527 + }, + "bedrock/ap-northeast-1/6-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.02527, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.02527, + "supports_tool_choice": true + }, + 
"bedrock/ap-northeast-1/anthropic.claude-instant-v1": { + "input_cost_per_token": 2.23e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 7.55e-06, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/anthropic.claude-v1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/ap-northeast-1/anthropic.claude-v2:1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/ap-south-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 3.18e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4.2e-06 + }, + "bedrock/ap-south-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3.6e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7.2e-07 + }, + "bedrock/ca-central-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 3.05e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4.03e-06 + }, + "bedrock/ca-central-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6.9e-07 + }, + "bedrock/eu-central-1/1-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.01635, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.01635, + "supports_tool_choice": true + }, + "bedrock/eu-central-1/1-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.0415, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0415 + }, + "bedrock/eu-central-1/1-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.0415, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0415, + "supports_tool_choice": true + }, + "bedrock/eu-central-1/6-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.009083, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.009083, + "supports_tool_choice": true + }, + "bedrock/eu-central-1/6-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.02305, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.02305 + }, + "bedrock/eu-central-1/6-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.02305, + "litellm_provider": "bedrock", + 
"max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.02305, + "supports_tool_choice": true + }, + "bedrock/eu-central-1/anthropic.claude-instant-v1": { + "input_cost_per_token": 2.48e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 8.38e-06, + "supports_tool_choice": true + }, + "bedrock/eu-central-1/anthropic.claude-v1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05 + }, + "bedrock/eu-central-1/anthropic.claude-v2:1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/eu-west-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.86e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3.78e-06 + }, + "bedrock/eu-west-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3.2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6.5e-07 + }, + "bedrock/eu-west-2/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 3.45e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4.55e-06 + }, + "bedrock/eu-west-2/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3.9e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7.8e-07 + }, + "bedrock/eu-west-3/mistral.mistral-7b-instruct-v0:2": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.6e-07, + "supports_tool_choice": true + }, + "bedrock/eu-west-3/mistral.mistral-large-2402-v1:0": { + "input_cost_per_token": 1.04e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3.12e-05, + "supports_function_calling": true + }, + "bedrock/eu-west-3/mistral.mixtral-8x7b-instruct-v0:1": { + "input_cost_per_token": 5.9e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 9.1e-07, + "supports_tool_choice": true + }, + "bedrock/invoke/anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Anthropic via Invoke route does not currently support pdf input." 
+ }, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/sa-east-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 4.45e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5.88e-06 + }, + "bedrock/sa-east-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.01e-06 + }, + "bedrock/us-east-1/1-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.011, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.011, + "supports_tool_choice": true + }, + "bedrock/us-east-1/1-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.0175, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0175 + }, + "bedrock/us-east-1/1-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.0175, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0175, + "supports_tool_choice": true + }, + "bedrock/us-east-1/6-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.00611, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00611, + "supports_tool_choice": true + }, + "bedrock/us-east-1/6-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.00972, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00972 + }, + "bedrock/us-east-1/6-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.00972, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00972, + "supports_tool_choice": true + }, + "bedrock/us-east-1/anthropic.claude-instant-v1": { + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-06, + "supports_tool_choice": true + }, + "bedrock/us-east-1/anthropic.claude-v1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/us-east-1/anthropic.claude-v2:1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/us-east-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.65e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + 
"mode": "chat", + "output_cost_per_token": 3.5e-06 + }, + "bedrock/us-east-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "bedrock/us-east-1/mistral.mistral-7b-instruct-v0:2": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "bedrock/us-east-1/mistral.mistral-large-2402-v1:0": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_function_calling": true + }, + "bedrock/us-east-1/mistral.mixtral-8x7b-instruct-v0:1": { + "input_cost_per_token": 4.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 7e-07, + "supports_tool_choice": true + }, + "bedrock/us-gov-east-1/amazon.nova-pro-v1:0": { + "input_cost_per_token": 9.6e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.84e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "bedrock/us-gov-east-1/amazon.titan-embed-text-v1": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536 + }, + "bedrock/us-gov-east-1/amazon.titan-embed-text-v2:0": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024 + }, + "bedrock/us-gov-east-1/amazon.titan-text-express-v1": { + "input_cost_per_token": 1.3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 8000, + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 1.7e-06 + }, + "bedrock/us-gov-east-1/amazon.titan-text-lite-v1": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 4000, + "max_tokens": 4000, + "mode": "chat", + "output_cost_per_token": 4e-07 + }, + "bedrock/us-gov-east-1/amazon.titan-text-premier-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.5e-06 + }, + "bedrock/us-gov-east-1/anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3.6e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.8e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-east-1/anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + 
"max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-east-1/claude-sonnet-4-5-20250929-v1:0": { + "input_cost_per_token": 3.3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-east-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.65e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 3.5e-06, + "supports_pdf_input": true + }, + "bedrock/us-gov-east-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 2.65e-06, + "supports_pdf_input": true + }, + "bedrock/us-gov-west-1/amazon.nova-pro-v1:0": { + "input_cost_per_token": 9.6e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.84e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "bedrock/us-gov-west-1/amazon.titan-embed-text-v1": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536 + }, + "bedrock/us-gov-west-1/amazon.titan-embed-text-v2:0": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1024 + }, + "bedrock/us-gov-west-1/amazon.titan-text-express-v1": { + "input_cost_per_token": 1.3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 8000, + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 1.7e-06 + }, + "bedrock/us-gov-west-1/amazon.titan-text-lite-v1": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 4000, + "max_tokens": 4000, + "mode": "chat", + "output_cost_per_token": 4e-07 + }, + "bedrock/us-gov-west-1/amazon.titan-text-premier-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 42000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1.5e-06 + }, + "bedrock/us-gov-west-1/anthropic.claude-3-7-sonnet-20250219-v1:0": { + "cache_creation_input_token_cost": 4.5e-06, + "cache_read_input_token_cost": 3.6e-07, + "input_cost_per_token": 3.6e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.8e-05, + "supports_assistant_prefill": 
true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-west-1/anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3.6e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.8e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-west-1/anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-west-1/claude-sonnet-4-5-20250929-v1:0": { + "input_cost_per_token": 3.3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "bedrock/us-gov-west-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.65e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 3.5e-06, + "supports_pdf_input": true + }, + "bedrock/us-gov-west-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8000, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 2.65e-06, + "supports_pdf_input": true + }, + "bedrock/us-west-1/meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.65e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3.5e-06 + }, + "bedrock/us-west-1/meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "bedrock/us-west-2/1-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.011, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.011, + "supports_tool_choice": true + }, + "bedrock/us-west-2/1-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.0175, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0175 + }, + "bedrock/us-west-2/1-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.0175, + "litellm_provider": "bedrock", + "max_input_tokens": 
100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.0175, + "supports_tool_choice": true + }, + "bedrock/us-west-2/6-month-commitment/anthropic.claude-instant-v1": { + "input_cost_per_second": 0.00611, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00611, + "supports_tool_choice": true + }, + "bedrock/us-west-2/6-month-commitment/anthropic.claude-v1": { + "input_cost_per_second": 0.00972, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00972 + }, + "bedrock/us-west-2/6-month-commitment/anthropic.claude-v2:1": { + "input_cost_per_second": 0.00972, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_second": 0.00972, + "supports_tool_choice": true + }, + "bedrock/us-west-2/anthropic.claude-instant-v1": { + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-06, + "supports_tool_choice": true + }, + "bedrock/us-west-2/anthropic.claude-v1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/us-west-2/anthropic.claude-v2:1": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 100000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "bedrock/us-west-2/mistral.mistral-7b-instruct-v0:2": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "bedrock/us-west-2/mistral.mistral-large-2402-v1:0": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_function_calling": true + }, + "bedrock/us-west-2/mistral.mixtral-8x7b-instruct-v0:1": { + "input_cost_per_token": 4.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 7e-07, + "supports_tool_choice": true + }, + "bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 8e-08, + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "cerebras/llama-3.3-70b": { + "input_cost_per_token": 8.5e-07, + "litellm_provider": "cerebras", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + 
"output_cost_per_token": 1.2e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "cerebras/llama3.1-70b": { + "input_cost_per_token": 6e-07, + "litellm_provider": "cerebras", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "cerebras/llama3.1-8b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cerebras", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "cerebras/gpt-oss-120b": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "cerebras", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6.9e-07, + "source": "https://www.cerebras.ai/blog/openai-gpt-oss-120b-runs-fastest-on-cerebras", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "cerebras/qwen-3-32b": { + "input_cost_per_token": 4e-07, + "litellm_provider": "cerebras", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 8e-07, + "source": "https://inference-docs.cerebras.ai/support/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "cerebras/zai-glm-4.6": { + "input_cost_per_token": 2.25e-06, + "litellm_provider": "cerebras", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "source": "https://www.cerebras.ai/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "chat-bison": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-chat-models", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "chat-bison-32k": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-chat-models", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "chat-bison-32k@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-chat-models", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "chat-bison@001": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-chat-models", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + 
"max_tokens": 4096, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "chat-bison@002": { + "deprecation_date": "2025-04-09", + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-chat-models", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "chatdolphin": { + "input_cost_per_token": 5e-07, + "litellm_provider": "nlp_cloud", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 5e-07 + }, + "chatgpt-4o-latest": { + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4o-transcribe-diarize": { + "input_cost_per_audio_token": 6e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "claude-3-5-haiku-20241022": { + "cache_creation_input_token_cost": 1e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 8e-08, + "deprecation_date": "2025-10-01", + "input_cost_per_token": 8e-07, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 264 + }, + "claude-3-5-haiku-latest": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 1e-07, + "deprecation_date": "2025-10-01", + "input_cost_per_token": 1e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 264 + }, + "claude-haiku-4-5-20251001": { + 
"cache_creation_input_token_cost": 1.25e-06, + "cache_creation_input_token_cost_above_1hr": 2e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_computer_use": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "claude-haiku-4-5": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_creation_input_token_cost_above_1hr": 2e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_computer_use": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "claude-3-5-sonnet-20240620": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2025-06-01", + "input_cost_per_token": 3e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-3-5-sonnet-20241022": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2025-10-01", + "input_cost_per_token": 3e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-3-5-sonnet-latest": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2025-06-01", + "input_cost_per_token": 3e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + 
"supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-3-7-sonnet-20250219": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2026-02-19", + "input_cost_per_token": 3e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-3-7-sonnet-latest": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2025-06-01", + "input_cost_per_token": 3e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-3-haiku-20240307": { + "cache_creation_input_token_cost": 3e-07, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 264 + }, + "claude-3-opus-20240229": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 1.5e-06, + "deprecation_date": "2026-05-01", + "input_cost_per_token": 1.5e-05, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 395 + }, + "claude-3-opus-latest": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 6e-06, + 
"cache_read_input_token_cost": 1.5e-06, + "deprecation_date": "2025-03-01", + "input_cost_per_token": 1.5e-05, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 395 + }, + "claude-4-opus-20250514": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-4-sonnet-20250514": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost": 3e-07, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-sonnet-4-5": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "claude-sonnet-4-5-20250929": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, 
+ "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tool_use_system_prompt_tokens": 346 + }, + "claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-opus-4-1": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 3e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-opus-4-1-20250805": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 3e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "deprecation_date": "2026-08-05", + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": 
true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-opus-4-20250514": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 3e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "deprecation_date": "2026-05-14", + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-opus-4-5-20251101": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_creation_input_token_cost_above_1hr": 1e-05, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-opus-4-5": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_creation_input_token_cost_above_1hr": 1e-05, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "anthropic", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "claude-sonnet-4-20250514": { + "deprecation_date": "2026-05-14", + "cache_creation_input_token_cost": 3.75e-06, + "cache_creation_input_token_cost_above_1hr": 6e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "anthropic", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + 
"supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "cloudflare/@cf/meta/llama-2-7b-chat-fp16": { + "input_cost_per_token": 1.923e-06, + "litellm_provider": "cloudflare", + "max_input_tokens": 3072, + "max_output_tokens": 3072, + "max_tokens": 3072, + "mode": "chat", + "output_cost_per_token": 1.923e-06 + }, + "cloudflare/@cf/meta/llama-2-7b-chat-int8": { + "input_cost_per_token": 1.923e-06, + "litellm_provider": "cloudflare", + "max_input_tokens": 2048, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 1.923e-06 + }, + "cloudflare/@cf/mistral/mistral-7b-instruct-v0.1": { + "input_cost_per_token": 1.923e-06, + "litellm_provider": "cloudflare", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.923e-06 + }, + "cloudflare/@hf/thebloke/codellama-7b-instruct-awq": { + "input_cost_per_token": 1.923e-06, + "litellm_provider": "cloudflare", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.923e-06 + }, + "code-bison": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "code-bison-32k@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-bison32k": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-bison@001": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-bison@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-gecko": 
{ + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 2048, + "max_output_tokens": 64, + "max_tokens": 64, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-gecko-latest": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 2048, + "max_output_tokens": 64, + "max_tokens": 64, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-gecko@001": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 2048, + "max_output_tokens": 64, + "max_tokens": 64, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "code-gecko@002": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-text-models", + "max_input_tokens": 2048, + "max_output_tokens": 64, + "max_tokens": 64, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "codechat-bison": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codechat-bison-32k": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codechat-bison-32k@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codechat-bison@001": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codechat-bison@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, 
+ "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codechat-bison@latest": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-code-chat-models", + "max_input_tokens": 6144, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "codestral/codestral-2405": { + "input_cost_per_token": 0.0, + "litellm_provider": "codestral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://docs.mistral.ai/capabilities/code_generation/", + "supports_assistant_prefill": true, + "supports_tool_choice": true + }, + "codestral/codestral-latest": { + "input_cost_per_token": 0.0, + "litellm_provider": "codestral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://docs.mistral.ai/capabilities/code_generation/", + "supports_assistant_prefill": true, + "supports_tool_choice": true + }, + "codex-mini-latest": { + "cache_read_input_token_cost": 3.75e-07, + "input_cost_per_token": 1.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 6e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "cohere.command-light-text-v14": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_tool_choice": true + }, + "cohere.command-r-plus-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_tool_choice": true + }, + "cohere.command-r-v1:0": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_tool_choice": true + }, + "cohere.command-text-v14": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_tool_choice": true + }, + "cohere.embed-english-v3": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "supports_embedding_image_input": true + }, + "cohere.embed-multilingual-v3": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 512, + "max_tokens": 512, 
+ "mode": "embedding", + "output_cost_per_token": 0.0, + "supports_embedding_image_input": true + }, + "cohere.embed-v4:0": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536, + "supports_embedding_image_input": true + }, + "cohere/embed-v4.0": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "cohere", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536, + "supports_embedding_image_input": true + }, + "cohere.rerank-v3-5:0": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "bedrock", + "max_document_chunks_per_query": 100, + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_query_tokens": 32000, + "max_tokens": 32000, + "max_tokens_per_document_chunk": 512, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "command": { + "input_cost_per_token": 1e-06, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "command-a-03-2025": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "cohere_chat", + "max_input_tokens": 256000, + "max_output_tokens": 8000, + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "command-light": { + "input_cost_per_token": 3e-07, + "litellm_provider": "cohere_chat", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_tool_choice": true + }, + "command-nightly": { + "input_cost_per_token": 1e-06, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "command-r": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "cohere_chat", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "command-r-08-2024": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "cohere_chat", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "command-r-plus": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "cohere_chat", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "command-r-plus-08-2024": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "cohere_chat", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "command-r7b-12-2024": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "cohere_chat", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3.75e-08, + "source": "https://docs.cohere.com/v2/docs/command-r7b", + 
"supports_function_calling": true, + "supports_tool_choice": true + }, + "computer-use-preview": { + "input_cost_per_token": 3e-06, + "litellm_provider": "azure", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "deepseek-chat": { + "cache_read_input_token_cost": 6e-08, + "input_cost_per_token": 6e-07, + "litellm_provider": "deepseek", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.7e-06, + "source": "https://api-docs.deepseek.com/quick_start/pricing", + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "deepseek-reasoner": { + "cache_read_input_token_cost": 6e-08, + "input_cost_per_token": 6e-07, + "litellm_provider": "deepseek", + "max_input_tokens": 131072, + "max_output_tokens": 65536, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.7e-06, + "source": "https://api-docs.deepseek.com/quick_start/pricing", + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supports_function_calling": false, + "supports_native_streaming": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false + }, + "dashscope/qwen-coder": { + "input_cost_per_token": 3e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 1000000, + "max_output_tokens": 16384, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-flash": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 32768, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 5e-08, + "output_cost_per_token": 4e-07, + "range": [ + 0, + 256000.0 + ] + }, + { + "input_cost_per_token": 2.5e-07, + "output_cost_per_token": 2e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen-flash-2025-07-28": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 32768, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 5e-08, + "output_cost_per_token": 4e-07, + "range": [ + 0, + 256000.0 + ] + }, 
+ { + "input_cost_per_token": 2.5e-07, + "output_cost_per_token": 2e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen-max": { + "input_cost_per_token": 1.6e-06, + "litellm_provider": "dashscope", + "max_input_tokens": 30720, + "max_output_tokens": 8192, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6.4e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-plus": { + "input_cost_per_token": 4e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-plus-2025-01-25": { + "input_cost_per_token": 4e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-plus-2025-04-28": { + "input_cost_per_token": 4e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-06, + "output_cost_per_token": 1.2e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-plus-2025-07-14": { + "input_cost_per_token": 4e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-06, + "output_cost_per_token": 1.2e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-plus-2025-07-28": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 32768, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 4e-07, + "output_cost_per_reasoning_token": 4e-06, + "output_cost_per_token": 1.2e-06, + "range": [ + 0, + 256000.0 + ] + }, + { + "input_cost_per_token": 1.2e-06, + "output_cost_per_reasoning_token": 1.2e-05, + "output_cost_per_token": 3.6e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen-plus-2025-09-11": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 32768, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 4e-07, + "output_cost_per_reasoning_token": 4e-06, + "output_cost_per_token": 1.2e-06, + "range": [ + 0, + 256000.0 + ] + }, + { + "input_cost_per_token": 1.2e-06, + 
"output_cost_per_reasoning_token": 1.2e-05, + "output_cost_per_token": 3.6e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen-plus-latest": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 32768, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 4e-07, + "output_cost_per_reasoning_token": 4e-06, + "output_cost_per_token": 1.2e-06, + "range": [ + 0, + 256000.0 + ] + }, + { + "input_cost_per_token": 1.2e-06, + "output_cost_per_reasoning_token": 1.2e-05, + "output_cost_per_token": 3.6e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen-turbo": { + "input_cost_per_token": 5e-08, + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_reasoning_token": 5e-07, + "output_cost_per_token": 2e-07, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-turbo-2024-11-01": { + "input_cost_per_token": 5e-08, + "litellm_provider": "dashscope", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-turbo-2025-04-28": { + "input_cost_per_token": 5e-08, + "litellm_provider": "dashscope", + "max_input_tokens": 1000000, + "max_output_tokens": 16384, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_reasoning_token": 5e-07, + "output_cost_per_token": 2e-07, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen-turbo-latest": { + "input_cost_per_token": 5e-08, + "litellm_provider": "dashscope", + "max_input_tokens": 1000000, + "max_output_tokens": 16384, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_reasoning_token": 5e-07, + "output_cost_per_token": 2e-07, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen3-30b-a3b": { + "litellm_provider": "dashscope", + "max_input_tokens": 129024, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dashscope/qwen3-coder-flash": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 65536, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "cache_read_input_token_cost": 8e-08, + "input_cost_per_token": 3e-07, + "output_cost_per_token": 1.5e-06, + "range": [ + 0, + 32000.0 + ] + }, + { + "cache_read_input_token_cost": 1.2e-07, + "input_cost_per_token": 
5e-07, + "output_cost_per_token": 2.5e-06, + "range": [ + 32000.0, + 128000.0 + ] + }, + { + "cache_read_input_token_cost": 2e-07, + "input_cost_per_token": 8e-07, + "output_cost_per_token": 4e-06, + "range": [ + 128000.0, + 256000.0 + ] + }, + { + "cache_read_input_token_cost": 4e-07, + "input_cost_per_token": 1.6e-06, + "output_cost_per_token": 9.6e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen3-coder-flash-2025-07-28": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 65536, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 3e-07, + "output_cost_per_token": 1.5e-06, + "range": [ + 0, + 32000.0 + ] + }, + { + "input_cost_per_token": 5e-07, + "output_cost_per_token": 2.5e-06, + "range": [ + 32000.0, + 128000.0 + ] + }, + { + "input_cost_per_token": 8e-07, + "output_cost_per_token": 4e-06, + "range": [ + 128000.0, + 256000.0 + ] + }, + { + "input_cost_per_token": 1.6e-06, + "output_cost_per_token": 9.6e-06, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen3-coder-plus": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 65536, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "output_cost_per_token": 5e-06, + "range": [ + 0, + 32000.0 + ] + }, + { + "cache_read_input_token_cost": 1.8e-07, + "input_cost_per_token": 1.8e-06, + "output_cost_per_token": 9e-06, + "range": [ + 32000.0, + 128000.0 + ] + }, + { + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "output_cost_per_token": 1.5e-05, + "range": [ + 128000.0, + 256000.0 + ] + }, + { + "cache_read_input_token_cost": 6e-07, + "input_cost_per_token": 6e-06, + "output_cost_per_token": 6e-05, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen3-coder-plus-2025-07-22": { + "litellm_provider": "dashscope", + "max_input_tokens": 997952, + "max_output_tokens": 65536, + "max_tokens": 1000000, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 1e-06, + "output_cost_per_token": 5e-06, + "range": [ + 0, + 32000.0 + ] + }, + { + "input_cost_per_token": 1.8e-06, + "output_cost_per_token": 9e-06, + "range": [ + 32000.0, + 128000.0 + ] + }, + { + "input_cost_per_token": 3e-06, + "output_cost_per_token": 1.5e-05, + "range": [ + 128000.0, + 256000.0 + ] + }, + { + "input_cost_per_token": 6e-06, + "output_cost_per_token": 6e-05, + "range": [ + 256000.0, + 1000000.0 + ] + } + ] + }, + "dashscope/qwen3-max-preview": { + "litellm_provider": "dashscope", + "max_input_tokens": 258048, + "max_output_tokens": 65536, + "max_tokens": 262144, + "mode": "chat", + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "tiered_pricing": [ + { + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 6e-06, + "range": [ + 
0, + 32000.0 + ] + }, + { + "input_cost_per_token": 2.4e-06, + "output_cost_per_token": 1.2e-05, + "range": [ + 32000.0, + 128000.0 + ] + }, + { + "input_cost_per_token": 3e-06, + "output_cost_per_token": 1.5e-05, + "range": [ + 128000.0, + 252000.0 + ] + } + ] + }, + "dashscope/qwq-plus": { + "input_cost_per_token": 8e-07, + "litellm_provider": "dashscope", + "max_input_tokens": 98304, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.4e-06, + "source": "https://www.alibabacloud.com/help/en/model-studio/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-bge-large-en": { + "input_cost_per_token": 1.0003e-07, + "input_dbu_cost_per_token": 1.429e-06, + "litellm_provider": "databricks", + "max_input_tokens": 512, + "max_tokens": 512, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_dbu_cost_per_token": 0.0, + "output_vector_size": 1024, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-claude-3-7-sonnet": { + "input_cost_per_token": 2.9999900000000002e-06, + "input_dbu_cost_per_token": 4.2857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000020000000002e-05, + "output_dbu_cost_per_token": 0.000214286, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-haiku-4-5": { + "input_cost_per_token": 1.00002e-06, + "input_dbu_cost_per_token": 1.4286e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 5.00003e-06, + "output_dbu_cost_per_token": 7.1429e-05, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-opus-4": { + "input_cost_per_token": 1.5000020000000002e-05, + "input_dbu_cost_per_token": 0.000214286, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 7.500003000000001e-05, + "output_dbu_cost_per_token": 0.001071429, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-opus-4-1": { + "input_cost_per_token": 1.5000020000000002e-05, + "input_dbu_cost_per_token": 0.000214286, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 7.500003000000001e-05, + "output_dbu_cost_per_token": 0.001071429, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-opus-4-5": { + "input_cost_per_token": 5.00003e-06, + "input_dbu_cost_per_token": 7.1429e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 2.5000010000000002e-05, + "output_dbu_cost_per_token": 0.000357143, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-sonnet-4": { + "input_cost_per_token": 2.9999900000000002e-06, + "input_dbu_cost_per_token": 4.2857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000020000000002e-05, + "output_dbu_cost_per_token": 0.000214286, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-sonnet-4-1": { + "input_cost_per_token": 2.9999900000000002e-06, + "input_dbu_cost_per_token": 4.2857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 1.5000020000000002e-05, + "output_dbu_cost_per_token": 0.000214286, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-claude-sonnet-4-5": { + "input_cost_per_token": 2.9999900000000002e-06, + "input_dbu_cost_per_token": 4.2857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000020000000002e-05, + "output_dbu_cost_per_token": 0.000214286, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "databricks/databricks-gemini-2-5-flash": { + "input_cost_per_token": 3.0001999999999996e-07, + "input_dbu_cost_per_token": 4.285999999999999e-06, + "litellm_provider": "databricks", + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_tokens": 1048576, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 2.49998e-06, + "output_dbu_cost_per_token": 3.5714e-05, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "databricks/databricks-gemini-2-5-pro": { + "input_cost_per_token": 1.24999e-06, + "input_dbu_cost_per_token": 1.7857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_tokens": 1048576, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 9.999990000000002e-06, + "output_dbu_cost_per_token": 0.000142857, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "databricks/databricks-gemma-3-12b": { + "input_cost_per_token": 1.5000999999999998e-07, + "input_dbu_cost_per_token": 2.1429999999999996e-06, + "litellm_provider": "databricks", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "max_tokens": 128000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 5.0001e-07, + "output_dbu_cost_per_token": 7.143e-06, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-gpt-5": { + "input_cost_per_token": 1.24999e-06, + "input_dbu_cost_per_token": 1.7857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 9.999990000000002e-06, + "output_dbu_cost_per_token": 0.000142857, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving" + }, + "databricks/databricks-gpt-5-1": { + "input_cost_per_token": 1.24999e-06, + "input_dbu_cost_per_token": 1.7857e-05, + "litellm_provider": "databricks", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 9.999990000000002e-06, + "output_dbu_cost_per_token": 0.000142857, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving" + }, + "databricks/databricks-gpt-5-mini": { + "input_cost_per_token": 2.4997000000000006e-07, + "input_dbu_cost_per_token": 3.571e-06, + "litellm_provider": "databricks", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.9999700000000004e-06, + "output_dbu_cost_per_token": 2.8571e-05, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving" + }, + "databricks/databricks-gpt-5-nano": { + "input_cost_per_token": 4.998e-08, + "input_dbu_cost_per_token": 7.14e-07, + "litellm_provider": "databricks", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 3.9998000000000007e-07, + "output_dbu_cost_per_token": 5.714000000000001e-06, + "source": "https://www.databricks.com/product/pricing/proprietary-foundation-model-serving" + }, + "databricks/databricks-gpt-oss-120b": { + "input_cost_per_token": 1.5000999999999998e-07, + "input_dbu_cost_per_token": 2.1429999999999996e-06, + "litellm_provider": "databricks", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 5.9997e-07, + "output_dbu_cost_per_token": 8.571e-06, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-gpt-oss-20b": { + "input_cost_per_token": 7e-08, + "input_dbu_cost_per_token": 1e-06, + "litellm_provider": "databricks", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 3.0001999999999996e-07, + "output_dbu_cost_per_token": 4.285999999999999e-06, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-gte-large-en": { + "input_cost_per_token": 1.2999000000000001e-07, + "input_dbu_cost_per_token": 1.857e-06, + "litellm_provider": "databricks", + "max_input_tokens": 8192, + "max_tokens": 8192, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_dbu_cost_per_token": 0.0, + "output_vector_size": 1024, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-llama-2-70b-chat": { + "input_cost_per_token": 5.0001e-07, + "input_dbu_cost_per_token": 7.143e-06, + "litellm_provider": "databricks", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000300000000002e-06, + "output_dbu_cost_per_token": 2.1429e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-llama-4-maverick": { + "input_cost_per_token": 5.0001e-07, + "input_dbu_cost_per_token": 7.143e-06, + "litellm_provider": "databricks", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "metadata": { + "notes": "Databricks documentation now provides both DBU costs (_dbu_cost_per_token) and dollar costs(_cost_per_token)." + }, + "mode": "chat", + "output_cost_per_token": 1.5000300000000002e-06, + "output_dbu_cost_per_token": 2.1429e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-meta-llama-3-1-405b-instruct": { + "input_cost_per_token": 5.00003e-06, + "input_dbu_cost_per_token": 7.1429e-05, + "litellm_provider": "databricks", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000020000000002e-05, + "output_dbu_cost_per_token": 0.000214286, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-meta-llama-3-1-8b-instruct": { + "input_cost_per_token": 1.5000999999999998e-07, + "input_dbu_cost_per_token": 2.1429999999999996e-06, + "litellm_provider": "databricks", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "max_tokens": 200000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 4.5003000000000007e-07, + "output_dbu_cost_per_token": 6.429000000000001e-06, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving" + }, + "databricks/databricks-meta-llama-3-3-70b-instruct": { + "input_cost_per_token": 5.0001e-07, + "input_dbu_cost_per_token": 7.143e-06, + "litellm_provider": "databricks", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.5000300000000002e-06, + "output_dbu_cost_per_token": 2.1429e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-meta-llama-3-70b-instruct": { + "input_cost_per_token": 1.00002e-06, + "input_dbu_cost_per_token": 1.4286e-05, + "litellm_provider": "databricks", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 2.9999900000000002e-06, + "output_dbu_cost_per_token": 4.2857e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-mixtral-8x7b-instruct": { + "input_cost_per_token": 5.0001e-07, + "input_dbu_cost_per_token": 7.143e-06, + "litellm_provider": "databricks", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.00002e-06, + "output_dbu_cost_per_token": 1.4286e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-mpt-30b-instruct": { + "input_cost_per_token": 1.00002e-06, + "input_dbu_cost_per_token": 1.4286e-05, + "litellm_provider": "databricks", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." + }, + "mode": "chat", + "output_cost_per_token": 1.00002e-06, + "output_dbu_cost_per_token": 1.4286e-05, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "databricks/databricks-mpt-7b-instruct": { + "input_cost_per_token": 5.0001e-07, + "input_dbu_cost_per_token": 7.143e-06, + "litellm_provider": "databricks", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "metadata": { + "notes": "Input/output cost per token is dbu cost * $0.070, based on databricks Llama 3.1 70B conversion. Number provided for reference, '*_dbu_cost_per_token' used in actual calculation." 
+ }, + "mode": "chat", + "output_cost_per_token": 0.0, + "output_dbu_cost_per_token": 0.0, + "source": "https://www.databricks.com/product/pricing/foundation-model-serving", + "supports_tool_choice": true + }, + "dataforseo/search": { + "input_cost_per_query": 0.003, + "litellm_provider": "dataforseo", + "mode": "search" + }, + "davinci-002": { + "input_cost_per_token": 2e-06, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 16384, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "deepgram/base": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-conversationalai": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-finance": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-general": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-meeting": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-phonecall": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-video": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = $0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/base-voicemail": { + "input_cost_per_second": 0.00020833, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0125/60 seconds = 
$0.00020833 per second", + "original_pricing_per_minute": 0.0125 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/enhanced": { + "input_cost_per_second": 0.00024167, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0145/60 seconds = $0.00024167 per second", + "original_pricing_per_minute": 0.0145 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/enhanced-finance": { + "input_cost_per_second": 0.00024167, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0145/60 seconds = $0.00024167 per second", + "original_pricing_per_minute": 0.0145 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/enhanced-general": { + "input_cost_per_second": 0.00024167, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0145/60 seconds = $0.00024167 per second", + "original_pricing_per_minute": 0.0145 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/enhanced-meeting": { + "input_cost_per_second": 0.00024167, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0145/60 seconds = $0.00024167 per second", + "original_pricing_per_minute": 0.0145 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/enhanced-phonecall": { + "input_cost_per_second": 0.00024167, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0145/60 seconds = $0.00024167 per second", + "original_pricing_per_minute": 0.0145 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-atc": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-automotive": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": 
"deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-conversationalai": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-drivethru": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-finance": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-general": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-meeting": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-phonecall": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-video": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-2-voicemail": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + 
"deepgram/nova-3": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-3-general": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-3-medical": { + "input_cost_per_second": 8.667e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0052/60 seconds = $0.00008667 per second (multilingual)", + "original_pricing_per_minute": 0.0052 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-general": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/nova-phonecall": { + "input_cost_per_second": 7.167e-05, + "litellm_provider": "deepgram", + "metadata": { + "calculation": "$0.0043/60 seconds = $0.00007167 per second", + "original_pricing_per_minute": 0.0043 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper-base": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper-large": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper-medium": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + 
"supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper-small": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepgram/whisper-tiny": { + "input_cost_per_second": 0.0001, + "litellm_provider": "deepgram", + "metadata": { + "notes": "Deepgram's hosted OpenAI Whisper models - pricing may differ from native Deepgram models" + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://deepgram.com/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "deepinfra/Gryphe/MythoMax-L2-13b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 8e-08, + "output_cost_per_token": 9e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/NousResearch/Hermes-3-Llama-3.1-405B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-06, + "output_cost_per_token": 1e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/NousResearch/Hermes-3-Llama-3.1-70B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 3e-07, + "output_cost_per_token": 3e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/Qwen/QwQ-32B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen2.5-72B-Instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1.2e-07, + "output_cost_per_token": 3.9e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen2.5-7B-Instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 1e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/Qwen/Qwen2.5-VL-32B-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true, + "supports_vision": true + }, + "deepinfra/Qwen/Qwen3-14B": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 6e-08, + "output_cost_per_token": 2.4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-235B-A22B": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1.8e-07, + "output_cost_per_token": 5.4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-235B-A22B-Instruct-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 
262144, + "input_cost_per_token": 9e-08, + "output_cost_per_token": 6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-235B-A22B-Thinking-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 3e-07, + "output_cost_per_token": 2.9e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-30B-A3B": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 8e-08, + "output_cost_per_token": 2.9e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-32B": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 2.8e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-Coder-480B-A35B-Instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 4e-07, + "output_cost_per_token": 1.6e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-Coder-480B-A35B-Instruct-Turbo": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 2.9e-07, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-Next-80B-A3B-Instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1.4e-07, + "output_cost_per_token": 1.4e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Qwen/Qwen3-Next-80B-A3B-Thinking": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1.4e-07, + "output_cost_per_token": 1.4e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/Sao10K/L3-8B-Lunaris-v1-Turbo": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 5e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/Sao10K/L3.1-70B-Euryale-v2.2": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 6.5e-07, + "output_cost_per_token": 7.5e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/Sao10K/L3.3-70B-Euryale-v2.3": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 6.5e-07, + "output_cost_per_token": 7.5e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/allenai/olmOCR-7B-0725-FP8": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2.7e-07, + "output_cost_per_token": 1.5e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/anthropic/claude-3-7-sonnet-latest": { + "max_tokens": 200000, + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "input_cost_per_token": 3.3e-06, + "output_cost_per_token": 
1.65e-05, + "cache_read_input_token_cost": 3.3e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/anthropic/claude-4-opus": { + "max_tokens": 200000, + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "input_cost_per_token": 1.65e-05, + "output_cost_per_token": 8.25e-05, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/anthropic/claude-4-sonnet": { + "max_tokens": 200000, + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "input_cost_per_token": 3.3e-06, + "output_cost_per_token": 1.65e-05, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-R1": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 7e-07, + "output_cost_per_token": 2.4e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-R1-0528": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 2.15e-06, + "cache_read_input_token_cost": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-R1-0528-Turbo": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-06, + "output_cost_per_token": 3e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-R1-Distill-Llama-70B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2.7e-07, + "output_cost_per_token": 2.7e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-R1-Turbo": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1e-06, + "output_cost_per_token": 3e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-V3": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 3.8e-07, + "output_cost_per_token": 8.9e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-V3-0324": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 2.5e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/deepseek-ai/DeepSeek-V3.1": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 2.7e-07, + "output_cost_per_token": 1e-06, + "cache_read_input_token_cost": 2.16e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true, + "supports_reasoning": true + }, + "deepinfra/deepseek-ai/DeepSeek-V3.1-Terminus": { + "max_tokens": 163840, + "max_input_tokens": 163840, + 
"max_output_tokens": 163840, + "input_cost_per_token": 2.7e-07, + "output_cost_per_token": 1e-06, + "cache_read_input_token_cost": 2.16e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemini-2.0-flash-001": { + "max_tokens": 1000000, + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemini-2.5-flash": { + "max_tokens": 1000000, + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "input_cost_per_token": 3e-07, + "output_cost_per_token": 2.5e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemini-2.5-pro": { + "max_tokens": 1000000, + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "input_cost_per_token": 1.25e-06, + "output_cost_per_token": 1e-05, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemma-3-12b-it": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 5e-08, + "output_cost_per_token": 1e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemma-3-27b-it": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-08, + "output_cost_per_token": 1.6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/google/gemma-3-4b-it": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 8e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-3.2-11B-Vision-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4.9e-08, + "output_cost_per_token": 4.9e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/meta-llama/Llama-3.2-3B-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-08, + "output_cost_per_token": 2e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-3.3-70B-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2.3e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-3.3-70B-Instruct-Turbo": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1.3e-07, + "output_cost_per_token": 3.9e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": { + "max_tokens": 1048576, + "max_input_tokens": 1048576, + "max_output_tokens": 1048576, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-4-Scout-17B-16E-Instruct": { + "max_tokens": 327680, + "max_input_tokens": 327680, + 
"max_output_tokens": 327680, + "input_cost_per_token": 8e-08, + "output_cost_per_token": 3e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Llama-Guard-3-8B": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 5.5e-08, + "output_cost_per_token": 5.5e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/meta-llama/Llama-Guard-4-12B": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 1.8e-07, + "output_cost_per_token": 1.8e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/meta-llama/Meta-Llama-3-8B-Instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 3e-08, + "output_cost_per_token": 6e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Meta-Llama-3.1-70B-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 2.8e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Meta-Llama-3.1-8B-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 3e-08, + "output_cost_per_token": 5e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-08, + "output_cost_per_token": 3e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/microsoft/WizardLM-2-8x22B": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 4.8e-07, + "output_cost_per_token": 4.8e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": false + }, + "deepinfra/microsoft/phi-4": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 7e-08, + "output_cost_per_token": 1.4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/mistralai/Mistral-Nemo-Instruct-2407": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-08, + "output_cost_per_token": 4e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/mistralai/Mistral-Small-24B-Instruct-2501": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-08, + "output_cost_per_token": 8e-08, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/mistralai/Mistral-Small-3.2-24B-Instruct-2506": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 
128000, + "input_cost_per_token": 7.5e-08, + "output_cost_per_token": 2e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/mistralai/Mixtral-8x7B-Instruct-v0.1": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 4e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/moonshotai/Kimi-K2-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 2e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/moonshotai/Kimi-K2-Instruct-0905": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 2e-06, + "cache_read_input_token_cost": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/nvidia/Llama-3.1-Nemotron-70B-Instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 6e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/nvidia/Llama-3.3-Nemotron-Super-49B-v1.5": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 4e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/nvidia/NVIDIA-Nemotron-Nano-9B-v2": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 1.6e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/openai/gpt-oss-120b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 5e-08, + "output_cost_per_token": 4.5e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/openai/gpt-oss-20b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 1.5e-07, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepinfra/zai-org/GLM-4.5": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 4e-07, + "output_cost_per_token": 1.6e-06, + "litellm_provider": "deepinfra", + "mode": "chat", + "supports_tool_choice": true + }, + "deepseek/deepseek-chat": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 7e-08, + "input_cost_per_token": 2.7e-07, + "input_cost_per_token_cache_hit": 7e-08, + "litellm_provider": "deepseek", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.1e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "deepseek/deepseek-coder": { + "input_cost_per_token": 1.4e-07, + "input_cost_per_token_cache_hit": 1.4e-08, + "litellm_provider": "deepseek", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + 
"output_cost_per_token": 2.8e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "deepseek/deepseek-r1": { + "input_cost_per_token": 5.5e-07, + "input_cost_per_token_cache_hit": 1.4e-07, + "litellm_provider": "deepseek", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "deepseek/deepseek-reasoner": { + "input_cost_per_token": 5.5e-07, + "input_cost_per_token_cache_hit": 1.4e-07, + "litellm_provider": "deepseek", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "deepseek/deepseek-v3": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 7e-08, + "input_cost_per_token": 2.7e-07, + "input_cost_per_token_cache_hit": 7e-08, + "litellm_provider": "deepseek", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.1e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "deepseek/deepseek-v3.2": { + "input_cost_per_token": 2.8e-07, + "input_cost_per_token_cache_hit": 2.8e-08, + "litellm_provider": "deepseek", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "deepseek.v3-v1:0": { + "input_cost_per_token": 5.8e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 163840, + "max_output_tokens": 81920, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "dolphin": { + "input_cost_per_token": 5e-07, + "litellm_provider": "nlp_cloud", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "completion", + "output_cost_per_token": 5e-07 + }, + "doubao-embedding": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Volcengine Doubao embedding model - standard version with 2560 dimensions" + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 2560 + }, + "doubao-embedding-large": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Volcengine Doubao embedding model - large version with 2048 dimensions" + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 2048 + }, + "doubao-embedding-large-text-240915": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Volcengine Doubao embedding model - text-240915 version with 4096 
dimensions" + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 4096 + }, + "doubao-embedding-large-text-250515": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Volcengine Doubao embedding model - text-250515 version with 2048 dimensions" + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 2048 + }, + "doubao-embedding-text-240715": { + "input_cost_per_token": 0.0, + "litellm_provider": "volcengine", + "max_input_tokens": 4096, + "max_tokens": 4096, + "metadata": { + "notes": "Volcengine Doubao embedding model - text-240715 version with 2560 dimensions" + }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 2560 + }, + "exa_ai/search": { + "litellm_provider": "exa_ai", + "mode": "search", + "tiered_pricing": [ + { + "input_cost_per_query": 5e-03, + "max_results_range": [ + 0, + 25 + ] + }, + { + "input_cost_per_query": 25e-03, + "max_results_range": [ + 26, + 100 + ] + } + ] + }, + "firecrawl/search": { + "litellm_provider": "firecrawl", + "mode": "search", + "tiered_pricing": [ + { + "input_cost_per_query": 1.66e-03, + "max_results_range": [ + 1, + 10 + ] + }, + { + "input_cost_per_query": 3.32e-03, + "max_results_range": [ + 11, + 20 + ] + }, + { + "input_cost_per_query": 4.98e-03, + "max_results_range": [ + 21, + 30 + ] + }, + { + "input_cost_per_query": 6.64e-03, + "max_results_range": [ + 31, + 40 + ] + }, + { + "input_cost_per_query": 8.3e-03, + "max_results_range": [ + 41, + 50 + ] + }, + { + "input_cost_per_query": 9.96e-03, + "max_results_range": [ + 51, + 60 + ] + }, + { + "input_cost_per_query": 11.62e-03, + "max_results_range": [ + 61, + 70 + ] + }, + { + "input_cost_per_query": 13.28e-03, + "max_results_range": [ + 71, + 80 + ] + }, + { + "input_cost_per_query": 14.94e-03, + "max_results_range": [ + 81, + 90 + ] + }, + { + "input_cost_per_query": 16.6e-03, + "max_results_range": [ + 91, + 100 + ] + } + ], + "metadata": { + "notes": "Firecrawl search pricing: $83 for 100,000 credits, 2 credits per 10 results. Cost = ceiling(limit/10) * 2 * $0.00083" + } + }, + "perplexity/search": { + "input_cost_per_query": 5e-03, + "litellm_provider": "perplexity", + "mode": "search" + }, + "searxng/search": { + "litellm_provider": "searxng", + "mode": "search", + "input_cost_per_query": 0.0, + "metadata": { + "notes": "SearXNG is an open-source metasearch engine. Free to use when self-hosted or using public instances." 
+ } + }, + "elevenlabs/scribe_v1": { + "input_cost_per_second": 6.11e-05, + "litellm_provider": "elevenlabs", + "metadata": { + "calculation": "$0.22/hour = $0.00366/minute = $0.0000611 per second (enterprise pricing)", + "notes": "ElevenLabs Scribe v1 - state-of-the-art speech recognition model with 99 language support", + "original_pricing_per_hour": 0.22 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://elevenlabs.io/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "elevenlabs/scribe_v1_experimental": { + "input_cost_per_second": 6.11e-05, + "litellm_provider": "elevenlabs", + "metadata": { + "calculation": "$0.22/hour = $0.00366/minute = $0.0000611 per second (enterprise pricing)", + "notes": "ElevenLabs Scribe v1 experimental - enhanced version of the main Scribe model", + "original_pricing_per_hour": 0.22 + }, + "mode": "audio_transcription", + "output_cost_per_second": 0.0, + "source": "https://elevenlabs.io/pricing", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "embed-english-light-v2.0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "embed-english-light-v3.0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "embed-english-v2.0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_tokens": 4096, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "embed-english-v3.0": { + "input_cost_per_image": 0.0001, + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 1024, + "max_tokens": 1024, + "metadata": { + "notes": "'supports_image_input' is a deprecated field. Use 'supports_embedding_image_input' instead." 
+ }, + "mode": "embedding", + "output_cost_per_token": 0.0, + "supports_embedding_image_input": true, + "supports_image_input": true + }, + "embed-multilingual-v2.0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 768, + "max_tokens": 768, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "embed-multilingual-v3.0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "cohere", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "embedding", + "output_cost_per_token": 0.0, + "supports_embedding_image_input": true + }, + "embed-multilingual-light-v3.0": { + "input_cost_per_token": 1e-04, + "litellm_provider": "cohere", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "embedding", + "output_cost_per_token": 0.0, + "supports_embedding_image_input": true + }, + "eu.amazon.nova-lite-v1:0": { + "input_cost_per_token": 7.8e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.12e-07, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "eu.amazon.nova-micro-v1:0": { + "input_cost_per_token": 4.6e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.84e-07, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, + "eu.amazon.nova-pro-v1:0": { + "input_cost_per_token": 1.05e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 4.2e-06, + "source": "https://aws.amazon.com/bedrock/pricing/", + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-5-haiku-20241022-v1:0": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "eu.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.375e-06, + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 1.1e-06, + "deprecation_date": "2026-10-15", + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5.5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "eu.anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + 
"max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-5-sonnet-20241022-v2:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-7-sonnet-20250219-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-opus-20240229-v1:0": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-3-sonnet-20240229-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "eu.anthropic.claude-opus-4-1-20250805-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "eu.anthropic.claude-opus-4-20250514-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 
1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "eu.anthropic.claude-sonnet-4-20250514-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "eu.anthropic.claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 4.125e-06, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_token": 3.3e-06, + "input_cost_per_token_above_200k_tokens": 6.6e-06, + "output_cost_per_token_above_200k_tokens": 2.475e-05, + "cache_creation_input_token_cost_above_200k_tokens": 8.25e-06, + "cache_read_input_token_cost_above_200k_tokens": 6.6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "eu.meta.llama3-2-1b-instruct-v1:0": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.3e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "eu.meta.llama3-2-3b-instruct-v1:0": { + "input_cost_per_token": 1.9e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.9e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "eu.mistral.pixtral-large-2502-v1:0": { + 
"input_cost_per_token": 2e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "fal_ai/bria/text-to-image/3.2": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.0398, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/flux-pro/v1.1": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/flux-pro/v1.1-ultra": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.06, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/flux/schnell": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.003, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/bytedance/seedream/v3/text-to-image": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.03, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/bytedance/dreamina/v3.1/text-to-image": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.03, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/ideogram/v3": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.06, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/imagen4/preview": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.0398, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/imagen4/preview/fast": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/imagen4/preview/ultra": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.06, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/recraft/v3/text-to-image": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.0398, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "fal_ai/fal-ai/stable-diffusion-v35-medium": { + "litellm_provider": "fal_ai", + "mode": "image_generation", + "output_cost_per_image": 0.0398, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "featherless_ai/featherless-ai/Qwerky-72B": { + "litellm_provider": "featherless_ai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat" + }, + "featherless_ai/featherless-ai/Qwerky-QwQ-32B": { + "litellm_provider": "featherless_ai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat" + }, + "fireworks-ai-4.1b-to-16b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 2e-07 + }, + "fireworks-ai-56b-to-176b": { + "input_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 1.2e-06 + }, + "fireworks-ai-above-16b": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 9e-07 + }, + "fireworks-ai-default": { + 
"input_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 0.0 + }, + "fireworks-ai-embedding-150m-to-350m": { + "input_cost_per_token": 1.6e-08, + "litellm_provider": "fireworks_ai-embedding-models", + "output_cost_per_token": 0.0 + }, + "fireworks-ai-embedding-up-to-150m": { + "input_cost_per_token": 8e-09, + "litellm_provider": "fireworks_ai-embedding-models", + "output_cost_per_token": 0.0 + }, + "fireworks-ai-moe-up-to-56b": { + "input_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 5e-07 + }, + "fireworks-ai-up-to-4b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "output_cost_per_token": 2e-07 + }, + "fireworks_ai/WhereIsAI/UAE-Large-V1": { + "input_cost_per_token": 1.6e-08, + "litellm_provider": "fireworks_ai-embedding-models", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "source": "https://fireworks.ai/pricing" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-v2-instruct": { + "input_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1": { + "input_cost_per_token": 3e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 20480, + "max_tokens": 20480, + "mode": "chat", + "output_cost_per_token": 8e-06, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-0528": { + "input_cost_per_token": 3e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 160000, + "max_output_tokens": 160000, + "max_tokens": 160000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-basic": { + "input_cost_per_token": 5.5e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 20480, + "max_tokens": 20480, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3-0324": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/models/fireworks/deepseek-v3-0324", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3p1": { + "input_cost_per_token": 5.6e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 
128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "source": "https://fireworks.ai/pricing", + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3p1-terminus": { + "input_cost_per_token": 5.6e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "source": "https://fireworks.ai/pricing", + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v3p2": { + "input_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://fireworks.ai/models/fireworks/deepseek-v3p2", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/firefunction-v2": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/glm-4p5": { + "input_cost_per_token": 5.5e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 96000, + "max_tokens": 96000, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "source": "https://fireworks.ai/models/fireworks/glm-4p5", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/glm-4p5-air": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 96000, + "max_tokens": 96000, + "mode": "chat", + "output_cost_per_token": 8.8e-07, + "source": "https://artificialanalysis.ai/models/glm-4-5-air", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/glm-4p6": { + "input_cost_per_token": 0.55e-06, + "output_cost_per_token": 2.19e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 202800, + "max_output_tokens": 202800, + "max_tokens": 202800, + "mode": "chat", + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/gpt-oss-120b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/gpt-oss-20b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "fireworks_ai", + 
"max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/kimi-k2-instruct": { + "input_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://fireworks.ai/models/fireworks/kimi-k2-instruct", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/kimi-k2-instruct-0905": { + "input_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 262144, + "max_output_tokens": 32768, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://app.fireworks.ai/models/fireworks/kimi-k2-instruct-0905", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/kimi-k2-thinking": { + "input_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-405b-instruct": { + "input_cost_per_token": 3e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-8b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-11b-vision-instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-1b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-3b-instruct": { + "input_cost_per_token": 1e-07, + 
"litellm_provider": "fireworks_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-90b-vision-instruct": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "fireworks_ai/accounts/fireworks/models/llama4-maverick-instruct-basic": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 8.8e-07, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/llama4-scout-instruct-basic": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://fireworks.ai/pricing", + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/mixtral-8x22b-instruct-hf": { + "input_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "fireworks_ai/accounts/fireworks/models/qwen2-72b-instruct": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct": { + "input_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/accounts/fireworks/models/yi-large": { + "input_cost_per_token": 3e-06, + "litellm_provider": "fireworks_ai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://fireworks.ai/pricing", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "fireworks_ai/nomic-ai/nomic-embed-text-v1": { + "input_cost_per_token": 8e-09, + "litellm_provider": "fireworks_ai-embedding-models", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "source": "https://fireworks.ai/pricing" + }, + 
"fireworks_ai/nomic-ai/nomic-embed-text-v1.5": { + "input_cost_per_token": 8e-09, + "litellm_provider": "fireworks_ai-embedding-models", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0.0, + "source": "https://fireworks.ai/pricing" + }, + "fireworks_ai/thenlper/gte-base": { + "input_cost_per_token": 8e-09, + "litellm_provider": "fireworks_ai-embedding-models", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "source": "https://fireworks.ai/pricing" + }, + "fireworks_ai/thenlper/gte-large": { + "input_cost_per_token": 1.6e-08, + "litellm_provider": "fireworks_ai-embedding-models", + "max_input_tokens": 512, + "max_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "source": "https://fireworks.ai/pricing" + }, + "friendliai/meta-llama-3.1-70b-instruct": { + "input_cost_per_token": 6e-07, + "litellm_provider": "friendliai", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "friendliai/meta-llama-3.1-8b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "friendliai", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:babbage-002": { + "input_cost_per_token": 1.6e-06, + "input_cost_per_token_batches": 2e-07, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 16384, + "mode": "completion", + "output_cost_per_token": 1.6e-06, + "output_cost_per_token_batches": 2e-07 + }, + "ft:davinci-002": { + "input_cost_per_token": 1.2e-05, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 16384, + "mode": "completion", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_batches": 1e-06 + }, + "ft:gpt-3.5-turbo": { + "input_cost_per_token": 3e-06, + "input_cost_per_token_batches": 1.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-06, + "output_cost_per_token_batches": 3e-06, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-3.5-turbo-0125": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-3.5-turbo-0613": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-3.5-turbo-1106": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + 
"output_cost_per_token": 6e-06, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-4-0613": { + "input_cost_per_token": 3e-05, + "litellm_provider": "openai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "source": "OpenAI needs to add pricing for this ft model, will be updated when added by OpenAI. Defaulting to base model pricing", + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-4o-2024-08-06": { + "cache_read_input_token_cost": 1.875e-06, + "input_cost_per_token": 3.75e-06, + "input_cost_per_token_batches": 1.875e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_batches": 7.5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "ft:gpt-4o-2024-11-20": { + "cache_creation_input_token_cost": 1.875e-06, + "input_cost_per_token": 3.75e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-4o-mini-2024-07-18": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 3e-07, + "input_cost_per_token_batches": 1.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "output_cost_per_token_batches": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-4.1-2025-04-14": { + "cache_read_input_token_cost": 7.5e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_batches": 1.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_batches": 6e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:gpt-4.1-mini-2025-04-14": { + "cache_read_input_token_cost": 2e-07, + "input_cost_per_token": 8e-07, + "input_cost_per_token_batches": 4e-07, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3.2e-06, + "output_cost_per_token_batches": 1.6e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + 
"ft:gpt-4.1-nano-2025-04-14": { + "cache_read_input_token_cost": 5e-08, + "input_cost_per_token": 2e-07, + "input_cost_per_token_batches": 1e-07, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-07, + "output_cost_per_token_batches": 4e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "ft:o4-mini-2025-04-16": { + "cache_read_input_token_cost": 1e-06, + "input_cost_per_token": 4e-06, + "input_cost_per_token_batches": 2e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 1.6e-05, + "output_cost_per_token_batches": 8e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "gemini-1.0-pro": { + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 32760, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#google_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-1.0-pro-001": { + "deprecation_date": "2025-04-09", + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 32760, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-1.0-pro-002": { + "deprecation_date": "2025-04-09", + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 32760, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-1.0-pro-vision": { + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "litellm_provider": "vertex_ai-vision-models", + "max_images_per_prompt": 16, + "max_input_tokens": 16384, + "max_output_tokens": 2048, + "max_tokens": 2048, + "max_video_length": 2, + "max_videos_per_prompt": 1, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.0-pro-vision-001": { + "deprecation_date": "2025-04-09", + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "litellm_provider": "vertex_ai-vision-models", + "max_images_per_prompt": 16, + "max_input_tokens": 16384, + "max_output_tokens": 2048, + "max_tokens": 2048, + "max_video_length": 2, + "max_videos_per_prompt": 1, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.0-ultra": { + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 8192, + "max_output_tokens": 2048, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "As of Jun, 2024. There is no available doc on vertex ai pricing gemini-1.0-ultra-001. Using gemini-1.0-pro pricing. Got max_tokens info here: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-1.0-ultra-001": { + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 8192, + "max_output_tokens": 2048, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "As of Jun, 2024. There is no available doc on vertex ai pricing gemini-1.0-ultra-001. Using gemini-1.0-pro pricing. 
Got max_tokens info here: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-1.5-flash": { + "input_cost_per_audio_per_second": 2e-06, + "input_cost_per_audio_per_second_above_128k_tokens": 4e-06, + "input_cost_per_character": 1.875e-08, + "input_cost_per_character_above_128k_tokens": 2.5e-07, + "input_cost_per_image": 2e-05, + "input_cost_per_image_above_128k_tokens": 4e-05, + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1e-06, + "input_cost_per_video_per_second": 2e-05, + "input_cost_per_video_per_second_above_128k_tokens": 4e-05, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 7.5e-08, + "output_cost_per_character_above_128k_tokens": 1.5e-07, + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-flash-001": { + "deprecation_date": "2025-05-24", + "input_cost_per_audio_per_second": 2e-06, + "input_cost_per_audio_per_second_above_128k_tokens": 4e-06, + "input_cost_per_character": 1.875e-08, + "input_cost_per_character_above_128k_tokens": 2.5e-07, + "input_cost_per_image": 2e-05, + "input_cost_per_image_above_128k_tokens": 4e-05, + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1e-06, + "input_cost_per_video_per_second": 2e-05, + "input_cost_per_video_per_second_above_128k_tokens": 4e-05, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 7.5e-08, + "output_cost_per_character_above_128k_tokens": 1.5e-07, + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-flash-002": { + "deprecation_date": "2025-09-24", + "input_cost_per_audio_per_second": 2e-06, + "input_cost_per_audio_per_second_above_128k_tokens": 4e-06, + "input_cost_per_character": 1.875e-08, + "input_cost_per_character_above_128k_tokens": 2.5e-07, + "input_cost_per_image": 2e-05, + "input_cost_per_image_above_128k_tokens": 4e-05, + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1e-06, + "input_cost_per_video_per_second": 2e-05, + "input_cost_per_video_per_second_above_128k_tokens": 4e-05, + "litellm_provider": "vertex_ai-language-models", + 
"max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 7.5e-08, + "output_cost_per_character_above_128k_tokens": 1.5e-07, + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-1.5-flash", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-flash-exp-0827": { + "input_cost_per_audio_per_second": 2e-06, + "input_cost_per_audio_per_second_above_128k_tokens": 4e-06, + "input_cost_per_character": 1.875e-08, + "input_cost_per_character_above_128k_tokens": 2.5e-07, + "input_cost_per_image": 2e-05, + "input_cost_per_image_above_128k_tokens": 4e-05, + "input_cost_per_token": 4.688e-09, + "input_cost_per_token_above_128k_tokens": 1e-06, + "input_cost_per_video_per_second": 2e-05, + "input_cost_per_video_per_second_above_128k_tokens": 4e-05, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 1.875e-08, + "output_cost_per_character_above_128k_tokens": 3.75e-08, + "output_cost_per_token": 4.6875e-09, + "output_cost_per_token_above_128k_tokens": 9.375e-09, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-flash-preview-0514": { + "input_cost_per_audio_per_second": 2e-06, + "input_cost_per_audio_per_second_above_128k_tokens": 4e-06, + "input_cost_per_character": 1.875e-08, + "input_cost_per_character_above_128k_tokens": 2.5e-07, + "input_cost_per_image": 2e-05, + "input_cost_per_image_above_128k_tokens": 4e-05, + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1e-06, + "input_cost_per_video_per_second": 2e-05, + "input_cost_per_video_per_second_above_128k_tokens": 4e-05, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 1.875e-08, + "output_cost_per_character_above_128k_tokens": 3.75e-08, + "output_cost_per_token": 4.6875e-09, + "output_cost_per_token_above_128k_tokens": 9.375e-09, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-pro": { + "input_cost_per_audio_per_second": 3.125e-05, + 
"input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_128k_tokens": 2.5e-06, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 5e-06, + "output_cost_per_token_above_128k_tokens": 1e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-pro-001": { + "deprecation_date": "2025-05-24", + "input_cost_per_audio_per_second": 3.125e-05, + "input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_128k_tokens": 2.5e-06, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 5e-06, + "output_cost_per_token_above_128k_tokens": 1e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini-1.5-pro-002": { + "deprecation_date": "2025-09-24", + "input_cost_per_audio_per_second": 3.125e-05, + "input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_128k_tokens": 2.5e-06, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 5e-06, + "output_cost_per_token_above_128k_tokens": 1e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-1.5-pro", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": 
true, + "supports_vision": true + }, + "gemini-1.5-pro-preview-0215": { + "input_cost_per_audio_per_second": 3.125e-05, + "input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 7.8125e-08, + "input_cost_per_token_above_128k_tokens": 1.5625e-07, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 3.125e-07, + "output_cost_per_token_above_128k_tokens": 6.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gemini-1.5-pro-preview-0409": { + "input_cost_per_audio_per_second": 3.125e-05, + "input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 7.8125e-08, + "input_cost_per_token_above_128k_tokens": 1.5625e-07, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 3.125e-07, + "output_cost_per_token_above_128k_tokens": 6.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "gemini-1.5-pro-preview-0514": { + "input_cost_per_audio_per_second": 3.125e-05, + "input_cost_per_audio_per_second_above_128k_tokens": 6.25e-05, + "input_cost_per_character": 3.125e-07, + "input_cost_per_character_above_128k_tokens": 6.25e-07, + "input_cost_per_image": 0.00032875, + "input_cost_per_image_above_128k_tokens": 0.0006575, + "input_cost_per_token": 7.8125e-08, + "input_cost_per_token_above_128k_tokens": 1.5625e-07, + "input_cost_per_video_per_second": 0.00032875, + "input_cost_per_video_per_second_above_128k_tokens": 0.0006575, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1.25e-06, + "output_cost_per_character_above_128k_tokens": 2.5e-06, + "output_cost_per_token": 3.125e-07, + "output_cost_per_token_above_128k_tokens": 6.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + 
"gemini-2.0-flash": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "source": "https://ai.google.dev/pricing#2_0flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-001": { + "cache_read_input_token_cost": 3.75e-08, + "deprecation_date": "2026-02-05", + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-exp": { + "cache_read_input_token_cost": 3.75e-08, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 1.5e-07, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 6e-07, + "output_cost_per_token_above_128k_tokens": 0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + 
"supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-lite": { + "cache_read_input_token_cost": 1.875e-08, + "input_cost_per_audio_token": 7.5e-08, + "input_cost_per_token": 7.5e-08, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 50, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-lite-001": { + "cache_read_input_token_cost": 1.875e-08, + "deprecation_date": "2026-02-25", + "input_cost_per_audio_token": 7.5e-08, + "input_cost_per_token": 7.5e-08, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 50, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-live-preview-04-09": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 3e-06, + "input_cost_per_image": 3e-06, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 3e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_audio_token": 1.2e-05, + "output_cost_per_token": 2e-06, + "rpm": 10, + "source": "https://cloud.google.com/vertex-ai/docs/generative-ai/model-reference/gemini#gemini-2-0-flash-live-preview-04-09", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + 
"supports_web_search": true, + "tpm": 250000 + }, + "gemini-2.0-flash-preview-image-generation": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "source": "https://ai.google.dev/pricing#2_0flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-thinking-exp": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-flash-thinking-exp-01-21": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_pdf_size_mb": 30, + "max_tokens": 65536, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + 
"output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": false, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.0-pro-exp-02-05": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash": { + "cache_read_input_token_cost": 3e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-image": { + "cache_read_input_token_cost": 3e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + 
"max_tokens": 32768, + "max_pdf_size_mb": 30, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "image_generation", + "output_cost_per_image": 0.039, + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash-image", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": false, + "tpm": 8000000 + }, + "gemini-2.5-flash-image-preview": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "image_generation", + "output_cost_per_image": 0.039, + "output_cost_per_reasoning_token": 3e-05, + "output_cost_per_token": 3e-05, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 8000000 + }, + "gemini-3-pro-image-preview": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 65536, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 1.2e-04, + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_batches": 6e-06, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-lite": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 5e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 
1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-lite-preview-09-2025": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 3e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-preview-09-2025": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + 
"supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-live-2.5-flash-preview-native-audio-09-2025": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 3e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_audio_token": 1.2e-05, + "output_cost_per_token": 2e-06, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini/gemini-live-2.5-flash-preview-native-audio-09-2025": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 3e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_audio_token": 1.2e-05, + "output_cost_per_token": 2e-06, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 8000000 + }, + "gemini-2.5-flash-lite-preview-06-17": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 5e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-preview-04-17": { + "cache_read_input_token_cost": 3.75e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 3.5e-06, + "output_cost_per_token": 6e-07, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-flash-preview-05-20": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro": { + "cache_read_input_token_cost": 1.25e-07, + "cache_creation_input_token_cost_above_200k_tokens": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": 
"https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-3-pro-preview": { + "cache_read_input_token_cost": 2e-07, + "cache_read_input_token_cost_above_200k_tokens": 4e-07, + "cache_creation_input_token_cost_above_200k_tokens": 2.5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_above_200k_tokens": 4e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_above_200k_tokens": 1.8e-05, + "output_cost_per_token_batches": 6e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "vertex_ai/gemini-3-pro-preview": { + "cache_read_input_token_cost": 2e-07, + "cache_read_input_token_cost_above_200k_tokens": 4e-07, + "cache_creation_input_token_cost_above_200k_tokens": 2.5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_above_200k_tokens": 4e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "vertex_ai", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_above_200k_tokens": 1.8e-05, + "output_cost_per_token_batches": 6e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro-exp-03-25": { + "cache_read_input_token_cost": 3.125e-07, + 
"input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro-preview-03-25": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 1.25e-06, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro-preview-05-06": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 1.25e-06, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supported_regions": [ + "global" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + 
"supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro-preview-06-05": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 1.25e-06, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-2.5-pro-preview-tts": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "audio" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini-embedding-001": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 3072, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models" + }, + "gemini-flash-experimental": { + "input_cost_per_character": 0, + "input_cost_per_token": 0, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_token": 0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/gemini-experimental", + "supports_function_calling": false, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + 
"gemini-pro": { + "input_cost_per_character": 1.25e-07, + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "input_cost_per_video_per_second": 0.002, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 32760, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 3.75e-07, + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-pro-experimental": { + "input_cost_per_character": 0, + "input_cost_per_token": 0, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_token": 0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/gemini-experimental", + "supports_function_calling": false, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "gemini-pro-vision": { + "input_cost_per_image": 0.0025, + "input_cost_per_token": 5e-07, + "litellm_provider": "vertex_ai-vision-models", + "max_images_per_prompt": 16, + "max_input_tokens": 16384, + "max_output_tokens": 2048, + "max_tokens": 2048, + "max_video_length": 2, + "max_videos_per_prompt": 1, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini/gemini-embedding-001": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "gemini", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 3072, + "rpm": 10000, + "source": "https://ai.google.dev/gemini-api/docs/embeddings#model-versions", + "tpm": 10000000 + }, + "gemini/gemini-1.5-flash": { + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "rpm": 2000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-001": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 1.875e-08, + "deprecation_date": "2025-05-24", + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "rpm": 2000, + "source": "https://ai.google.dev/pricing", + 
"supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-002": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 1.875e-08, + "deprecation_date": "2025-09-24", + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "rpm": 2000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-8b": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 4000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-8b-exp-0827": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 4000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-8b-exp-0924": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 4000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-exp-0827": { + "input_cost_per_token": 0, + 
"input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 2000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-flash-latest": { + "input_cost_per_token": 7.5e-08, + "input_cost_per_token_above_128k_tokens": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "output_cost_per_token_above_128k_tokens": 6e-07, + "rpm": 2000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro": { + "input_cost_per_token": 3.5e-06, + "input_cost_per_token_above_128k_tokens": 7e-06, + "litellm_provider": "gemini", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-05, + "output_cost_per_token_above_128k_tokens": 2.1e-05, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro-001": { + "deprecation_date": "2025-05-24", + "input_cost_per_token": 3.5e-06, + "input_cost_per_token_above_128k_tokens": 7e-06, + "litellm_provider": "gemini", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-05, + "output_cost_per_token_above_128k_tokens": 2.1e-05, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro-002": { + "deprecation_date": "2025-09-24", + "input_cost_per_token": 3.5e-06, + "input_cost_per_token_above_128k_tokens": 7e-06, + "litellm_provider": "gemini", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-05, + "output_cost_per_token_above_128k_tokens": 2.1e-05, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro-exp-0801": { + "input_cost_per_token": 3.5e-06, + "input_cost_per_token_above_128k_tokens": 7e-06, + "litellm_provider": 
"gemini", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-05, + "output_cost_per_token_above_128k_tokens": 2.1e-05, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro-exp-0827": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-1.5-pro-latest": { + "input_cost_per_token": 3.5e-06, + "input_cost_per_token_above_128k_tokens": 7e-06, + "litellm_provider": "gemini", + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-06, + "output_cost_per_token_above_128k_tokens": 2.1e-05, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-2.0-flash": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "rpm": 10000, + "source": "https://ai.google.dev/pricing#2_0flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.0-flash-001": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "rpm": 10000, + "source": "https://ai.google.dev/pricing#2_0flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + 
"supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.0-flash-exp": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 10, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 4000000 + }, + "gemini/gemini-2.0-flash-lite": { + "cache_read_input_token_cost": 1.875e-08, + "input_cost_per_audio_token": 7.5e-08, + "input_cost_per_token": 7.5e-08, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 50, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "rpm": 4000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 4000000 + }, + "gemini/gemini-2.0-flash-lite-preview-02-05": { + "cache_read_input_token_cost": 1.875e-08, + "input_cost_per_audio_token": 7.5e-08, + "input_cost_per_token": 7.5e-08, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 3e-07, + "rpm": 60000, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + 
"supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.0-flash-live-001": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 2.1e-06, + "input_cost_per_image": 2.1e-06, + "input_cost_per_token": 3.5e-07, + "input_cost_per_video_per_second": 2.1e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_audio_token": 8.5e-06, + "output_cost_per_token": 1.5e-06, + "rpm": 10, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2-0-flash-live-001", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.0-flash-preview-image-generation": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "rpm": 10000, + "source": "https://ai.google.dev/pricing#2_0flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.0-flash-thinking-exp": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 10, + "source": 
"https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 4000000 + }, + "gemini/gemini-2.0-flash-thinking-exp-01-21": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 10, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 4000000 + }, + "gemini/gemini-2.0-pro-exp-02-05": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 2, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 1000000 + }, + "gemini/gemini-2.5-flash": { + "cache_read_input_token_cost": 
3e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 8000000 + }, + "gemini/gemini-2.5-flash-image": { + "cache_read_input_token_cost": 3e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "supports_reasoning": false, + "max_images_per_prompt": 3000, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "max_pdf_size_mb": 30, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "image_generation", + "output_cost_per_image": 0.039, + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-flash-image", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 8000000 + }, + "gemini/gemini-2.5-flash-image-preview": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "image_generation", + "output_cost_per_image": 0.039, + "output_cost_per_reasoning_token": 3e-05, + "output_cost_per_token": 3e-05, + "rpm": 100000, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 8000000 + }, + "gemini/gemini-3-pro-image-preview": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "gemini", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 65536, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 1.2e-04, + "output_cost_per_token": 1.2e-05, + "rpm": 1000, + "tpm": 4000000, + "output_cost_per_token_batches": 6e-06, + "source": "https://ai.google.dev/gemini-api/docs/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini/gemini-2.5-flash-lite": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 5e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "rpm": 15, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-lite", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-lite-preview-09-2025": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 3e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "rpm": 15, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + 
"supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-preview-09-2025": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 15, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-flash-latest": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 15, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-flash-lite-latest": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 3e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + 
"max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "rpm": 15, + "source": "https://developers.googleblog.com/en/continuing-to-bring-you-our-latest-models-with-an-improved-gemini-2-5-flash-and-flash-lite-release/", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-lite-preview-06-17": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_audio_token": 5e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 4e-07, + "output_cost_per_token": 4e-07, + "rpm": 15, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-lite", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-preview-04-17": { + "cache_read_input_token_cost": 3.75e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 3.5e-06, + "output_cost_per_token": 6e-07, + "rpm": 10, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-preview-05-20": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_audio_token": 1e-06, + 
"input_cost_per_token": 3e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 10, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-flash-preview-tts": { + "cache_read_input_token_cost": 3.75e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 3.5e-06, + "output_cost_per_token": 6e-07, + "rpm": 10, + "source": "https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "audio" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-pro": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 2000, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 800000 + }, + "gemini/gemini-2.5-computer-use-preview-10-2025": { + "input_cost_per_token": 
1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_images_per_prompt": 3000, + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 2000, + "source": "https://ai.google.dev/gemini-api/docs/computer-use", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_computer_use": true, + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 800000 + }, + "gemini/gemini-3-pro-preview": { + "cache_read_input_token_cost": 2e-07, + "cache_read_input_token_cost_above_200k_tokens": 4e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_above_200k_tokens": 4e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_above_200k_tokens": 1.8e-05, + "output_cost_per_token_batches": 6e-06, + "rpm": 2000, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 800000 + }, + "gemini/gemini-3-flash-preview": { + "cache_read_input_token_cost": 5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 5e-07, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 3e-06, + "output_cost_per_token": 3e-06, + "rpm": 2000, + "source": "https://ai.google.dev/pricing/gemini-3", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 800000 + }, + "gemini-3-flash-preview": { + "cache_read_input_token_cost": 5e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 5e-07, + 
"litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_reasoning_token": 3e-06, + "output_cost_per_token": 3e-06, + "source": "https://ai.google.dev/pricing/gemini-3", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true + }, + "gemini/gemini-2.5-pro-exp-03-25": { + "cache_read_input_token_cost": 0.0, + "input_cost_per_token": 0.0, + "input_cost_per_token_above_200k_tokens": 0.0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 0.0, + "output_cost_per_token_above_200k_tokens": 0.0, + "rpm": 5, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 250000 + }, + "gemini/gemini-2.5-pro-preview-03-25": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 10000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.5-pro-preview-05-06": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 7e-07, 
+ "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 10000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.5-pro-preview-06-05": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 10000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview", + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-2.5-pro-preview-tts": { + "cache_read_input_token_cost": 3.125e-07, + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_above_200k_tokens": 2.5e-06, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_above_200k_tokens": 1.5e-05, + "rpm": 10000, + "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "audio" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true, + "tpm": 10000000 + }, + "gemini/gemini-exp-1114": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + 
"max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "metadata": { + "notes": "Rate limits not documented for gemini-exp-1114. Assuming same as gemini-1.5-pro.", + "supports_tool_choice": true + }, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-exp-1206": { + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 2097152, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "metadata": { + "notes": "Rate limits not documented for gemini-exp-1206. Assuming same as gemini-1.5-pro.", + "supports_tool_choice": true + }, + "mode": "chat", + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "rpm": 1000, + "source": "https://ai.google.dev/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 4000000 + }, + "gemini/gemini-gemma-2-27b-it": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "gemini", + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini/gemini-gemma-2-9b-it": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "gemini", + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini/gemini-pro": { + "input_cost_per_token": 3.5e-07, + "input_cost_per_token_above_128k_tokens": 7e-07, + "litellm_provider": "gemini", + "max_input_tokens": 32760, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.05e-06, + "output_cost_per_token_above_128k_tokens": 2.1e-06, + "rpd": 30000, + "rpm": 360, + "source": "https://ai.google.dev/gemini-api/docs/models/gemini", + "supports_function_calling": true, + "supports_tool_choice": true, + "tpm": 120000 + }, + "gemini/gemini-pro-vision": { + "input_cost_per_token": 3.5e-07, + "input_cost_per_token_above_128k_tokens": 7e-07, + "litellm_provider": "gemini", + "max_input_tokens": 30720, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 1.05e-06, + "output_cost_per_token_above_128k_tokens": 2.1e-06, + "rpd": 30000, + "rpm": 360, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "tpm": 120000 + }, + "gemini/gemma-3-27b-it": { + 
"input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "source": "https://aistudio.google.com", + "supports_audio_output": false, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini/imagen-3.0-fast-generate-001": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/imagen-3.0-generate-001": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/imagen-3.0-generate-002": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/imagen-4.0-fast-generate-001": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/imagen-4.0-generate-001": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/imagen-4.0-ultra-generate-001": { + "litellm_provider": "gemini", + "mode": "image_generation", + "output_cost_per_image": 0.06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "gemini/learnlm-1.5-pro-experimental": { + "input_cost_per_audio_per_second": 0, + "input_cost_per_audio_per_second_above_128k_tokens": 0, + "input_cost_per_character": 0, + "input_cost_per_character_above_128k_tokens": 0, + "input_cost_per_image": 0, + "input_cost_per_image_above_128k_tokens": 0, + "input_cost_per_token": 0, + "input_cost_per_token_above_128k_tokens": 0, + "input_cost_per_video_per_second": 0, + "input_cost_per_video_per_second_above_128k_tokens": 0, + "litellm_provider": "gemini", + "max_input_tokens": 32767, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 0, + "output_cost_per_character_above_128k_tokens": 0, + "output_cost_per_token": 0, + "output_cost_per_token_above_128k_tokens": 0, + "source": "https://aistudio.google.com", + "supports_audio_output": false, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gemini/veo-2.0-generate-001": { + "litellm_provider": "gemini", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.35, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + 
"supported_output_modalities": [ + "video" + ] + }, + "gemini/veo-3.0-fast-generate-preview": { + "litellm_provider": "gemini", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.4, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "gemini/veo-3.0-generate-preview": { + "litellm_provider": "gemini", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.75, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "gemini/veo-3.1-fast-generate-preview": { + "litellm_provider": "gemini", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.15, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "gemini/veo-3.1-generate-preview": { + "litellm_provider": "gemini", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.40, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "github_copilot/claude-haiku-4.5": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/claude-opus-4.5": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/claude-opus-41": { + "litellm_provider": "github_copilot", + "max_input_tokens": 80000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions" + ], + "supports_vision": true + }, + "github_copilot/claude-sonnet-4": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/claude-sonnet-4.5": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gemini-2.5-pro": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gemini-3-pro-preview": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + 
"supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gpt-3.5-turbo": { + "litellm_provider": "github_copilot", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true + }, + "github_copilot/gpt-3.5-turbo-0613": { + "litellm_provider": "github_copilot", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true + }, + "github_copilot/gpt-4": { + "litellm_provider": "github_copilot", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true + }, + "github_copilot/gpt-4-0613": { + "litellm_provider": "github_copilot", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true + }, + "github_copilot/gpt-4-o-preview": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "github_copilot/gpt-4.1": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-4.1-2025-04-14": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-41-copilot": { + "litellm_provider": "github_copilot", + "mode": "completion" + }, + "github_copilot/gpt-4o": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gpt-4o-2024-05-13": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gpt-4o-2024-08-06": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "github_copilot/gpt-4o-2024-11-20": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "github_copilot/gpt-4o-mini": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "github_copilot/gpt-4o-mini-2024-07-18": { + "litellm_provider": "github_copilot", + "max_input_tokens": 64000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": 
"chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true + }, + "github_copilot/gpt-5": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions", + "/responses" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-5-mini": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-5.1": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions", + "/responses" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-5.1-codex-max": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "supported_endpoints": [ + "/responses" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/gpt-5.2": { + "litellm_provider": "github_copilot", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "supported_endpoints": [ + "/chat/completions", + "/responses" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_vision": true + }, + "github_copilot/text-embedding-3-small": { + "litellm_provider": "github_copilot", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding" + }, + "github_copilot/text-embedding-3-small-inference": { + "litellm_provider": "github_copilot", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding" + }, + "github_copilot/text-embedding-ada-002": { + "litellm_provider": "github_copilot", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding" + }, + "google.gemma-3-12b-it": { + "input_cost_per_token": 9e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.9e-07, + "supports_system_messages": true, + "supports_vision": true + }, + "google.gemma-3-27b-it": { + "input_cost_per_token": 2.3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3.8e-07, + "supports_system_messages": true, + "supports_vision": true + }, + "google.gemma-3-4b-it": { + "input_cost_per_token": 4e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 8e-08, + "supports_system_messages": true, + "supports_vision": true + }, + "google_pse/search": { + "input_cost_per_query": 0.005, + "litellm_provider": "google_pse", + "mode": "search" + }, + 
"global.anthropic.claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "global.anthropic.claude-sonnet-4-20250514-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "global.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "global.amazon.nova-2-lite-v1:0": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_video_input": true, + "supports_vision": true + }, + "gpt-3.5-turbo": { + "input_cost_per_token": 0.5e-06, + "litellm_provider": 
"openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 4097, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-0125": { + "input_cost_per_token": 5e-07, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 16385, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-0301": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4097, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-0613": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 4097, + "max_output_tokens": 4096, + "max_tokens": 4097, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-1106": { + "deprecation_date": "2026-09-28", + "input_cost_per_token": 1e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 16385, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-16k": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 16385, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-16k-0613": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 16385, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-3.5-turbo-instruct": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "gpt-3.5-turbo-instruct-0914": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "text-completion-openai", + "max_input_tokens": 8192, + "max_output_tokens": 4097, + "max_tokens": 4097, + "mode": "completion", + "output_cost_per_token": 2e-06 + }, + "gpt-4": { + "input_cost_per_token": 3e-05, + "litellm_provider": "openai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-0125-preview": { + "deprecation_date": "2026-03-26", + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + 
"max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-0314": { + "input_cost_per_token": 3e-05, + "litellm_provider": "openai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-0613": { + "deprecation_date": "2025-06-06", + "input_cost_per_token": 3e-05, + "litellm_provider": "openai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-1106-preview": { + "deprecation_date": "2026-03-26", + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-1106-vision-preview": { + "deprecation_date": "2024-12-06", + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4-32k": { + "input_cost_per_token": 6e-05, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.00012, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-32k-0314": { + "input_cost_per_token": 6e-05, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.00012, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-32k-0613": { + "input_cost_per_token": 6e-05, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.00012, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-turbo": { + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4-turbo-2024-04-09": { + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 
3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4-turbo-preview": { + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4-vision-preview": { + "deprecation_date": "2024-12-06", + "input_cost_per_token": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4.1": { + "cache_read_input_token_cost": 5e-07, + "cache_read_input_token_cost_priority": 8.75e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "output_cost_per_token_priority": 1.4e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.1-2025-04-14": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.1-mini": { + "cache_read_input_token_cost": 1e-07, + "cache_read_input_token_cost_priority": 1.75e-07, + "input_cost_per_token": 4e-07, + "input_cost_per_token_batches": 2e-07, + "input_cost_per_token_priority": 7e-07, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "output_cost_per_token_batches": 8e-07, + "output_cost_per_token_priority": 2.8e-06, + 
"supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.1-mini-2025-04-14": { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "input_cost_per_token_batches": 2e-07, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "output_cost_per_token_batches": 8e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.1-nano": { + "cache_read_input_token_cost": 2.5e-08, + "cache_read_input_token_cost_priority": 5e-08, + "input_cost_per_token": 1e-07, + "input_cost_per_token_batches": 5e-08, + "input_cost_per_token_priority": 2e-07, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_batches": 2e-07, + "output_cost_per_token_priority": 8e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.1-nano-2025-04-14": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "input_cost_per_token_batches": 5e-08, + "litellm_provider": "openai", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_batches": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4.5-preview": { + "cache_read_input_token_cost": 3.75e-05, + "input_cost_per_token": 7.5e-05, + "input_cost_per_token_batches": 3.75e-05, + "litellm_provider": "openai", + 
"max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 0.00015, + "output_cost_per_token_batches": 7.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4.5-preview-2025-02-27": { + "cache_read_input_token_cost": 3.75e-05, + "deprecation_date": "2025-07-14", + "input_cost_per_token": 7.5e-05, + "input_cost_per_token_batches": 3.75e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 0.00015, + "output_cost_per_token_batches": 7.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4o": { + "cache_read_input_token_cost": 1.25e-06, + "cache_read_input_token_cost_priority": 2.125e-06, + "input_cost_per_token": 2.5e-06, + "input_cost_per_token_batches": 1.25e-06, + "input_cost_per_token_priority": 4.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_batches": 5e-06, + "output_cost_per_token_priority": 1.7e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4o-2024-05-13": { + "input_cost_per_token": 5e-06, + "input_cost_per_token_batches": 2.5e-06, + "input_cost_per_token_priority": 8.75e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_batches": 7.5e-06, + "output_cost_per_token_priority": 2.625e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4o-2024-08-06": { + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "input_cost_per_token_batches": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_batches": 5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4o-2024-11-20": { + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "input_cost_per_token_batches": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": 
"chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_batches": 5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4o-audio-preview": { + "input_cost_per_audio_token": 0.0001, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 0.0002, + "output_cost_per_token": 1e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-audio-preview-2024-10-01": { + "input_cost_per_audio_token": 0.0001, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 0.0002, + "output_cost_per_token": 1e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-audio-preview-2024-12-17": { + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 1e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-audio-preview-2025-06-03": { + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 1e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-mini": { + "cache_read_input_token_cost": 7.5e-08, + "cache_read_input_token_cost_priority": 1.25e-07, + "input_cost_per_token": 1.5e-07, + "input_cost_per_token_batches": 7.5e-08, + "input_cost_per_token_priority": 2.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-07, + "output_cost_per_token_batches": 3e-07, + "output_cost_per_token_priority": 1e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4o-mini-2024-07-18": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.5e-07, + "input_cost_per_token_batches": 7.5e-08, + "litellm_provider": "openai", + 
"max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-07, + "output_cost_per_token_batches": 3e-07, + "search_context_cost_per_query": { + "search_context_size_high": 0.03, + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275 + }, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-4o-mini-audio-preview": { + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 6e-07, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-mini-audio-preview-2024-12-17": { + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 6e-07, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-mini-realtime-preview": { + "cache_creation_input_audio_token_cost": 3e-07, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 6e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-mini-realtime-preview-2024-12-17": { + "cache_creation_input_audio_token_cost": 3e-07, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 6e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-mini-search-preview": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.5e-07, + "input_cost_per_token_batches": 7.5e-08, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-07, + "output_cost_per_token_batches": 3e-07, + "search_context_cost_per_query": { + "search_context_size_high": 0.03, + "search_context_size_low": 0.025, + "search_context_size_medium": 0.0275 + }, + "supports_function_calling": true, + 
"supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-4o-mini-search-preview-2025-03-11": { + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.5e-07, + "input_cost_per_token_batches": 7.5e-08, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 6e-07, + "output_cost_per_token_batches": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4o-mini-transcribe": { + "input_cost_per_audio_token": 3e-06, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 5e-06, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "gpt-4o-mini-tts": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "mode": "audio_speech", + "output_cost_per_audio_token": 1.2e-05, + "output_cost_per_second": 0.00025, + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/speech" + ], + "supported_modalities": [ + "text", + "audio" + ], + "supported_output_modalities": [ + "audio" + ] + }, + "gpt-4o-realtime-preview": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-realtime-preview-2024-10-01": { + "cache_creation_input_audio_token_cost": 2e-05, + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 0.0001, + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 0.0002, + "output_cost_per_token": 2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-realtime-preview-2024-12-17": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 4e-05, + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-realtime-preview-2025-06-03": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_audio_token": 4e-05, 
+ "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 8e-05, + "output_cost_per_token": 2e-05, + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-4o-search-preview": { + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "input_cost_per_token_batches": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_batches": 5e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.05, + "search_context_size_low": 0.03, + "search_context_size_medium": 0.035 + }, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-4o-search-preview-2025-03-11": { + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "input_cost_per_token_batches": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_batches": 5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-4o-transcribe": { + "input_cost_per_audio_token": 6e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 16000, + "max_output_tokens": 2000, + "mode": "audio_transcription", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "gpt-image-1.5": { + "cache_read_input_image_token_cost": 2e-06, + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_token": 1e-05, + "input_cost_per_image_token": 8e-06, + "output_cost_per_image_token": 3.2e-05, + "supported_endpoints": [ + "/v1/images/generations" + ], + "supports_vision": true + }, + "gpt-image-1.5-2025-12-16": { + "cache_read_input_image_token_cost": 2e-06, + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 5e-06, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_token": 1e-05, + "input_cost_per_image_token": 8e-06, + "output_cost_per_image_token": 3.2e-05, + "supported_endpoints": [ + "/v1/images/generations" + ], + "supports_vision": true + }, + "gpt-5": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_flex": 6.25e-08, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_flex": 6.25e-07, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + 
"output_cost_per_token_flex": 5e-06, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5.1": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5.1-2025-11-13": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5.1-chat-latest": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": false, + "supports_native_streaming": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false, + 
"supports_vision": true + }, + "gpt-5.2": { + "cache_read_input_token_cost": 1.75e-07, + "cache_read_input_token_cost_priority": 3.5e-07, + "input_cost_per_token": 1.75e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "output_cost_per_token_priority": 2.8e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5.2-2025-12-11": { + "cache_read_input_token_cost": 1.75e-07, + "cache_read_input_token_cost_priority": 3.5e-07, + "input_cost_per_token": 1.75e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "output_cost_per_token_priority": 2.8e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5.2-chat-latest": { + "cache_read_input_token_cost": 1.75e-07, + "cache_read_input_token_cost_priority": 3.5e-07, + "input_cost_per_token": 1.75e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "output_cost_per_token_priority": 2.8e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5.2-pro": { + "input_cost_per_token": 2.1e-05, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.68e-04, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + 
"supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-5.2-pro-2025-12-11": { + "input_cost_per_token": 2.1e-05, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1.68e-04, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-5-pro": { + "input_cost_per_token": 1.5e-05, + "input_cost_per_token_batches": 7.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 272000, + "max_tokens": 272000, + "mode": "responses", + "output_cost_per_token": 1.2e-04, + "output_cost_per_token_batches": 6e-05, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": false, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-5-pro-2025-10-06": { + "input_cost_per_token": 1.5e-05, + "input_cost_per_token_batches": 7.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 272000, + "max_tokens": 272000, + "mode": "responses", + "output_cost_per_token": 1.2e-04, + "output_cost_per_token_batches": 6e-05, + "supported_endpoints": [ + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": false, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "gpt-5-2025-08-07": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_flex": 6.25e-08, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_flex": 6.25e-07, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "output_cost_per_token_flex": 5e-06, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + 
"supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5-chat": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": false, + "supports_native_streaming": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "gpt-5-chat-latest": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": false, + "supports_native_streaming": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "gpt-5-codex": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5.1-codex": { + "cache_read_input_token_cost": 1.25e-07, + "cache_read_input_token_cost_priority": 2.5e-07, + "input_cost_per_token": 1.25e-06, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "output_cost_per_token_priority": 2e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + 
"supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5.1-codex-max": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openai", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 1e-05, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5.1-codex-mini": { + "cache_read_input_token_cost": 2.5e-08, + "cache_read_input_token_cost_priority": 4.5e-08, + "input_cost_per_token": 2.5e-07, + "input_cost_per_token_priority": 4.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "responses", + "output_cost_per_token": 2e-06, + "output_cost_per_token_priority": 3.6e-06, + "supported_endpoints": [ + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5-mini": { + "cache_read_input_token_cost": 2.5e-08, + "cache_read_input_token_cost_flex": 1.25e-08, + "cache_read_input_token_cost_priority": 4.5e-08, + "input_cost_per_token": 2.5e-07, + "input_cost_per_token_flex": 1.25e-07, + "input_cost_per_token_priority": 4.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "output_cost_per_token_flex": 1e-06, + "output_cost_per_token_priority": 3.6e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5-mini-2025-08-07": { + "cache_read_input_token_cost": 2.5e-08, + "cache_read_input_token_cost_flex": 1.25e-08, + "cache_read_input_token_cost_priority": 4.5e-08, + "input_cost_per_token": 2.5e-07, + "input_cost_per_token_flex": 1.25e-07, + "input_cost_per_token_priority": 4.5e-07, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "output_cost_per_token_flex": 1e-06, + "output_cost_per_token_priority": 3.6e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + 
"supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "gpt-5-nano": { + "cache_read_input_token_cost": 5e-09, + "cache_read_input_token_cost_flex": 2.5e-09, + "input_cost_per_token": 5e-08, + "input_cost_per_token_flex": 2.5e-08, + "input_cost_per_token_priority": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_flex": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-5-nano-2025-08-07": { + "cache_read_input_token_cost": 5e-09, + "cache_read_input_token_cost_flex": 2.5e-09, + "input_cost_per_token": 5e-08, + "input_cost_per_token_flex": 2.5e-08, + "litellm_provider": "openai", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "output_cost_per_token_flex": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "gpt-image-1": { + "input_cost_per_image": 0.042, + "input_cost_per_pixel": 4.0054321e-08, + "input_cost_per_token": 0.000005, + "input_cost_per_image_token": 0.00001, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "output_cost_per_token": 0.00004, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "gpt-image-1-mini": { + "cache_read_input_image_token_cost": 2.5e-07, + "cache_read_input_token_cost": 2e-07, + "input_cost_per_image_token": 2.5e-06, + "input_cost_per_token": 2e-06, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_image_token": 8e-06, + "supported_endpoints": [ + "/v1/images/generations", + "/v1/images/edits" + ] + }, + "gpt-realtime": { + "cache_creation_input_audio_token_cost": 4e-07, + "cache_read_input_token_cost": 4e-07, + "input_cost_per_audio_token": 3.2e-05, + "input_cost_per_image": 5e-06, + "input_cost_per_token": 4e-06, + "litellm_provider": "openai", + "max_input_tokens": 32000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 6.4e-05, + "output_cost_per_token": 
1.6e-05, + "supported_endpoints": [ + "/v1/realtime" + ], + "supported_modalities": [ + "text", + "image", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-realtime-mini": { + "cache_creation_input_audio_token_cost": 3e-07, + "cache_read_input_audio_token_cost": 3e-07, + "input_cost_per_audio_token": 1e-05, + "input_cost_per_token": 6e-07, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 2e-05, + "output_cost_per_token": 2.4e-06, + "supported_endpoints": [ + "/v1/realtime" + ], + "supported_modalities": [ + "text", + "image", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gpt-realtime-2025-08-28": { + "cache_creation_input_audio_token_cost": 4e-07, + "cache_read_input_token_cost": 4e-07, + "input_cost_per_audio_token": 3.2e-05, + "input_cost_per_image": 5e-06, + "input_cost_per_token": 4e-06, + "litellm_provider": "openai", + "max_input_tokens": 32000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_audio_token": 6.4e-05, + "output_cost_per_token": 1.6e-05, + "supported_endpoints": [ + "/v1/realtime" + ], + "supported_modalities": [ + "text", + "image", + "audio" + ], + "supported_output_modalities": [ + "text", + "audio" + ], + "supports_audio_input": true, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "gradient_ai/alibaba-qwen3-32b": { + "litellm_provider": "gradient_ai", + "max_tokens": 2048, + "mode": "chat", + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/anthropic-claude-3-opus": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "gradient_ai", + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/anthropic-claude-3.5-haiku": { + "input_cost_per_token": 8e-07, + "litellm_provider": "gradient_ai", + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/anthropic-claude-3.5-sonnet": { + "input_cost_per_token": 3e-06, + "litellm_provider": "gradient_ai", + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/anthropic-claude-3.7-sonnet": { + "input_cost_per_token": 3e-06, + "litellm_provider": "gradient_ai", + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + 
"supports_tool_choice": false + }, + "gradient_ai/deepseek-r1-distill-llama-70b": { + "input_cost_per_token": 9.9e-07, + "litellm_provider": "gradient_ai", + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 9.9e-07, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/llama3-8b-instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "gradient_ai", + "max_tokens": 512, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/llama3.3-70b-instruct": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "gradient_ai", + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 6.5e-07, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/mistral-nemo-instruct-2407": { + "input_cost_per_token": 3e-07, + "litellm_provider": "gradient_ai", + "max_tokens": 512, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/openai-gpt-4o": { + "litellm_provider": "gradient_ai", + "max_tokens": 16384, + "mode": "chat", + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/openai-gpt-4o-mini": { + "litellm_provider": "gradient_ai", + "max_tokens": 16384, + "mode": "chat", + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/openai-o3": { + "input_cost_per_token": 2e-06, + "litellm_provider": "gradient_ai", + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "gradient_ai/openai-o3-mini": { + "input_cost_per_token": 1.1e-06, + "litellm_provider": "gradient_ai", + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supported_endpoints": [ + "/v1/chat/completions" + ], + "supported_modalities": [ + "text" + ], + "supports_tool_choice": false + }, + "lemonade/Qwen3-Coder-30B-A3B-Instruct-GGUF": { + "input_cost_per_token": 0, + "litellm_provider": "lemonade", + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "lemonade/gpt-oss-20b-mxfp4-GGUF": { + "input_cost_per_token": 0, + "litellm_provider": "lemonade", + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "lemonade/gpt-oss-120b-mxfp-GGUF": { + "input_cost_per_token": 0, + "litellm_provider": "lemonade", + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "lemonade/Gemma-3-4b-it-GGUF": { + 
"input_cost_per_token": 0, + "litellm_provider": "lemonade", + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "lemonade/Qwen3-4B-Instruct-2507-GGUF": { + "input_cost_per_token": 0, + "litellm_provider": "lemonade", + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "amazon-nova/nova-micro-v1": { + "input_cost_per_token": 3.5e-08, + "litellm_provider": "amazon_nova", + "max_input_tokens": 128000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.4e-07, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, + "amazon-nova/nova-lite-v1": { + "input_cost_per_token": 6e-08, + "litellm_provider": "amazon_nova", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 2.4e-07, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "amazon-nova/nova-premier-v1": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "amazon_nova", + "max_input_tokens": 1000000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.25e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": false, + "supports_response_schema": true, + "supports_vision": true + }, + "amazon-nova/nova-pro-v1": { + "input_cost_per_token": 8e-07, + "litellm_provider": "amazon_nova", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.2e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "groq/deepseek-r1-distill-llama-70b": { + "input_cost_per_token": 7.5e-07, + "litellm_provider": "groq", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9.9e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/distil-whisper-large-v3-en": { + "input_cost_per_second": 5.56e-06, + "litellm_provider": "groq", + "mode": "audio_transcription", + "output_cost_per_second": 0.0 + }, + "groq/gemma-7b-it": { + "deprecation_date": "2024-12-18", + "input_cost_per_token": 7e-08, + "litellm_provider": "groq", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7e-08, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/gemma2-9b-it": { + "input_cost_per_token": 2e-07, + "litellm_provider": "groq", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": false, + "supports_response_schema": false, + "supports_tool_choice": false + }, + "groq/llama-3.1-405b-reasoning": { + "input_cost_per_token": 
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 7.9e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.1-70b-versatile": {
+    "deprecation_date": "2025-01-24",
+    "input_cost_per_token": 5.9e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 7.9e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.1-8b-instant": {
+    "input_cost_per_token": 5e-08,
+    "litellm_provider": "groq",
+    "max_input_tokens": 128000,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 8e-08,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.2-11b-text-preview": {
+    "deprecation_date": "2024-10-28",
+    "input_cost_per_token": 1.8e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 1.8e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.2-11b-vision-preview": {
+    "deprecation_date": "2025-04-14",
+    "input_cost_per_token": 1.8e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 1.8e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true,
+    "supports_vision": true
+  },
+  "groq/llama-3.2-1b-preview": {
+    "deprecation_date": "2025-04-14",
+    "input_cost_per_token": 4e-08,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 4e-08,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.2-3b-preview": {
+    "deprecation_date": "2025-04-14",
+    "input_cost_per_token": 6e-08,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 6e-08,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.2-90b-text-preview": {
+    "deprecation_date": "2024-11-25",
+    "input_cost_per_token": 9e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 9e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true
+  },
+  "groq/llama-3.2-90b-vision-preview": {
+    "deprecation_date": "2025-04-14",
+    "input_cost_per_token": 9e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
+    "output_cost_per_token": 9e-07,
+    "supports_function_calling": true,
+    "supports_response_schema": false,
+    "supports_tool_choice": true,
+    "supports_vision": true
+  },
+  "groq/llama-3.3-70b-specdec": {
+    "deprecation_date": "2025-04-14",
+    "input_cost_per_token": 5.9e-07,
+    "litellm_provider": "groq",
+    "max_input_tokens": 8192,
+    "max_output_tokens": 8192,
+    "max_tokens": 8192,
+    "mode": "chat",
"chat", + "output_cost_per_token": 9.9e-07, + "supports_tool_choice": true + }, + "groq/llama-3.3-70b-versatile": { + "input_cost_per_token": 5.9e-07, + "litellm_provider": "groq", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 7.9e-07, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/llama-guard-3-8b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "groq", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "groq/llama2-70b-4096": { + "input_cost_per_token": 7e-07, + "litellm_provider": "groq", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 8e-07, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/llama3-groq-70b-8192-tool-use-preview": { + "deprecation_date": "2025-01-06", + "input_cost_per_token": 8.9e-07, + "litellm_provider": "groq", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 8.9e-07, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/llama3-groq-8b-8192-tool-use-preview": { + "deprecation_date": "2025-01-06", + "input_cost_per_token": 1.9e-07, + "litellm_provider": "groq", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.9e-07, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/meta-llama/llama-4-maverick-17b-128e-instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "groq", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "groq/meta-llama/llama-4-scout-17b-16e-instruct": { + "input_cost_per_token": 1.1e-07, + "litellm_provider": "groq", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3.4e-07, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "groq/mistral-saba-24b": { + "input_cost_per_token": 7.9e-07, + "litellm_provider": "groq", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.9e-07 + }, + "groq/mixtral-8x7b-32768": { + "deprecation_date": "2025-03-20", + "input_cost_per_token": 2.4e-07, + "litellm_provider": "groq", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 2.4e-07, + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/moonshotai/kimi-k2-instruct": { + "input_cost_per_token": 1e-06, + "litellm_provider": "groq", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "groq/moonshotai/kimi-k2-instruct-0905": { + "input_cost_per_token": 1e-06, + 
"output_cost_per_token": 3e-06, + "cache_read_input_token_cost": 0.5e-06, + "litellm_provider": "groq", + "max_input_tokens": 262144, + "max_output_tokens": 16384, + "max_tokens": 278528, + "mode": "chat", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "groq/openai/gpt-oss-120b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "groq", + "max_input_tokens": 131072, + "max_output_tokens": 32766, + "max_tokens": 32766, + "mode": "chat", + "output_cost_per_token": 7.5e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "groq/openai/gpt-oss-20b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "groq", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "groq/playai-tts": { + "input_cost_per_character": 5e-05, + "litellm_provider": "groq", + "max_input_tokens": 10000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "audio_speech" + }, + "groq/qwen/qwen3-32b": { + "input_cost_per_token": 2.9e-07, + "litellm_provider": "groq", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 5.9e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true + }, + "groq/whisper-large-v3": { + "input_cost_per_second": 3.083e-05, + "litellm_provider": "groq", + "mode": "audio_transcription", + "output_cost_per_second": 0.0 + }, + "groq/whisper-large-v3-turbo": { + "input_cost_per_second": 1.111e-05, + "litellm_provider": "groq", + "mode": "audio_transcription", + "output_cost_per_second": 0.0 + }, + "hd/1024-x-1024/dall-e-3": { + "input_cost_per_pixel": 7.629e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "hd/1024-x-1792/dall-e-3": { + "input_cost_per_pixel": 6.539e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "hd/1792-x-1024/dall-e-3": { + "input_cost_per_pixel": 6.539e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "heroku/claude-3-5-haiku": { + "litellm_provider": "heroku", + "max_tokens": 4096, + "mode": "chat", + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "heroku/claude-3-5-sonnet-latest": { + "litellm_provider": "heroku", + "max_tokens": 8192, + "mode": "chat", + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "heroku/claude-3-7-sonnet": { + "litellm_provider": "heroku", + "max_tokens": 8192, + "mode": "chat", + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "heroku/claude-4-sonnet": { + "litellm_provider": "heroku", + "max_tokens": 8192, + "mode": "chat", + "supports_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "high/1024-x-1024/gpt-image-1": { + "input_cost_per_image": 0.167, + 
"input_cost_per_pixel": 1.59263611e-07, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "high/1024-x-1536/gpt-image-1": { + "input_cost_per_image": 0.25, + "input_cost_per_pixel": 1.58945719e-07, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "high/1536-x-1024/gpt-image-1": { + "input_cost_per_image": 0.25, + "input_cost_per_pixel": 1.58945719e-07, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "hyperbolic/NousResearch/Hermes-3-Llama-3.1-70B": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/Qwen/QwQ-32B": { + "input_cost_per_token": 2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/Qwen/Qwen2.5-72B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/Qwen/Qwen2.5-Coder-32B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/Qwen/Qwen3-235B-A22B": { + "input_cost_per_token": 2e-06, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/deepseek-ai/DeepSeek-R1": { + "input_cost_per_token": 4e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/deepseek-ai/DeepSeek-R1-0528": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + 
"supports_tool_choice": true + }, + "hyperbolic/deepseek-ai/DeepSeek-V3": { + "input_cost_per_token": 2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/deepseek-ai/DeepSeek-V3-0324": { + "input_cost_per_token": 4e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Llama-3.2-3B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Llama-3.3-70B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Meta-Llama-3-70B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Meta-Llama-3.1-405B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Meta-Llama-3.1-70B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/meta-llama/Meta-Llama-3.1-8B-Instruct": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "hyperbolic", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "hyperbolic/moonshotai/Kimi-K2-Instruct": { + "input_cost_per_token": 2e-06, + "litellm_provider": "hyperbolic", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + 
"output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "j2-light": { + "input_cost_per_token": 3e-06, + "litellm_provider": "ai21", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 3e-06 + }, + "j2-mid": { + "input_cost_per_token": 1e-05, + "litellm_provider": "ai21", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 1e-05 + }, + "j2-ultra": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "ai21", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 1.5e-05 + }, + "jamba-1.5": { + "input_cost_per_token": 2e-07, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "jamba-1.5-large": { + "input_cost_per_token": 2e-06, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "jamba-1.5-large@001": { + "input_cost_per_token": 2e-06, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "jamba-1.5-mini": { + "input_cost_per_token": 2e-07, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "jamba-1.5-mini@001": { + "input_cost_per_token": 2e-07, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "jamba-large-1.6": { + "input_cost_per_token": 2e-06, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "jamba-large-1.7": { + "input_cost_per_token": 2e-06, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "jamba-mini-1.6": { + "input_cost_per_token": 2e-07, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "jamba-mini-1.7": { + "input_cost_per_token": 2e-07, + "litellm_provider": "ai21", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "jina-reranker-v2-base-multilingual": { + "input_cost_per_token": 1.8e-08, + "litellm_provider": "jina_ai", + "max_document_chunks_per_query": 2048, + "max_input_tokens": 1024, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "rerank", + "output_cost_per_token": 1.8e-08 + }, + "jp.anthropic.claude-sonnet-4-5-20250929-v1:0": { + 
"cache_creation_input_token_cost": 4.125e-06, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_token": 3.3e-06, + "input_cost_per_token_above_200k_tokens": 6.6e-06, + "output_cost_per_token_above_200k_tokens": 2.475e-05, + "cache_creation_input_token_cost_above_200k_tokens": 8.25e-06, + "cache_read_input_token_cost_above_200k_tokens": 6.6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "jp.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.375e-06, + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5.5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "lambda_ai/deepseek-llama3.3-70b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/deepseek-r1-0528": { + "input_cost_per_token": 2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/deepseek-r1-671b": { + "input_cost_per_token": 8e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/deepseek-v3-0324": { + "input_cost_per_token": 2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/hermes3-405b": { + 
"input_cost_per_token": 8e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/hermes3-70b": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/hermes3-8b": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-08, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/lfm-40b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/lfm-7b": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-08, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama-4-maverick-17b-128e-instruct-fp8": { + "input_cost_per_token": 5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama-4-scout-17b-16e-instruct": { + "input_cost_per_token": 5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 16384, + "max_output_tokens": 8192, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama3.1-405b-instruct-fp8": { + "input_cost_per_token": 8e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama3.1-70b-instruct-fp8": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + 
"lambda_ai/llama3.1-8b-instruct": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-08, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama3.1-nemotron-70b-instruct-fp8": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama3.2-11b-vision-instruct": { + "input_cost_per_token": 1.5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-08, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "lambda_ai/llama3.2-3b-instruct": { + "input_cost_per_token": 1.5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-08, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/llama3.3-70b-instruct-fp8": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/qwen25-coder-32b-instruct": { + "input_cost_per_token": 5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "lambda_ai/qwen3-32b-fp8": { + "input_cost_per_token": 5e-08, + "litellm_provider": "lambda_ai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_system_messages": true, + "supports_tool_choice": true + }, + "low/1024-x-1024/gpt-image-1": { + "input_cost_per_image": 0.011, + "input_cost_per_pixel": 1.0490417e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "low/1024-x-1536/gpt-image-1": { + "input_cost_per_image": 0.016, + "input_cost_per_pixel": 1.0172526e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "low/1536-x-1024/gpt-image-1": { + "input_cost_per_image": 0.016, + "input_cost_per_pixel": 1.0172526e-08, + "litellm_provider": "openai", + 
"mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "luminous-base": { + "input_cost_per_token": 3e-05, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "completion", + "output_cost_per_token": 3.3e-05 + }, + "luminous-base-control": { + "input_cost_per_token": 3.75e-05, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 4.125e-05 + }, + "luminous-extended": { + "input_cost_per_token": 4.5e-05, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "completion", + "output_cost_per_token": 4.95e-05 + }, + "luminous-extended-control": { + "input_cost_per_token": 5.625e-05, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 6.1875e-05 + }, + "luminous-supreme": { + "input_cost_per_token": 0.000175, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "completion", + "output_cost_per_token": 0.0001925 + }, + "luminous-supreme-control": { + "input_cost_per_token": 0.00021875, + "litellm_provider": "aleph_alpha", + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 0.000240625 + }, + "max-x-max/50-steps/stability.stable-diffusion-xl-v0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.036 + }, + "max-x-max/max-steps/stability.stable-diffusion-xl-v0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.072 + }, + "medium/1024-x-1024/gpt-image-1": { + "input_cost_per_image": 0.042, + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medium/1024-x-1536/gpt-image-1": { + "input_cost_per_image": 0.063, + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medium/1536-x-1024/gpt-image-1": { + "input_cost_per_image": 0.063, + "input_cost_per_pixel": 4.0054321e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "low/1024-x-1024/gpt-image-1-mini": { + "input_cost_per_image": 0.005, + "litellm_provider": "openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "low/1024-x-1536/gpt-image-1-mini": { + "input_cost_per_image": 0.006, + "litellm_provider": "openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "low/1536-x-1024/gpt-image-1-mini": { + "input_cost_per_image": 0.006, + "litellm_provider": "openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medium/1024-x-1024/gpt-image-1-mini": { + "input_cost_per_image": 0.011, + "litellm_provider": "openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medium/1024-x-1536/gpt-image-1-mini": { + "input_cost_per_image": 0.015, + "litellm_provider": "openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medium/1536-x-1024/gpt-image-1-mini": { + "input_cost_per_image": 0.015, + "litellm_provider": 
"openai", + "mode": "image_generation", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "medlm-large": { + "input_cost_per_character": 5e-06, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "chat", + "output_cost_per_character": 1.5e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "medlm-medium": { + "input_cost_per_character": 5e-07, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 32768, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_character": 1e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models", + "supports_tool_choice": true + }, + "meta.llama2-13b-chat-v1": { + "input_cost_per_token": 7.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "meta.llama2-70b-chat-v1": { + "input_cost_per_token": 1.95e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.56e-06 + }, + "meta.llama3-1-405b-instruct-v1:0": { + "input_cost_per_token": 5.32e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.6e-05, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-1-70b-instruct-v1:0": { + "input_cost_per_token": 9.9e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9.9e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-1-8b-instruct-v1:0": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.2e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-2-11b-instruct-v1:0": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3.5e-07, + "supports_function_calling": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "meta.llama3-2-1b-instruct-v1:0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-2-3b-instruct-v1:0": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-2-90b-instruct-v1:0": { + "input_cost_per_token": 2e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + 
"supports_tool_choice": false, + "supports_vision": true + }, + "meta.llama3-3-70b-instruct-v1:0": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.2e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama3-70b-instruct-v1:0": { + "input_cost_per_token": 2.65e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3.5e-06 + }, + "meta.llama3-8b-instruct-v1:0": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "meta.llama4-maverick-17b-instruct-v1:0": { + "input_cost_per_token": 2.4e-07, + "input_cost_per_token_batches": 1.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 9.7e-07, + "output_cost_per_token_batches": 4.85e-07, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta.llama4-scout-17b-instruct-v1:0": { + "input_cost_per_token": 1.7e-07, + "input_cost_per_token_batches": 8.5e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "output_cost_per_token_batches": 3.3e-07, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": false + }, + "meta_llama/Llama-3.3-70B-Instruct": { + "litellm_provider": "meta_llama", + "max_input_tokens": 128000, + "max_output_tokens": 4028, + "max_tokens": 128000, + "mode": "chat", + "source": "https://llama.developer.meta.com/docs/models", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "meta_llama/Llama-3.3-8B-Instruct": { + "litellm_provider": "meta_llama", + "max_input_tokens": 128000, + "max_output_tokens": 4028, + "max_tokens": 128000, + "mode": "chat", + "source": "https://llama.developer.meta.com/docs/models", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "meta_llama/Llama-4-Maverick-17B-128E-Instruct-FP8": { + "litellm_provider": "meta_llama", + "max_input_tokens": 1000000, + "max_output_tokens": 4028, + "max_tokens": 128000, + "mode": "chat", + "source": "https://llama.developer.meta.com/docs/models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "meta_llama/Llama-4-Scout-17B-16E-Instruct-FP8": { + "litellm_provider": "meta_llama", + "max_input_tokens": 10000000, + "max_output_tokens": 4028, + "max_tokens": 128000, + "mode": "chat", + "source": "https://llama.developer.meta.com/docs/models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + 
"supports_tool_choice": true + }, + "minimax.minimax-m2": { + "input_cost_per_token": 3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "supports_system_messages": true + }, + "mistral.magistral-small-2509": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_system_messages": true + }, + "mistral.ministral-3-14b-instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": true, + "supports_system_messages": true + }, + "mistral.ministral-3-3b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_system_messages": true + }, + "mistral.ministral-3-8b-instruct": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_function_calling": true, + "supports_system_messages": true + }, + "mistral.mistral-7b-instruct-v0:2": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "mistral.mistral-large-2402-v1:0": { + "input_cost_per_token": 8e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_function_calling": true + }, + "mistral.mistral-large-2407-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 9e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "mistral.mistral-large-3-675b-instruct": { + "input_cost_per_token": 5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_system_messages": true + }, + "mistral.mistral-small-2402-v1:0": { + "input_cost_per_token": 1e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true + }, + "mistral.mixtral-8x7b-instruct-v0:1": { + "input_cost_per_token": 4.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 7e-07, + "supports_tool_choice": true + }, + "mistral.voxtral-mini-3b-2507": { + "input_cost_per_token": 4e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + 
"max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-08, + "supports_audio_input": true, + "supports_system_messages": true + }, + "mistral.voxtral-small-24b-2507": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_audio_input": true, + "supports_system_messages": true + }, + "mistral/codestral-2405": { + "input_cost_per_token": 1e-06, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/codestral-2508": { + "input_cost_per_token": 3e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 9e-07, + "source": "https://mistral.ai/news/codestral-25-08", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/codestral-latest": { + "input_cost_per_token": 1e-06, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/codestral-mamba-latest": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "source": "https://mistral.ai/technology/", + "supports_assistant_prefill": true, + "supports_tool_choice": true + }, + "mistral/devstral-medium-2507": { + "input_cost_per_token": 4e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://mistral.ai/news/devstral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/devstral-small-2505": { + "input_cost_per_token": 1e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://mistral.ai/news/devstral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/devstral-small-2507": { + "input_cost_per_token": 1e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://mistral.ai/news/devstral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/labs-devstral-small-2512": { + "input_cost_per_token": 1e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": 
"https://docs.mistral.ai/models/devstral-small-2-25-12", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/devstral-2512": { + "input_cost_per_token": 4e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://mistral.ai/news/devstral-2-vibe-cli", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/magistral-medium-2506": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 40000, + "max_output_tokens": 40000, + "max_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://mistral.ai/news/magistral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/magistral-medium-2509": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 40000, + "max_output_tokens": 40000, + "max_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://mistral.ai/news/magistral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-ocr-latest": { + "litellm_provider": "mistral", + "ocr_cost_per_page": 1e-3, + "annotation_cost_per_page": 3e-3, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://mistral.ai/pricing#api-pricing" + }, + "mistral/mistral-ocr-2505-completion": { + "litellm_provider": "mistral", + "ocr_cost_per_page": 1e-3, + "annotation_cost_per_page": 3e-3, + "mode": "ocr", + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://mistral.ai/pricing#api-pricing" + }, + "mistral/magistral-medium-latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 40000, + "max_output_tokens": 40000, + "max_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://mistral.ai/news/magistral", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/magistral-small-2506": { + "input_cost_per_token": 5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 40000, + "max_output_tokens": 40000, + "max_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://mistral.ai/pricing#api-pricing", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/magistral-small-latest": { + "input_cost_per_token": 5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 40000, + "max_output_tokens": 40000, + "max_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://mistral.ai/pricing#api-pricing", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-embed": { + "input_cost_per_token": 1e-07, + 
"litellm_provider": "mistral", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding" + }, + "mistral/codestral-embed": { + "input_cost_per_token": 0.15e-06, + "litellm_provider": "mistral", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding" + }, + "mistral/codestral-embed-2505": { + "input_cost_per_token": 0.15e-06, + "litellm_provider": "mistral", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding" + }, + "mistral/mistral-large-2402": { + "input_cost_per_token": 4e-06, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-large-2407": { + "input_cost_per_token": 3e-06, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-large-2411": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-large-latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-large-3": { + "input_cost_per_token": 5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://docs.mistral.ai/models/mistral-large-3-25-12", + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "mistral/mistral-medium": { + "input_cost_per_token": 2.7e-06, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 8.1e-06, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-medium-2312": { + "input_cost_per_token": 2.7e-06, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 8.1e-06, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-medium-2505": { + "input_cost_per_token": 4e-07, + "litellm_provider": "mistral", + "max_input_tokens": 131072, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + 
"mistral/mistral-medium-latest": { + "input_cost_per_token": 4e-07, + "litellm_provider": "mistral", + "max_input_tokens": 131072, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-small": { + "input_cost_per_token": 1e-07, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-small-latest": { + "input_cost_per_token": 1e-07, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/mistral-tiny": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/open-codestral-mamba": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "source": "https://mistral.ai/technology/", + "supports_assistant_prefill": true, + "supports_tool_choice": true + }, + "mistral/open-mistral-7b": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/open-mistral-nemo": { + "input_cost_per_token": 3e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://mistral.ai/technology/", + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/open-mistral-nemo-2407": { + "input_cost_per_token": 3e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://mistral.ai/technology/", + "supports_assistant_prefill": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/open-mixtral-8x22b": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 65336, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/open-mixtral-8x7b": { + "input_cost_per_token": 7e-07, + "litellm_provider": "mistral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + 
"output_cost_per_token": 7e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "mistral/pixtral-12b-2409": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "mistral/pixtral-large-2411": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "mistral/pixtral-large-latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "mistral", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot.kimi-k2-thinking": { + "input_cost_per_token": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "supports_reasoning": true, + "supports_system_messages": true + }, + "moonshot/kimi-k2-0711-preview": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 6e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "moonshot/kimi-k2-0905-preview": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 6e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "moonshot/kimi-k2-turbo-preview": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 1.15e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 8e-06, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "moonshot/kimi-latest": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": 
true, + "supports_vision": true + }, + "moonshot/kimi-latest-128k": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/kimi-latest-32k": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/kimi-latest-8k": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 2e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/kimi-thinking-preview": { + "cache_read_input_token_cost": 1.5e-07, + "input_cost_per_token": 6e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_vision": true + }, + "moonshot/kimi-k2-thinking": { + "cache_read_input_token_cost": 1.5e-7, + "input_cost_per_token": 6e-7, + "litellm_provider": "moonshot", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 2.5e-6, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "moonshot/kimi-k2-thinking-turbo": { + "cache_read_input_token_cost": 1.5e-7, + "input_cost_per_token": 1.15e-6, + "litellm_provider": "moonshot", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 8e-6, + "source": "https://platform.moonshot.ai/docs/pricing/chat#generation-model-kimi-k2", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "moonshot/moonshot-v1-128k": { + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-128k-0430": { + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-128k-vision-preview": { + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + 
"max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/moonshot-v1-32k": { + "input_cost_per_token": 1e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-32k-0430": { + "input_cost_per_token": 1e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-32k-vision-preview": { + "input_cost_per_token": 1e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/moonshot-v1-8k": { + "input_cost_per_token": 2e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-8k-0430": { + "input_cost_per_token": 2e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "moonshot/moonshot-v1-8k-vision-preview": { + "input_cost_per_token": 2e-07, + "litellm_provider": "moonshot", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "moonshot/moonshot-v1-auto": { + "input_cost_per_token": 2e-06, + "litellm_provider": "moonshot", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://platform.moonshot.ai/docs/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "morph/morph-v3-fast": { + "input_cost_per_token": 8e-07, + "litellm_provider": "morph", + "max_input_tokens": 16000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": false + }, + "morph/morph-v3-large": { + "input_cost_per_token": 9e-07, + "litellm_provider": "morph", + "max_input_tokens": 16000, + "max_output_tokens": 16000, + "max_tokens": 16000, + "mode": "chat", + "output_cost_per_token": 
1.9e-06, + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_system_messages": true, + "supports_tool_choice": false, + "supports_vision": false + }, + "multimodalembedding": { + "input_cost_per_character": 2e-07, + "input_cost_per_image": 0.0001, + "input_cost_per_token": 8e-07, + "input_cost_per_video_per_second": 0.0005, + "input_cost_per_video_per_second_above_15s_interval": 0.002, + "input_cost_per_video_per_second_above_8s_interval": 0.001, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models", + "supported_endpoints": [ + "/v1/embeddings" + ], + "supported_modalities": [ + "text", + "image", + "video" + ] + }, + "multimodalembedding@001": { + "input_cost_per_character": 2e-07, + "input_cost_per_image": 0.0001, + "input_cost_per_token": 8e-07, + "input_cost_per_video_per_second": 0.0005, + "input_cost_per_video_per_second_above_15s_interval": 0.002, + "input_cost_per_video_per_second_above_8s_interval": 0.001, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models", + "supported_endpoints": [ + "/v1/embeddings" + ], + "supported_modalities": [ + "text", + "image", + "video" + ] + }, + "nscale/Qwen/QwQ-32B": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "nscale", + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/Qwen/Qwen2.5-Coder-32B-Instruct": { + "input_cost_per_token": 6e-08, + "litellm_provider": "nscale", + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/Qwen/Qwen2.5-Coder-3B-Instruct": { + "input_cost_per_token": 1e-08, + "litellm_provider": "nscale", + "mode": "chat", + "output_cost_per_token": 3e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/Qwen/Qwen2.5-Coder-7B-Instruct": { + "input_cost_per_token": 1e-08, + "litellm_provider": "nscale", + "mode": "chat", + "output_cost_per_token": 3e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/black-forest-labs/FLUX.1-schnell": { + "input_cost_per_pixel": 1.3e-09, + "litellm_provider": "nscale", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#image-models", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Llama-70B": { + "input_cost_per_token": 3.75e-07, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.75/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 3.75e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Llama-8B": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.05/1M tokens total. Assumed 50/50 split for input/output." 
+ }, + "mode": "chat", + "output_cost_per_token": 2.5e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B": { + "input_cost_per_token": 9e-08, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.18/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 9e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Qwen-14B": { + "input_cost_per_token": 7e-08, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.14/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 7e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.30/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B": { + "input_cost_per_token": 2e-07, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.40/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/meta-llama/Llama-3.1-8B-Instruct": { + "input_cost_per_token": 3e-08, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.06/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 3e-08, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/meta-llama/Llama-3.3-70B-Instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $0.40/1M tokens total. Assumed 50/50 split for input/output." + }, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/meta-llama/Llama-4-Scout-17B-16E-Instruct": { + "input_cost_per_token": 9e-08, + "litellm_provider": "nscale", + "mode": "chat", + "output_cost_per_token": 2.9e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/mistralai/mixtral-8x22b-instruct-v0.1": { + "input_cost_per_token": 6e-07, + "litellm_provider": "nscale", + "metadata": { + "notes": "Pricing listed as $1.20/1M tokens total. Assumed 50/50 split for input/output." 
+ }, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#chat-models" + }, + "nscale/stabilityai/stable-diffusion-xl-base-1.0": { + "input_cost_per_pixel": 3e-09, + "litellm_provider": "nscale", + "mode": "image_generation", + "output_cost_per_pixel": 0.0, + "source": "https://docs.nscale.com/docs/inference/serverless-models/current#image-models", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "nvidia.nemotron-nano-12b-v2": { + "input_cost_per_token": 2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_system_messages": true, + "supports_vision": true + }, + "nvidia.nemotron-nano-9b-v2": { + "input_cost_per_token": 6e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.3e-07, + "supports_system_messages": true + }, + "o1": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o1-2024-12-17": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o1-mini": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_vision": true + }, + "o1-mini-2024-09-12": { + "deprecation_date": "2025-10-27", + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 3e-06, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": true + }, + "o1-preview": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openai", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": true + }, + "o1-preview-2024-09-12": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openai", + 
"max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_vision": true + }, + "o1-pro": { + "input_cost_per_token": 0.00015, + "input_cost_per_token_batches": 7.5e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 0.0006, + "output_cost_per_token_batches": 0.0003, + "supported_endpoints": [ + "/v1/responses", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": false, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o1-pro-2025-03-19": { + "input_cost_per_token": 0.00015, + "input_cost_per_token_batches": 7.5e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 0.0006, + "output_cost_per_token_batches": 0.0003, + "supported_endpoints": [ + "/v1/responses", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": false, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o3": { + "cache_read_input_token_cost": 5e-07, + "cache_read_input_token_cost_flex": 2.5e-07, + "cache_read_input_token_cost_priority": 8.75e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_flex": 1e-06, + "input_cost_per_token_priority": 3.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "output_cost_per_token_flex": 4e-06, + "output_cost_per_token_priority": 1.4e-05, + "supported_endpoints": [ + "/v1/responses", + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "o3-2025-04-16": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supported_endpoints": [ + "/v1/responses", + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": 
true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "o3-deep-research": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_token": 1e-05, + "input_cost_per_token_batches": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 4e-05, + "output_cost_per_token_batches": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o3-deep-research-2025-06-26": { + "cache_read_input_token_cost": 2.5e-06, + "input_cost_per_token": 1e-05, + "input_cost_per_token_batches": 5e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 4e-05, + "output_cost_per_token_batches": 2e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o3-mini": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "o3-mini-2025-01-31": { + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "o3-pro": { + "input_cost_per_token": 2e-05, + "input_cost_per_token_batches": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 8e-05, + "output_cost_per_token_batches": 4e-05, + "supported_endpoints": [ + "/v1/responses", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + 
"supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o3-pro-2025-06-10": { + "input_cost_per_token": 2e-05, + "input_cost_per_token_batches": 1e-05, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 8e-05, + "output_cost_per_token_batches": 4e-05, + "supported_endpoints": [ + "/v1/responses", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o4-mini": { + "cache_read_input_token_cost": 2.75e-07, + "cache_read_input_token_cost_flex": 1.375e-07, + "cache_read_input_token_cost_priority": 5e-07, + "input_cost_per_token": 1.1e-06, + "input_cost_per_token_flex": 5.5e-07, + "input_cost_per_token_priority": 2e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "output_cost_per_token_flex": 2.2e-06, + "output_cost_per_token_priority": 8e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "o4-mini-2025-04-16": { + "cache_read_input_token_cost": 2.75e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_service_tier": true, + "supports_vision": true + }, + "o4-mini-deep-research": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + "output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "o4-mini-deep-research-2025-06-26": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "openai", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "responses", + 
"output_cost_per_token": 8e-06, + "output_cost_per_token_batches": 4e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/batch", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "oci/meta.llama-3.1-405b-instruct": { + "input_cost_per_token": 1.068e-05, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.068e-05, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/meta.llama-3.2-90b-vision-instruct": { + "input_cost_per_token": 2e-06, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/meta.llama-3.3-70b-instruct": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 7.2e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/meta.llama-4-maverick-17b-128e-instruct-fp8": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "oci", + "max_input_tokens": 512000, + "max_output_tokens": 4000, + "max_tokens": 512000, + "mode": "chat", + "output_cost_per_token": 7.2e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/meta.llama-4-scout-17b-16e-instruct": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "oci", + "max_input_tokens": 192000, + "max_output_tokens": 4000, + "max_tokens": 192000, + "mode": "chat", + "output_cost_per_token": 7.2e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/xai.grok-3": { + "input_cost_per_token": 3e-06, + "litellm_provider": "oci", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/xai.grok-3-fast": { + "input_cost_per_token": 5e-06, + "litellm_provider": "oci", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + 
}, + "oci/xai.grok-3-mini": { + "input_cost_per_token": 3e-07, + "litellm_provider": "oci", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/xai.grok-3-mini-fast": { + "input_cost_per_token": 6e-07, + "litellm_provider": "oci", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-06, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/xai.grok-4": { + "input_cost_per_token": 3e-06, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://www.oracle.com/artificial-intelligence/generative-ai/generative-ai-service/pricing", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/cohere.command-latest": { + "input_cost_per_token": 1.56e-06, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.56e-06, + "source": "https://www.oracle.com/cloud/ai/generative-ai/pricing/", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/cohere.command-a-03-2025": { + "input_cost_per_token": 1.56e-06, + "litellm_provider": "oci", + "max_input_tokens": 256000, + "max_output_tokens": 4000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.56e-06, + "source": "https://www.oracle.com/cloud/ai/generative-ai/pricing/", + "supports_function_calling": true, + "supports_response_schema": false + }, + "oci/cohere.command-plus-latest": { + "input_cost_per_token": 1.56e-06, + "litellm_provider": "oci", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.56e-06, + "source": "https://www.oracle.com/cloud/ai/generative-ai/pricing/", + "supports_function_calling": true, + "supports_response_schema": false + }, + "ollama/codegeex4": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 8192, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": false + }, + "ollama/codegemma": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "ollama/codellama": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "ollama/deepseek-coder-v2-base": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/deepseek-coder-v2-instruct": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 8192, + 
"max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/deepseek-coder-v2-lite-base": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/deepseek-coder-v2-lite-instruct": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 8192, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/deepseek-v3.1:671b-cloud" : { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/gpt-oss:120b-cloud" : { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/gpt-oss:20b-cloud" : { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/internlm2_5-20b-chat": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 8192, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/llama2": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/llama2-uncensored": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "ollama/llama2:13b": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/llama2:70b": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/llama2:7b": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/llama3": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/llama3.1": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/llama3:70b": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + 
"output_cost_per_token": 0.0 + }, + "ollama/llama3:8b": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "ollama/mistral": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "completion", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/mistral-7B-Instruct-v0.1": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/mistral-7B-Instruct-v0.2": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/mistral-large-instruct-2407": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/mixtral-8x22B-Instruct-v0.1": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/mixtral-8x7B-Instruct-v0.1": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/orca-mini": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "ollama/qwen3-coder:480b-cloud": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_function_calling": true + }, + "ollama/vicuna": { + "input_cost_per_token": 0.0, + "litellm_provider": "ollama", + "max_input_tokens": 2048, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "omni-moderation-2024-09-26": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "omni-moderation-latest": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "omni-moderation-latest-intents": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "openai.gpt-oss-120b-1:0": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + 
"output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "openai.gpt-oss-20b-1:0": { + "input_cost_per_token": 7e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "openai.gpt-oss-safeguard-120b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_system_messages": true + }, + "openai.gpt-oss-safeguard-20b": { + "input_cost_per_token": 7e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_system_messages": true + }, + "openrouter/anthropic/claude-2": { + "input_cost_per_token": 1.102e-05, + "litellm_provider": "openrouter", + "max_output_tokens": 8191, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 3.268e-05, + "supports_tool_choice": true + }, + "openrouter/anthropic/claude-3-5-haiku": { + "input_cost_per_token": 1e-06, + "litellm_provider": "openrouter", + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "openrouter/anthropic/claude-3-5-haiku-20241022": { + "input_cost_per_token": 1e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "tool_use_system_prompt_tokens": 264 + }, + "openrouter/anthropic/claude-3-haiku": { + "input_cost_per_image": 0.0004, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "openrouter", + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/anthropic/claude-3-haiku-20240307": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 264 + }, + "openrouter/anthropic/claude-3-opus": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 395 + }, + "openrouter/anthropic/claude-3-sonnet": { + "input_cost_per_image": 0.0048, + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/anthropic/claude-3.5-sonnet": { + "input_cost_per_token": 3e-06, + "litellm_provider": 
"openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-3.5-sonnet:beta": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-3.7-sonnet": { + "input_cost_per_image": 0.0048, + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-3.7-sonnet:beta": { + "input_cost_per_image": 0.0048, + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-instant-v1": { + "input_cost_per_token": 1.63e-06, + "litellm_provider": "openrouter", + "max_output_tokens": 8191, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 5.51e-06, + "supports_tool_choice": true + }, + "openrouter/anthropic/claude-opus-4": { + "input_cost_per_image": 0.0048, + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-opus-4.1": { + "input_cost_per_image": 0.0048, + "cache_creation_input_token_cost": 1.875e-05, + "cache_creation_input_token_cost_above_1hr": 3e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-sonnet-4": { + "input_cost_per_image": 0.0048, + "cache_creation_input_token_cost": 
3.75e-06, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost": 3e-07, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-opus-4.5": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-sonnet-4.5": { + "input_cost_per_image": 0.0048, + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "openrouter/anthropic/claude-haiku-4.5": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "openrouter/bytedance/ui-tars-1.5-7b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 131072, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://openrouter.ai/api/v1/models/bytedance/ui-tars-1.5-7b", + "supports_tool_choice": true + }, + "openrouter/cognitivecomputations/dolphin-mixtral-8x7b": { + "input_cost_per_token": 5e-07, + "litellm_provider": "openrouter", + "max_tokens": 32769, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "openrouter/cohere/command-r-plus": { + "input_cost_per_token": 3e-06, + 
"litellm_provider": "openrouter", + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_tool_choice": true + }, + "openrouter/databricks/dbrx-instruct": { + "input_cost_per_token": 6e-07, + "litellm_provider": "openrouter", + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-chat": { + "input_cost_per_token": 1.4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.8e-07, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-chat-v3-0324": { + "input_cost_per_token": 1.4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.8e-07, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-chat-v3.1": { + "input_cost_per_token": 2e-07, + "input_cost_per_token_cache_hit": 2e-08, + "litellm_provider": "openrouter", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 8e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-v3.2": { + "input_cost_per_token": 2.8e-07, + "input_cost_per_token_cache_hit": 2.8e-08, + "litellm_provider": "openrouter", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-v3.2-exp": { + "input_cost_per_token": 2e-07, + "input_cost_per_token_cache_hit": 2e-08, + "litellm_provider": "openrouter", + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": false, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-coder": { + "input_cost_per_token": 1.4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 66000, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.8e-07, + "supports_prompt_caching": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-r1": { + "input_cost_per_token": 5.5e-07, + "input_cost_per_token_cache_hit": 1.4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 65336, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/deepseek/deepseek-r1-0528": { + "input_cost_per_token": 5e-07, + "input_cost_per_token_cache_hit": 1.4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 65336, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.15e-06, + "supports_assistant_prefill": true, + 
"supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/fireworks/firellava-13b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "openrouter/google/gemini-2.0-flash-001": { + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/google/gemini-2.5-flash": { + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 3e-07, + "litellm_provider": "openrouter", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/google/gemini-2.5-pro": { + "input_cost_per_audio_token": 7e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openrouter", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_pdf_size_mb": 30, + "max_tokens": 8192, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_audio_output": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/google/gemini-3-pro-preview": { + "cache_read_input_token_cost": 2e-07, + "cache_read_input_token_cost_above_200k_tokens": 4e-07, + "cache_creation_input_token_cost_above_200k_tokens": 2.5e-07, + "input_cost_per_token": 2e-06, + "input_cost_per_token_above_200k_tokens": 4e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "openrouter", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 1048576, + "max_output_tokens": 65535, + "max_pdf_size_mb": 30, + "max_tokens": 65535, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_above_200k_tokens": 1.8e-05, + "output_cost_per_token_batches": 6e-06, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text" + ], + "supports_audio_input": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, 
+ "supports_system_messages": true, + "supports_tool_choice": true, + "supports_video_input": true, + "supports_vision": true, + "supports_web_search": true + }, + "openrouter/google/gemini-pro-1.5": { + "input_cost_per_image": 0.00265, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 1000000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7.5e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/google/gemini-pro-vision": { + "input_cost_per_image": 0.0025, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "openrouter", + "max_tokens": 45875, + "mode": "chat", + "output_cost_per_token": 3.75e-07, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/google/palm-2-chat-bison": { + "input_cost_per_token": 5e-07, + "litellm_provider": "openrouter", + "max_tokens": 25804, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "openrouter/google/palm-2-codechat-bison": { + "input_cost_per_token": 5e-07, + "litellm_provider": "openrouter", + "max_tokens": 20070, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "openrouter/gryphe/mythomax-l2-13b": { + "input_cost_per_token": 1.875e-06, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.875e-06, + "supports_tool_choice": true + }, + "openrouter/jondurbin/airoboros-l2-70b-2.1": { + "input_cost_per_token": 1.3875e-05, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.3875e-05, + "supports_tool_choice": true + }, + "openrouter/mancer/weaver": { + "input_cost_per_token": 5.625e-06, + "litellm_provider": "openrouter", + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 5.625e-06, + "supports_tool_choice": true + }, + "openrouter/meta-llama/codellama-34b-instruct": { + "input_cost_per_token": 5e-07, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-2-13b-chat": { + "input_cost_per_token": 2e-07, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-2-70b-chat": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-3-70b-instruct": { + "input_cost_per_token": 5.9e-07, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7.9e-07, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-3-70b-instruct:nitro": { + "input_cost_per_token": 9e-07, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 9e-07, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-3-8b-instruct:extended": { + "input_cost_per_token": 2.25e-07, + "litellm_provider": "openrouter", + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2.25e-06, + "supports_tool_choice": true + }, + "openrouter/meta-llama/llama-3-8b-instruct:free": { + "input_cost_per_token": 0.0, + "litellm_provider": "openrouter", + "max_tokens": 8192, + 
"mode": "chat", + "output_cost_per_token": 0.0, + "supports_tool_choice": true + }, + "openrouter/microsoft/wizardlm-2-8x22b:nitro": { + "input_cost_per_token": 1e-06, + "litellm_provider": "openrouter", + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1e-06, + "supports_tool_choice": true + }, + "openrouter/minimax/minimax-m2": { + "input_cost_per_token": 2.55e-7, + "litellm_provider": "openrouter", + "max_input_tokens": 204800, + "max_output_tokens": 204800, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.02e-6, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/mistralai/devstral-2512:free": { + "input_cost_per_image": 0, + "input_cost_per_token": 0, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": null, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/mistralai/devstral-2512": { + "input_cost_per_image": 0, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": 65536, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/mistralai/ministral-3b-2512": { + "input_cost_per_image": 0, + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 131072, + "max_output_tokens": null, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/mistralai/ministral-8b-2512": { + "input_cost_per_image": 0, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": null, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/mistralai/ministral-14b-2512": { + "input_cost_per_image": 0, + "input_cost_per_token": 2e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": null, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/mistralai/mistral-large-2512": { + "input_cost_per_image": 0, + "input_cost_per_token": 5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 262144, + "max_output_tokens": null, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/mistralai/mistral-7b-instruct": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.3e-07, + "supports_tool_choice": true + }, + "openrouter/mistralai/mistral-7b-instruct:free": { + "input_cost_per_token": 0.0, + "litellm_provider": "openrouter", + 
"max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + "supports_tool_choice": true + }, + "openrouter/mistralai/mistral-large": { + "input_cost_per_token": 8e-06, + "litellm_provider": "openrouter", + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.4e-05, + "supports_tool_choice": true + }, + "openrouter/mistralai/mistral-small-3.1-24b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_tool_choice": true + }, + "openrouter/mistralai/mistral-small-3.2-24b-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 3e-07, + "supports_tool_choice": true + }, + "openrouter/mistralai/mixtral-8x22b-instruct": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "openrouter", + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 6.5e-07, + "supports_tool_choice": true + }, + "openrouter/nousresearch/nous-hermes-llama2-13b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2e-07, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-3.5-turbo": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "openrouter", + "max_tokens": 4095, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-3.5-turbo-16k": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_tokens": 16383, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-4": { + "input_cost_per_token": 3e-05, + "litellm_provider": "openrouter", + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-4-vision-preview": { + "input_cost_per_image": 0.01445, + "input_cost_per_token": 1e-05, + "litellm_provider": "openrouter", + "max_tokens": 130000, + "mode": "chat", + "output_cost_per_token": 3e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1-2025-04-14": { + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1-mini": { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 
32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1-mini-2025-04-14": { + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.6e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1-nano": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4.1-nano-2025-04-14": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4o": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-4o-2024-05-13": { + "input_cost_per_token": 5e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-5-chat": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-5-codex": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_modalities": [ + "text", + "image" + ], + 
"supported_output_modalities": [ + "text" + ], + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-5": { + "cache_read_input_token_cost": 1.25e-07, + "input_cost_per_token": 1.25e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-5-mini": { + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-5-nano": { + "cache_read_input_token_cost": 5e-09, + "input_cost_per_token": 5e-08, + "litellm_provider": "openrouter", + "max_input_tokens": 272000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text" + ], + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-5.2": { + "input_cost_per_image": 0, + "cache_read_input_token_cost": 1.75e-07, + "input_cost_per_token": 1.75e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-5.2-chat": { + "input_cost_per_image": 0, + "cache_read_input_token_cost": 1.75e-07, + "input_cost_per_token": 1.75e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-05, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-5.2-pro": { + "input_cost_per_image": 0, + "input_cost_per_token": 2.1e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 400000, + "max_output_tokens": 128000, + "max_tokens": 400000, + "mode": "chat", + "output_cost_per_token": 1.68e-04, + "supports_function_calling": true, + "supports_prompt_caching": false, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/gpt-oss-120b": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 8e-07, + "source": "https://openrouter.ai/openai/gpt-oss-120b", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "openrouter/openai/gpt-oss-20b": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + 
"output_cost_per_token": 8e-07, + "source": "https://openrouter.ai/openai/gpt-oss-20b", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "openrouter/openai/o1": { + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 100000, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/openai/o1-mini": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/openai/o1-mini-2024-09-12": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/openai/o1-preview": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/openai/o1-preview-2024-09-12": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/openai/o3-mini": { + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/openai/o3-mini-high": { + "input_cost_per_token": 1.1e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 128000, + "max_output_tokens": 65536, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 4.4e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "openrouter/pygmalionai/mythalion-13b": { + "input_cost_per_token": 1.875e-06, + "litellm_provider": "openrouter", + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.875e-06, + "supports_tool_choice": true + }, + "openrouter/qwen/qwen-2.5-coder-32b-instruct": { + "input_cost_per_token": 1.8e-07, + 
"litellm_provider": "openrouter", + "max_input_tokens": 33792, + "max_output_tokens": 33792, + "max_tokens": 33792, + "mode": "chat", + "output_cost_per_token": 1.8e-07, + "supports_tool_choice": true + }, + "openrouter/qwen/qwen-vl-plus": { + "input_cost_per_token": 2.1e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 8192, + "max_output_tokens": 2048, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 6.3e-07, + "supports_tool_choice": true, + "supports_vision": true + }, + "openrouter/qwen/qwen3-coder": { + "input_cost_per_token": 2.2e-7, + "litellm_provider": "openrouter", + "max_input_tokens": 262100, + "max_output_tokens": 262100, + "max_tokens": 262100, + "mode": "chat", + "output_cost_per_token": 9.5e-7, + "source": "https://openrouter.ai/qwen/qwen3-coder", + "supports_tool_choice": true, + "supports_function_calling": true + }, + "openrouter/switchpoint/router": { + "input_cost_per_token": 8.5e-07, + "litellm_provider": "openrouter", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3.4e-06, + "source": "https://openrouter.ai/switchpoint/router", + "supports_tool_choice": true + }, + "openrouter/undi95/remm-slerp-l2-13b": { + "input_cost_per_token": 1.875e-06, + "litellm_provider": "openrouter", + "max_tokens": 6144, + "mode": "chat", + "output_cost_per_token": 1.875e-06, + "supports_tool_choice": true + }, + "openrouter/x-ai/grok-4": { + "input_cost_per_token": 3e-06, + "litellm_provider": "openrouter", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": "https://openrouter.ai/x-ai/grok-4", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "openrouter/x-ai/grok-4-fast:free": { + "input_cost_per_token": 0, + "litellm_provider": "openrouter", + "max_input_tokens": 2000000, + "max_output_tokens": 30000, + "max_tokens": 2000000, + "mode": "chat", + "output_cost_per_token": 0, + "source": "https://openrouter.ai/x-ai/grok-4-fast:free", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true, + "supports_web_search": false + }, + "openrouter/z-ai/glm-4.6": { + "input_cost_per_token": 4.0e-7, + "litellm_provider": "openrouter", + "max_input_tokens": 202800, + "max_output_tokens": 131000, + "max_tokens": 202800, + "mode": "chat", + "output_cost_per_token": 1.75e-6, + "source": "https://openrouter.ai/z-ai/glm-4.6", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "openrouter/z-ai/glm-4.6:exacto": { + "input_cost_per_token": 4.5e-7, + "litellm_provider": "openrouter", + "max_input_tokens": 202800, + "max_output_tokens": 131000, + "max_tokens": 202800, + "mode": "chat", + "output_cost_per_token": 1.9e-6, + "source": "https://openrouter.ai/z-ai/glm-4.6:exacto", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "ovhcloud/DeepSeek-R1-Distill-Llama-70B": { + "input_cost_per_token": 6.7e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 6.7e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/deepseek-r1-distill-llama-70b", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + 
"supports_tool_choice": true + }, + "ovhcloud/Llama-3.1-8B-Instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 1e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/llama-3-1-8b-instruct", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "ovhcloud/Meta-Llama-3_1-70B-Instruct": { + "input_cost_per_token": 6.7e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 6.7e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/meta-llama-3-1-70b-instruct", + "supports_function_calling": false, + "supports_response_schema": false, + "supports_tool_choice": false + }, + "ovhcloud/Meta-Llama-3_3-70B-Instruct": { + "input_cost_per_token": 6.7e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 6.7e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/meta-llama-3-3-70b-instruct", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "ovhcloud/Mistral-7B-Instruct-v0.3": { + "input_cost_per_token": 1e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 127000, + "max_output_tokens": 127000, + "max_tokens": 127000, + "mode": "chat", + "output_cost_per_token": 1e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/mistral-7b-instruct-v0-3", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "ovhcloud/Mistral-Nemo-Instruct-2407": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 118000, + "max_output_tokens": 118000, + "max_tokens": 118000, + "mode": "chat", + "output_cost_per_token": 1.3e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/mistral-nemo-instruct-2407", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "ovhcloud/Mistral-Small-3.2-24B-Instruct-2506": { + "input_cost_per_token": 9e-08, + "litellm_provider": "ovhcloud", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.8e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/mistral-small-3-2-24b-instruct-2506", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "ovhcloud/Mixtral-8x7B-Instruct-v0.1": { + "input_cost_per_token": 6.3e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 6.3e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/mixtral-8x7b-instruct-v0-1", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "ovhcloud/Qwen2.5-Coder-32B-Instruct": { + "input_cost_per_token": 8.7e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 8.7e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/qwen2-5-coder-32b-instruct", + "supports_function_calling": false, + 
"supports_response_schema": true, + "supports_tool_choice": false + }, + "ovhcloud/Qwen2.5-VL-72B-Instruct": { + "input_cost_per_token": 9.1e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 9.1e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/qwen2-5-vl-72b-instruct", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "ovhcloud/Qwen3-32B": { + "input_cost_per_token": 8e-08, + "litellm_provider": "ovhcloud", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.3e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/qwen3-32b", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "ovhcloud/gpt-oss-120b": { + "input_cost_per_token": 8e-08, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/gpt-oss-120b", + "supports_function_calling": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "ovhcloud/gpt-oss-20b": { + "input_cost_per_token": 4e-08, + "litellm_provider": "ovhcloud", + "max_input_tokens": 131000, + "max_output_tokens": 131000, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/gpt-oss-20b", + "supports_function_calling": false, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "ovhcloud/llava-v1.6-mistral-7b-hf": { + "input_cost_per_token": 2.9e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 2.9e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/llava-next-mistral-7b", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "ovhcloud/mamba-codestral-7B-v0.1": { + "input_cost_per_token": 1.9e-07, + "litellm_provider": "ovhcloud", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.9e-07, + "source": "https://endpoints.ai.cloud.ovh.net/models/mamba-codestral-7b-v0-1", + "supports_function_calling": false, + "supports_response_schema": true, + "supports_tool_choice": false + }, + "palm/chat-bison": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "palm/chat-bison-001": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "palm/text-bison": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + 
"max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "palm/text-bison-001": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "palm/text-bison-safety-off": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "palm/text-bison-safety-recitation-off": { + "input_cost_per_token": 1.25e-07, + "litellm_provider": "palm", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "parallel_ai/search": { + "input_cost_per_query": 0.004, + "litellm_provider": "parallel_ai", + "mode": "search" + }, + "parallel_ai/search-pro": { + "input_cost_per_query": 0.009, + "litellm_provider": "parallel_ai", + "mode": "search" + }, + "perplexity/codellama-34b-instruct": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.4e-06 + }, + "perplexity/codellama-70b-instruct": { + "input_cost_per_token": 7e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2.8e-06 + }, + "perplexity/llama-2-70b-chat": { + "input_cost_per_token": 7e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-06 + }, + "perplexity/llama-3.1-70b-instruct": { + "input_cost_per_token": 1e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "perplexity/llama-3.1-8b-instruct": { + "input_cost_per_token": 2e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "perplexity/llama-3.1-sonar-huge-128k-online": { + "deprecation_date": "2025-02-22", + "input_cost_per_token": 5e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 127072, + "max_output_tokens": 127072, + "max_tokens": 127072, + "mode": "chat", + "output_cost_per_token": 5e-06 + }, + "perplexity/llama-3.1-sonar-large-128k-chat": { + "deprecation_date": "2025-02-22", + "input_cost_per_token": 1e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "perplexity/llama-3.1-sonar-large-128k-online": { + "deprecation_date": "2025-02-22", + "input_cost_per_token": 1e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 127072, + "max_output_tokens": 127072, + 
"max_tokens": 127072, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "perplexity/llama-3.1-sonar-small-128k-chat": { + "deprecation_date": "2025-02-22", + "input_cost_per_token": 2e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "perplexity/llama-3.1-sonar-small-128k-online": { + "deprecation_date": "2025-02-22", + "input_cost_per_token": 2e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 127072, + "max_output_tokens": 127072, + "max_tokens": 127072, + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "perplexity/mistral-7b-instruct": { + "input_cost_per_token": 7e-08, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "perplexity/mixtral-8x7b-instruct": { + "input_cost_per_token": 7e-08, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "perplexity/pplx-70b-chat": { + "input_cost_per_token": 7e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-06 + }, + "perplexity/pplx-70b-online": { + "input_cost_per_request": 0.005, + "input_cost_per_token": 0.0, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-06 + }, + "perplexity/pplx-7b-chat": { + "input_cost_per_token": 7e-08, + "litellm_provider": "perplexity", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "perplexity/pplx-7b-online": { + "input_cost_per_request": 0.005, + "input_cost_per_token": 0.0, + "litellm_provider": "perplexity", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "perplexity/sonar": { + "input_cost_per_token": 1e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.012, + "search_context_size_low": 0.005, + "search_context_size_medium": 0.008 + }, + "supports_web_search": true + }, + "perplexity/sonar-deep-research": { + "citation_cost_per_token": 2e-06, + "input_cost_per_token": 2e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_reasoning_token": 3e-06, + "output_cost_per_token": 8e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.005, + "search_context_size_low": 0.005, + "search_context_size_medium": 0.005 + }, + "supports_reasoning": true, + "supports_web_search": true + }, + "perplexity/sonar-medium-chat": { + "input_cost_per_token": 6e-07, + "litellm_provider": "perplexity", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1.8e-06 + }, + "perplexity/sonar-medium-online": { + "input_cost_per_request": 0.005, + "input_cost_per_token": 0, + "litellm_provider": "perplexity", + "max_input_tokens": 12000, + "max_output_tokens": 12000, + "max_tokens": 12000, + "mode": 
"chat", + "output_cost_per_token": 1.8e-06 + }, + "perplexity/sonar-pro": { + "input_cost_per_token": 3e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 200000, + "max_output_tokens": 8000, + "max_tokens": 8000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.014, + "search_context_size_low": 0.006, + "search_context_size_medium": 0.01 + }, + "supports_web_search": true + }, + "perplexity/sonar-reasoning": { + "input_cost_per_token": 1e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 5e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.014, + "search_context_size_low": 0.005, + "search_context_size_medium": 0.008 + }, + "supports_reasoning": true, + "supports_web_search": true + }, + "perplexity/sonar-reasoning-pro": { + "input_cost_per_token": 2e-06, + "litellm_provider": "perplexity", + "max_input_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "search_context_cost_per_query": { + "search_context_size_high": 0.014, + "search_context_size_low": 0.006, + "search_context_size_medium": 0.01 + }, + "supports_reasoning": true, + "supports_web_search": true + }, + "perplexity/sonar-small-chat": { + "input_cost_per_token": 7e-08, + "litellm_provider": "perplexity", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "perplexity/sonar-small-online": { + "input_cost_per_request": 0.005, + "input_cost_per_token": 0, + "litellm_provider": "perplexity", + "max_input_tokens": 12000, + "max_output_tokens": 12000, + "max_tokens": 12000, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + "publicai/swiss-ai/apertus-8b-instruct": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/swiss-ai/apertus-70b-instruct": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/aisingapore/Gemma-SEA-LION-v4-27B-IT": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/BSC-LT/salamandra-7b-instruct-tools-16k": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 16384, + "max_output_tokens": 4096, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/BSC-LT/ALIA-40b-instruct_Q8_0": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.0, + 
"source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/allenai/Olmo-3-7B-Instruct": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/aisingapore/Qwen-SEA-LION-v4-32B-IT": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "publicai/allenai/Olmo-3-7B-Think": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true + }, + "publicai/allenai/Olmo-3-32B-Think": { + "input_cost_per_token": 0.0, + "litellm_provider": "publicai", + "max_input_tokens": 32768, + "max_output_tokens": 4096, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://platform.publicai.co/docs", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true + }, + "qwen.qwen3-coder-480b-a35b-v1:0": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 262000, + "max_output_tokens": 65536, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.8e-06, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "qwen.qwen3-235b-a22b-2507-v1:0": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 262144, + "max_output_tokens": 131072, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 8.8e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "qwen.qwen3-coder-30b-a3b-v1:0": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 262144, + "max_output_tokens": 131072, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 6.0e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "qwen.qwen3-32b-v1:0": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6.0e-07, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "qwen.qwen3-next-80b-a3b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "supports_function_calling": true, + "supports_system_messages": true + }, + "qwen.qwen3-vl-235b-a22b": { + "input_cost_per_token": 5.3e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 8192, + 
"mode": "chat", + "output_cost_per_token": 2.66e-06, + "supports_function_calling": true, + "supports_system_messages": true, + "supports_vision": true + }, + "recraft/recraftv2": { + "litellm_provider": "recraft", + "mode": "image_generation", + "output_cost_per_image": 0.022, + "source": "https://www.recraft.ai/docs#pricing", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "recraft/recraftv3": { + "litellm_provider": "recraft", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://www.recraft.ai/docs#pricing", + "supported_endpoints": [ + "/v1/images/generations" + ] + }, + "replicate/meta/llama-2-13b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "replicate/meta/llama-2-13b-chat": { + "input_cost_per_token": 1e-07, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5e-07, + "supports_tool_choice": true + }, + "replicate/meta/llama-2-70b": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_tool_choice": true + }, + "replicate/meta/llama-2-70b-chat": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_tool_choice": true + }, + "replicate/meta/llama-2-7b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/meta/llama-2-7b-chat": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/meta/llama-3-70b": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "replicate", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_tool_choice": true + }, + "replicate/meta/llama-3-70b-instruct": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "replicate", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2.75e-06, + "supports_tool_choice": true + }, + "replicate/meta/llama-3-8b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 8086, + "max_output_tokens": 8086, + "max_tokens": 8086, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/meta/llama-3-8b-instruct": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 8086, + "max_output_tokens": 8086, + "max_tokens": 8086, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/mistralai/mistral-7b-instruct-v0.2": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + 
"max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/mistralai/mistral-7b-v0.1": { + "input_cost_per_token": 5e-08, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 2.5e-07, + "supports_tool_choice": true + }, + "replicate/mistralai/mixtral-8x7b-instruct-v0.1": { + "input_cost_per_token": 3e-07, + "litellm_provider": "replicate", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1e-06, + "supports_tool_choice": true + }, + "rerank-english-v2.0": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "rerank-english-v3.0": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "rerank-multilingual-v2.0": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "rerank-multilingual-v3.0": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "rerank-v3.5": { + "input_cost_per_query": 0.002, + "input_cost_per_token": 0.0, + "litellm_provider": "cohere", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_query_tokens": 2048, + "max_tokens": 4096, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "nvidia_nim/nvidia/nv-rerankqa-mistral-4b-v3": { + "input_cost_per_query": 0.0, + "input_cost_per_token": 0.0, + "litellm_provider": "nvidia_nim", + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "nvidia_nim/nvidia/llama-3_2-nv-rerankqa-1b-v2": { + "input_cost_per_query": 0.0, + "input_cost_per_token": 0.0, + "litellm_provider": "nvidia_nim", + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "nvidia_nim/ranking/nvidia/llama-3.2-nv-rerankqa-1b-v2": { + "input_cost_per_query": 0.0, + "input_cost_per_token": 0.0, + "litellm_provider": "nvidia_nim", + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-13b": { + "input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-13b-f": { + "input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-70b": { + "input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-70b-b-f": { + 
"input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-7b": { + "input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "completion", + "output_cost_per_token": 0.0 + }, + "sagemaker/meta-textgeneration-llama-2-7b-f": { + "input_cost_per_token": 0.0, + "litellm_provider": "sagemaker", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "sambanova/DeepSeek-R1": { + "input_cost_per_token": 5e-06, + "litellm_provider": "sambanova", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 7e-06, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/DeepSeek-R1-Distill-Llama-70B": { + "input_cost_per_token": 7e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.4e-06, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/DeepSeek-V3-0324": { + "input_cost_per_token": 3e-06, + "litellm_provider": "sambanova", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4.5e-06, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "sambanova/Llama-4-Maverick-17B-128E-Instruct": { + "input_cost_per_token": 6.3e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "metadata": { + "notes": "For vision models, images are converted to 6432 input tokens and are billed at that amount" + }, + "mode": "chat", + "output_cost_per_token": 1.8e-06, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "sambanova/Llama-4-Scout-17B-16E-Instruct": { + "input_cost_per_token": 4e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "metadata": { + "notes": "For vision models, images are converted to 6432 input tokens and are billed at that amount" + }, + "mode": "chat", + "output_cost_per_token": 7e-07, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "sambanova/Meta-Llama-3.1-405B-Instruct": { + "input_cost_per_token": 5e-06, + "litellm_provider": "sambanova", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-05, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "sambanova/Meta-Llama-3.1-8B-Instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://cloud.sambanova.ai/plans/pricing", + 
"supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "sambanova/Meta-Llama-3.2-1B-Instruct": { + "input_cost_per_token": 4e-08, + "litellm_provider": "sambanova", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 8e-08, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/Meta-Llama-3.2-3B-Instruct": { + "input_cost_per_token": 8e-08, + "litellm_provider": "sambanova", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.6e-07, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/Meta-Llama-3.3-70B-Instruct": { + "input_cost_per_token": 6e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "sambanova/Meta-Llama-Guard-3-8B": { + "input_cost_per_token": 3e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/QwQ-32B": { + "input_cost_per_token": 5e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-06, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/Qwen2-Audio-7B-Instruct": { + "input_cost_per_token": 5e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 0.0001, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_audio_input": true + }, + "sambanova/Qwen3-32B": { + "input_cost_per_token": 4e-07, + "litellm_provider": "sambanova", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 8e-07, + "source": "https://cloud.sambanova.ai/plans/pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "sambanova/DeepSeek-V3.1": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 3e-06, + "output_cost_per_token": 4.5e-06, + "litellm_provider": "sambanova", + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + "sambanova/gpt-oss-120b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 3e-06, + "output_cost_per_token": 4.5e-06, + "litellm_provider": "sambanova", + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_reasoning": true, + "source": "https://cloud.sambanova.ai/plans/pricing" + }, + + "snowflake/claude-3-5-sonnet": { + "litellm_provider": "snowflake", + "max_input_tokens": 18000, + "max_output_tokens": 8192, + "max_tokens": 18000, + "mode": "chat", + "supports_computer_use": true + }, + "snowflake/deepseek-r1": { + "litellm_provider": "snowflake", + "max_input_tokens": 32768, + "max_output_tokens": 
8192, + "max_tokens": 32768, + "mode": "chat", + "supports_reasoning": true + }, + "snowflake/gemma-7b": { + "litellm_provider": "snowflake", + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "max_tokens": 8000, + "mode": "chat" + }, + "snowflake/jamba-1.5-large": { + "litellm_provider": "snowflake", + "max_input_tokens": 256000, + "max_output_tokens": 8192, + "max_tokens": 256000, + "mode": "chat" + }, + "snowflake/jamba-1.5-mini": { + "litellm_provider": "snowflake", + "max_input_tokens": 256000, + "max_output_tokens": 8192, + "max_tokens": 256000, + "mode": "chat" + }, + "snowflake/jamba-instruct": { + "litellm_provider": "snowflake", + "max_input_tokens": 256000, + "max_output_tokens": 8192, + "max_tokens": 256000, + "mode": "chat" + }, + "snowflake/llama2-70b-chat": { + "litellm_provider": "snowflake", + "max_input_tokens": 4096, + "max_output_tokens": 8192, + "max_tokens": 4096, + "mode": "chat" + }, + "snowflake/llama3-70b": { + "litellm_provider": "snowflake", + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "max_tokens": 8000, + "mode": "chat" + }, + "snowflake/llama3-8b": { + "litellm_provider": "snowflake", + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "max_tokens": 8000, + "mode": "chat" + }, + "snowflake/llama3.1-405b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/llama3.1-70b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/llama3.1-8b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/llama3.2-1b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/llama3.2-3b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/llama3.3-70b": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/mistral-7b": { + "litellm_provider": "snowflake", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 32000, + "mode": "chat" + }, + "snowflake/mistral-large": { + "litellm_provider": "snowflake", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 32000, + "mode": "chat" + }, + "snowflake/mistral-large2": { + "litellm_provider": "snowflake", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat" + }, + "snowflake/mixtral-8x7b": { + "litellm_provider": "snowflake", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 32000, + "mode": "chat" + }, + "snowflake/reka-core": { + "litellm_provider": "snowflake", + "max_input_tokens": 32000, + "max_output_tokens": 8192, + "max_tokens": 32000, + "mode": "chat" + }, + "snowflake/reka-flash": { + "litellm_provider": "snowflake", + "max_input_tokens": 100000, + "max_output_tokens": 8192, + "max_tokens": 100000, + "mode": "chat" + }, + "snowflake/snowflake-arctic": { + "litellm_provider": "snowflake", + "max_input_tokens": 4096, + "max_output_tokens": 8192, + "max_tokens": 4096, + "mode": "chat" + }, + "snowflake/snowflake-llama-3.1-405b": { + "litellm_provider": "snowflake", + "max_input_tokens": 
8000, + "max_output_tokens": 8192, + "max_tokens": 8000, + "mode": "chat" + }, + "snowflake/snowflake-llama-3.3-70b": { + "litellm_provider": "snowflake", + "max_input_tokens": 8000, + "max_output_tokens": 8192, + "max_tokens": 8000, + "mode": "chat" + }, + "stability/sd3": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.065, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3-large": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.065, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3-large-turbo": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3-medium": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.035, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3.5-large": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.065, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3.5-large-turbo": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/sd3.5-medium": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.035, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/stable-image-ultra": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.08, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability/stable-image-core": { + "litellm_provider": "stability", + "mode": "image_generation", + "output_cost_per_image": 0.03, + "supported_endpoints": ["/v1/images/generations"] + }, + "stability.sd3-5-large-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.08 + }, + "stability.sd3-large-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.08 + }, + "stability.stable-image-core-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.04 + }, + "stability.stable-image-core-v1:1": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.04 + }, + "stability.stable-image-ultra-v1:0": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.14 + }, + "stability.stable-image-ultra-v1:1": { + "litellm_provider": "bedrock", + "max_input_tokens": 77, + "max_tokens": 77, + "mode": "image_generation", + "output_cost_per_image": 0.14 + }, + "standard/1024-x-1024/dall-e-3": { + "input_cost_per_pixel": 3.81469e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "standard/1024-x-1792/dall-e-3": { + "input_cost_per_pixel": 4.359e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + "standard/1792-x-1024/dall-e-3": { + "input_cost_per_pixel": 4.359e-08, + "litellm_provider": "openai", + "mode": "image_generation", + "output_cost_per_pixel": 0.0 + }, + 
"tavily/search": { + "input_cost_per_query": 0.008, + "litellm_provider": "tavily", + "mode": "search" + }, + "tavily/search-advanced": { + "input_cost_per_query": 0.016, + "litellm_provider": "tavily", + "mode": "search" + }, + "text-bison": { + "input_cost_per_character": 2.5e-07, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 2048, + "max_tokens": 2048, + "mode": "completion", + "output_cost_per_character": 5e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-bison32k": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-bison32k@002": { + "input_cost_per_character": 2.5e-07, + "input_cost_per_token": 1.25e-07, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "output_cost_per_token": 1.25e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-bison@001": { + "input_cost_per_character": 2.5e-07, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-bison@002": { + "input_cost_per_character": 2.5e-07, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_character": 5e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-completion-codestral/codestral-2405": { + "input_cost_per_token": 0.0, + "litellm_provider": "text-completion-codestral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "completion", + "output_cost_per_token": 0.0, + "source": "https://docs.mistral.ai/capabilities/code_generation/" + }, + "text-completion-codestral/codestral-latest": { + "input_cost_per_token": 0.0, + "litellm_provider": "text-completion-codestral", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "completion", + "output_cost_per_token": 0.0, + "source": "https://docs.mistral.ai/capabilities/code_generation/" + }, + "text-embedding-004": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models" + }, + "text-embedding-005": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models" + }, + 
"text-embedding-3-large": { + "input_cost_per_token": 1.3e-07, + "input_cost_per_token_batches": 6.5e-08, + "litellm_provider": "openai", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_cost_per_token_batches": 0.0, + "output_vector_size": 3072 + }, + "text-embedding-3-small": { + "input_cost_per_token": 2e-08, + "input_cost_per_token_batches": 1e-08, + "litellm_provider": "openai", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_cost_per_token_batches": 0.0, + "output_vector_size": 1536 + }, + "text-embedding-ada-002": { + "input_cost_per_token": 1e-07, + "litellm_provider": "openai", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 1536 + }, + "text-embedding-ada-002-v2": { + "input_cost_per_token": 1e-07, + "input_cost_per_token_batches": 5e-08, + "litellm_provider": "openai", + "max_input_tokens": 8191, + "max_tokens": 8191, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_cost_per_token_batches": 0.0 + }, + "text-embedding-large-exp-03-07": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 8192, + "max_tokens": 8192, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 3072, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models" + }, + "text-embedding-preview-0409": { + "input_cost_per_token": 6.25e-09, + "input_cost_per_token_batch_requests": 5e-09, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "text-moderation-007": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "text-moderation-latest": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "text-moderation-stable": { + "input_cost_per_token": 0.0, + "litellm_provider": "openai", + "max_input_tokens": 32768, + "max_output_tokens": 0, + "max_tokens": 32768, + "mode": "moderation", + "output_cost_per_token": 0.0 + }, + "text-multilingual-embedding-002": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 2048, + "max_tokens": 2048, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models" + }, + "text-multilingual-embedding-preview-0409": { + "input_cost_per_token": 6.25e-09, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-unicorn": { + "input_cost_per_token": 1e-05, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": 
"completion", + "output_cost_per_token": 2.8e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "text-unicorn@001": { + "input_cost_per_token": 1e-05, + "litellm_provider": "vertex_ai-text-models", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 1024, + "mode": "completion", + "output_cost_per_token": 2.8e-05, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "textembedding-gecko": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "textembedding-gecko-multilingual": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "textembedding-gecko-multilingual@001": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "textembedding-gecko@001": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "textembedding-gecko@003": { + "input_cost_per_character": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vertex_ai-embedding-models", + "max_input_tokens": 3072, + "max_tokens": 3072, + "mode": "embedding", + "output_cost_per_token": 0, + "output_vector_size": 768, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models" + }, + "together-ai-21.1b-41b": { + "input_cost_per_token": 8e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 8e-07 + }, + "together-ai-4.1b-8b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "together-ai-41.1b-80b": { + "input_cost_per_token": 9e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 9e-07 + }, + "together-ai-8.1b-21b": { + "input_cost_per_token": 3e-07, + "litellm_provider": "together_ai", + "max_tokens": 1000, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "together-ai-81.1b-110b": { + "input_cost_per_token": 1.8e-06, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 1.8e-06 + }, + "together-ai-embedding-151m-to-350m": { + "input_cost_per_token": 1.6e-08, + "litellm_provider": "together_ai", + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "together-ai-embedding-up-to-150m": { + "input_cost_per_token": 8e-09, + "litellm_provider": 
"together_ai", + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "together_ai/baai/bge-base-en-v1.5": { + "input_cost_per_token": 8e-09, + "litellm_provider": "together_ai", + "max_input_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 768 + }, + "together_ai/BAAI/bge-base-en-v1.5": { + "input_cost_per_token": 8e-09, + "litellm_provider": "together_ai", + "max_input_tokens": 512, + "mode": "embedding", + "output_cost_per_token": 0.0, + "output_vector_size": 768 + }, + "together-ai-up-to-4b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 1e-07 + }, + "together_ai/Qwen/Qwen2.5-72B-Instruct-Turbo": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen2.5-7B-Instruct-Turbo": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen3-235B-A22B-Instruct-2507-tput": { + "input_cost_per_token": 2e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 262000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "source": "https://www.together.ai/models/qwen3-235b-a22b-instruct-2507-fp8", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen3-235B-A22B-Thinking-2507": { + "input_cost_per_token": 6.5e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://www.together.ai/models/qwen3-235b-a22b-thinking-2507", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen3-235B-A22B-fp8-tput": { + "input_cost_per_token": 2e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 40000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://www.together.ai/models/qwen3-235b-a22b-fp8-tput", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_tool_choice": false + }, + "together_ai/Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8": { + "input_cost_per_token": 2e-06, + "litellm_provider": "together_ai", + "max_input_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "source": "https://www.together.ai/models/qwen3-coder-480b-a35b-instruct", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/deepseek-ai/DeepSeek-R1": { + "input_cost_per_token": 3e-06, + "litellm_provider": "together_ai", + "max_input_tokens": 128000, + "max_output_tokens": 20480, + "max_tokens": 20480, + "mode": "chat", + "output_cost_per_token": 7e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/deepseek-ai/DeepSeek-R1-0528-tput": { + "input_cost_per_token": 5.5e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.19e-06, + "source": "https://www.together.ai/models/deepseek-r1-0528-throughput", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/deepseek-ai/DeepSeek-V3": { 
+ "input_cost_per_token": 1.25e-06, + "litellm_provider": "together_ai", + "max_input_tokens": 65536, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/deepseek-ai/DeepSeek-V3.1": { + "input_cost_per_token": 6e-07, + "litellm_provider": "together_ai", + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.7e-06, + "source": "https://www.together.ai/models/deepseek-v3-1", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Llama-3.2-3B-Instruct-Turbo": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo": { + "input_cost_per_token": 8.8e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 8.8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Llama-3.3-70B-Instruct-Turbo-Free": { + "input_cost_per_token": 0, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 0, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8": { + "input_cost_per_token": 2.7e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 8.5e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Llama-4-Scout-17B-16E-Instruct": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 5.9e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo": { + "input_cost_per_token": 3.5e-06, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 3.5e-06, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": { + "input_cost_per_token": 8.8e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 8.8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "together_ai/meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 1.8e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "together_ai/mistralai/Mistral-7B-Instruct-v0.1": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + 
"together_ai/mistralai/Mistral-Small-24B-Instruct-2501": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/mistralai/Mixtral-8x7B-Instruct-v0.1": { + "input_cost_per_token": 6e-07, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "together_ai/moonshotai/Kimi-K2-Instruct": { + "input_cost_per_token": 1e-06, + "litellm_provider": "together_ai", + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://www.together.ai/models/kimi-k2-instruct", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/openai/gpt-oss-120b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://www.together.ai/models/gpt-oss-120b", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/openai/gpt-oss-20b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "together_ai", + "max_input_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-07, + "source": "https://www.together.ai/models/gpt-oss-20b", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/togethercomputer/CodeLlama-34b-Instruct": { + "litellm_provider": "together_ai", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/zai-org/GLM-4.5-Air-FP8": { + "input_cost_per_token": 2e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-06, + "source": "https://www.together.ai/models/glm-4-5-air", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/zai-org/GLM-4.6": { + "input_cost_per_token": 0.6e-06, + "litellm_provider": "together_ai", + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 2.2e-06, + "source": "https://www.together.ai/models/glm-4-6", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "together_ai/moonshotai/Kimi-K2-Instruct-0905": { + "input_cost_per_token": 1e-06, + "litellm_provider": "together_ai", + "max_input_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 3e-06, + "source": "https://www.together.ai/models/kimi-k2-0905", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen3-Next-80B-A3B-Instruct": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://www.together.ai/models/qwen3-next-80b-a3b-instruct", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "together_ai/Qwen/Qwen3-Next-80B-A3B-Thinking": { + 
"input_cost_per_token": 1.5e-07, + "litellm_provider": "together_ai", + "max_input_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://www.together.ai/models/qwen3-next-80b-a3b-thinking", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "tts-1": { + "input_cost_per_character": 1.5e-05, + "litellm_provider": "openai", + "mode": "audio_speech", + "supported_endpoints": [ + "/v1/audio/speech" + ] + }, + "tts-1-hd": { + "input_cost_per_character": 3e-05, + "litellm_provider": "openai", + "mode": "audio_speech", + "supported_endpoints": [ + "/v1/audio/speech" + ] + }, + "us.amazon.nova-lite-v1:0": { + "input_cost_per_token": 6e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 2.4e-07, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "us.amazon.nova-micro-v1:0": { + "input_cost_per_token": 3.5e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.4e-07, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_response_schema": true + }, + "us.amazon.nova-premier-v1:0": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 1.25e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": false, + "supports_response_schema": true, + "supports_vision": true + }, + "us.amazon.nova-pro-v1:0": { + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 300000, + "max_output_tokens": 10000, + "max_tokens": 10000, + "mode": "chat", + "output_cost_per_token": 3.2e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_vision": true + }, + "us.anthropic.claude-3-5-haiku-20241022-v1:0": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 8e-08, + "input_cost_per_token": 8e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 4e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "us.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.375e-06, + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5.5e-06, + "source": "https://aws.amazon.com/about-aws/whats-new/2025/10/claude-4-5-haiku-anthropic-amazon-bedrock", + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + 
"supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "us.anthropic.claude-3-5-sonnet-20240620-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-3-5-sonnet-20241022-v2:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-3-7-sonnet-20250219-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-3-haiku-20240307-v1:0": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-3-opus-20240229-v1:0": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-3-sonnet-20240229-v1:0": { + "input_cost_per_token": 3e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "us.anthropic.claude-opus-4-1-20250805-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + 
"supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "us.anthropic.claude-sonnet-4-5-20250929-v1:0": { + "cache_creation_input_token_cost": 4.125e-06, + "cache_read_input_token_cost": 3.3e-07, + "input_cost_per_token": 3.3e-06, + "input_cost_per_token_above_200k_tokens": 6.6e-06, + "output_cost_per_token_above_200k_tokens": 2.475e-05, + "cache_creation_input_token_cost_above_200k_tokens": 8.25e-06, + "cache_read_input_token_cost_above_200k_tokens": 6.6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.65e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "au.anthropic.claude-haiku-4-5-20251001-v1:0": { + "cache_creation_input_token_cost": 1.375e-06, + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 5.5e-06, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 346 + }, + "us.anthropic.claude-opus-4-20250514-v1:0": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "us.anthropic.claude-opus-4-5-20251101-v1:0": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + 
"supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "global.anthropic.claude-opus-4-5-20251101-v1:0": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "eu.anthropic.claude-opus-4-5-20251101-v1:0": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "us.anthropic.claude-sonnet-4-20250514-v1:0": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "us.deepseek.r1-v1:0": { + "input_cost_per_token": 1.35e-06, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 5.4e-06, + "supports_function_calling": false, + "supports_reasoning": true, + "supports_tool_choice": false + }, + "us.meta.llama3-1-405b-instruct-v1:0": { + "input_cost_per_token": 5.32e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + 
"output_cost_per_token": 1.6e-05, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama3-1-70b-instruct-v1:0": { + "input_cost_per_token": 9.9e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9.9e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama3-1-8b-instruct-v1:0": { + "input_cost_per_token": 2.2e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.2e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama3-2-11b-instruct-v1:0": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3.5e-07, + "supports_function_calling": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "us.meta.llama3-2-1b-instruct-v1:0": { + "input_cost_per_token": 1e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama3-2-3b-instruct-v1:0": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama3-2-90b-instruct-v1:0": { + "input_cost_per_token": 2e-06, + "litellm_provider": "bedrock", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_tool_choice": false, + "supports_vision": true + }, + "us.meta.llama3-3-70b-instruct-v1:0": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.2e-07, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama4-maverick-17b-instruct-v1:0": { + "input_cost_per_token": 2.4e-07, + "input_cost_per_token_batches": 1.2e-07, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 9.7e-07, + "output_cost_per_token_batches": 4.85e-07, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.meta.llama4-scout-17b-instruct-v1:0": { + "input_cost_per_token": 1.7e-07, + "input_cost_per_token_batches": 8.5e-08, + "litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 6.6e-07, + "output_cost_per_token_batches": 3.3e-07, + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": false + }, + "us.mistral.pixtral-large-2502-v1:0": { + "input_cost_per_token": 2e-06, + 
"litellm_provider": "bedrock_converse", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_tool_choice": false + }, + "v0/v0-1.0-md": { + "input_cost_per_token": 3e-06, + "litellm_provider": "v0", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "v0/v0-1.5-lg": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "v0", + "max_input_tokens": 512000, + "max_output_tokens": 512000, + "max_tokens": 512000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "v0/v0-1.5-md": { + "input_cost_per_token": 3e-06, + "litellm_provider": "v0", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vercel_ai_gateway/alibaba/qwen-3-14b": { + "input_cost_per_token": 8e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 40960, + "max_output_tokens": 16384, + "max_tokens": 40960, + "mode": "chat", + "output_cost_per_token": 2.4e-07 + }, + "vercel_ai_gateway/alibaba/qwen-3-235b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 40960, + "max_output_tokens": 16384, + "max_tokens": 40960, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "vercel_ai_gateway/alibaba/qwen-3-30b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 40960, + "max_output_tokens": 16384, + "max_tokens": 40960, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "vercel_ai_gateway/alibaba/qwen-3-32b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 40960, + "max_output_tokens": 16384, + "max_tokens": 40960, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "vercel_ai_gateway/alibaba/qwen3-coder": { + "input_cost_per_token": 4e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 262144, + "max_output_tokens": 66536, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.6e-06 + }, + "vercel_ai_gateway/amazon/nova-lite": { + "input_cost_per_token": 6e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 300000, + "max_output_tokens": 8192, + "max_tokens": 300000, + "mode": "chat", + "output_cost_per_token": 2.4e-07 + }, + "vercel_ai_gateway/amazon/nova-micro": { + "input_cost_per_token": 3.5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.4e-07 + }, + "vercel_ai_gateway/amazon/nova-pro": { + "input_cost_per_token": 8e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 300000, + "max_output_tokens": 8192, + "max_tokens": 300000, + "mode": "chat", + "output_cost_per_token": 3.2e-06 + }, + 
"vercel_ai_gateway/amazon/titan-embed-text-v2": { + "input_cost_per_token": 2e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/anthropic/claude-3-haiku": { + "cache_creation_input_token_cost": 3e-07, + "cache_read_input_token_cost": 3e-08, + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.25e-06 + }, + "vercel_ai_gateway/anthropic/claude-3-opus": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 7.5e-05 + }, + "vercel_ai_gateway/anthropic/claude-3.5-haiku": { + "cache_creation_input_token_cost": 1e-06, + "cache_read_input_token_cost": 8e-08, + "input_cost_per_token": 8e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 4e-06 + }, + "vercel_ai_gateway/anthropic/claude-3.5-sonnet": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/anthropic/claude-3.7-sonnet": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/anthropic/claude-4-opus": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 7.5e-05 + }, + "vercel_ai_gateway/anthropic/claude-4-sonnet": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/cohere/command-a": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 256000, + "max_output_tokens": 8000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + "vercel_ai_gateway/cohere/command-r": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "vercel_ai_gateway/cohere/command-r-plus": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + 
"vercel_ai_gateway/cohere/embed-v4.0": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/deepseek/deepseek-r1": { + "input_cost_per_token": 5.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.19e-06 + }, + "vercel_ai_gateway/deepseek/deepseek-r1-distill-llama-70b": { + "input_cost_per_token": 7.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 9.9e-07 + }, + "vercel_ai_gateway/deepseek/deepseek-v3": { + "input_cost_per_token": 9e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-07 + }, + "vercel_ai_gateway/google/gemini-2.0-flash": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_tokens": 1048576, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "vercel_ai_gateway/google/gemini-2.0-flash-lite": { + "input_cost_per_token": 7.5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1048576, + "max_output_tokens": 8192, + "max_tokens": 1048576, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "vercel_ai_gateway/google/gemini-2.5-flash": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1000000, + "max_output_tokens": 65536, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 2.5e-06 + }, + "vercel_ai_gateway/google/gemini-2.5-pro": { + "input_cost_per_token": 2.5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1048576, + "max_output_tokens": 65536, + "max_tokens": 1048576, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + "vercel_ai_gateway/google/gemini-embedding-001": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/google/gemma-2-9b": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-07 + }, + "vercel_ai_gateway/google/text-embedding-005": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/google/text-multilingual-embedding-002": { + "input_cost_per_token": 2.5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/inception/mercury-coder-small": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32000, + "max_output_tokens": 16384, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "vercel_ai_gateway/meta/llama-3-70b": { + "input_cost_per_token": 5.9e-07, + "litellm_provider": 
"vercel_ai_gateway", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 7.9e-07 + }, + "vercel_ai_gateway/meta/llama-3-8b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 8e-08 + }, + "vercel_ai_gateway/meta/llama-3.1-70b": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 7.2e-07 + }, + "vercel_ai_gateway/meta/llama-3.1-8b": { + "input_cost_per_token": 5e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131000, + "max_output_tokens": 131072, + "max_tokens": 131000, + "mode": "chat", + "output_cost_per_token": 8e-08 + }, + "vercel_ai_gateway/meta/llama-3.2-11b": { + "input_cost_per_token": 1.6e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.6e-07 + }, + "vercel_ai_gateway/meta/llama-3.2-1b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-07 + }, + "vercel_ai_gateway/meta/llama-3.2-3b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07 + }, + "vercel_ai_gateway/meta/llama-3.2-90b": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 7.2e-07 + }, + "vercel_ai_gateway/meta/llama-3.3-70b": { + "input_cost_per_token": 7.2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 8192, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 7.2e-07 + }, + "vercel_ai_gateway/meta/llama-4-maverick": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "vercel_ai_gateway/meta/llama-4-scout": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 8192, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "vercel_ai_gateway/mistral/codestral": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 256000, + "max_output_tokens": 4000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 9e-07 + }, + "vercel_ai_gateway/mistral/codestral-embed": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/mistral/devstral-small": { + "input_cost_per_token": 7e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 2.8e-07 + }, + 
"vercel_ai_gateway/mistral/magistral-medium": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 5e-06 + }, + "vercel_ai_gateway/mistral/magistral-small": { + "input_cost_per_token": 5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 64000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-06 + }, + "vercel_ai_gateway/mistral/ministral-3b": { + "input_cost_per_token": 4e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 4e-08 + }, + "vercel_ai_gateway/mistral/ministral-8b": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-07 + }, + "vercel_ai_gateway/mistral/mistral-embed": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "chat", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/mistral/mistral-large": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32000, + "max_output_tokens": 4000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 6e-06 + }, + "vercel_ai_gateway/mistral/mistral-saba-24b": { + "input_cost_per_token": 7.9e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 7.9e-07 + }, + "vercel_ai_gateway/mistral/mistral-small": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32000, + "max_output_tokens": 4000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 3e-07 + }, + "vercel_ai_gateway/mistral/mixtral-8x22b-instruct": { + "input_cost_per_token": 1.2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 65536, + "max_output_tokens": 2048, + "max_tokens": 65536, + "mode": "chat", + "output_cost_per_token": 1.2e-06 + }, + "vercel_ai_gateway/mistral/pixtral-12b": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07 + }, + "vercel_ai_gateway/mistral/pixtral-large": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-06 + }, + "vercel_ai_gateway/moonshotai/kimi-k2": { + "input_cost_per_token": 5.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.2e-06 + }, + "vercel_ai_gateway/morph/morph-v3-fast": { + "input_cost_per_token": 8e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32768, + "max_output_tokens": 16384, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.2e-06 + }, + "vercel_ai_gateway/morph/morph-v3-large": { + "input_cost_per_token": 9e-07, + "litellm_provider": "vercel_ai_gateway", + 
"max_input_tokens": 32768, + "max_output_tokens": 16384, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1.9e-06 + }, + "vercel_ai_gateway/openai/gpt-3.5-turbo": { + "input_cost_per_token": 5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "max_tokens": 16385, + "mode": "chat", + "output_cost_per_token": 1.5e-06 + }, + "vercel_ai_gateway/openai/gpt-3.5-turbo-instruct": { + "input_cost_per_token": 1.5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 8192, + "max_output_tokens": 4096, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 2e-06 + }, + "vercel_ai_gateway/openai/gpt-4-turbo": { + "input_cost_per_token": 1e-05, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 4096, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-05 + }, + "vercel_ai_gateway/openai/gpt-4.1": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 1047576, + "mode": "chat", + "output_cost_per_token": 8e-06 + }, + "vercel_ai_gateway/openai/gpt-4.1-mini": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 4e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 1047576, + "mode": "chat", + "output_cost_per_token": 1.6e-06 + }, + "vercel_ai_gateway/openai/gpt-4.1-nano": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 2.5e-08, + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 1047576, + "max_output_tokens": 32768, + "max_tokens": 1047576, + "mode": "chat", + "output_cost_per_token": 4e-07 + }, + "vercel_ai_gateway/openai/gpt-4o": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 1.25e-06, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + "vercel_ai_gateway/openai/gpt-4o-mini": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 7.5e-08, + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 16384, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07 + }, + "vercel_ai_gateway/openai/o1": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 7.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 6e-05 + }, + "vercel_ai_gateway/openai/o3": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 8e-06 + }, + "vercel_ai_gateway/openai/o3-mini": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 5.5e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + 
"max_output_tokens": 100000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 4.4e-06 + }, + "vercel_ai_gateway/openai/o4-mini": { + "cache_creation_input_token_cost": 0.0, + "cache_read_input_token_cost": 2.75e-07, + "input_cost_per_token": 1.1e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 100000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 4.4e-06 + }, + "vercel_ai_gateway/openai/text-embedding-3-large": { + "input_cost_per_token": 1.3e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/openai/text-embedding-3-small": { + "input_cost_per_token": 2e-08, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/openai/text-embedding-ada-002": { + "input_cost_per_token": 1e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 0, + "max_output_tokens": 0, + "max_tokens": 0, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "vercel_ai_gateway/perplexity/sonar": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 127000, + "max_output_tokens": 8000, + "max_tokens": 127000, + "mode": "chat", + "output_cost_per_token": 1e-06 + }, + "vercel_ai_gateway/perplexity/sonar-pro": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 200000, + "max_output_tokens": 8000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/perplexity/sonar-reasoning": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 127000, + "max_output_tokens": 8000, + "max_tokens": 127000, + "mode": "chat", + "output_cost_per_token": 5e-06 + }, + "vercel_ai_gateway/perplexity/sonar-reasoning-pro": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 127000, + "max_output_tokens": 8000, + "max_tokens": 127000, + "mode": "chat", + "output_cost_per_token": 8e-06 + }, + "vercel_ai_gateway/vercel/v0-1.0-md": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/vercel/v0-1.5-md": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 32768, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/xai/grok-2": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 4000, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + "vercel_ai_gateway/xai/grok-2-vision": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1e-05 + }, + "vercel_ai_gateway/xai/grok-3": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + 
"output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/xai/grok-3-fast": { + "input_cost_per_token": 5e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-05 + }, + "vercel_ai_gateway/xai/grok-3-mini": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-07 + }, + "vercel_ai_gateway/xai/grok-3-mini-fast": { + "input_cost_per_token": 6e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-06 + }, + "vercel_ai_gateway/xai/grok-4": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-05 + }, + "vercel_ai_gateway/zai/glm-4.5": { + "input_cost_per_token": 6e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.2e-06 + }, + "vercel_ai_gateway/zai/glm-4.5-air": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vercel_ai_gateway", + "max_input_tokens": 128000, + "max_output_tokens": 96000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.1e-06 + }, + "vercel_ai_gateway/zai/glm-4.6": { + "litellm_provider": "vercel_ai_gateway", + "cache_read_input_token_cost": 1.1e-07, + "input_cost_per_token": 4.5e-07, + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "max_tokens": 200000, + "mode": "chat", + "output_cost_per_token": 1.8e-06, + "source": "https://vercel.com/ai-gateway/models/glm-4.6", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/chirp": { + "input_cost_per_character": 30e-06, + "litellm_provider": "vertex_ai", + "mode": "audio_speech", + "source": "https://cloud.google.com/text-to-speech/pricing", + "supported_endpoints": [ + "/v1/audio/speech" + ] + }, + "vertex_ai/claude-3-5-haiku": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true + }, + "vertex_ai/claude-3-5-haiku@20241022": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true + }, + "vertex_ai/claude-haiku-4-5@20251001": { + "cache_creation_input_token_cost": 1.25e-06, + "cache_read_input_token_cost": 1e-07, + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/partner-models/claude/haiku-4-5", + 
"supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true + }, + "vertex_ai/claude-3-5-sonnet": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-5-sonnet-v2": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-5-sonnet-v2@20241022": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-5-sonnet@20240620": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-7-sonnet@20250219": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "deprecation_date": "2025-06-01", + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-3-haiku": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-haiku@20240307": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.25e-06, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + 
"supports_vision": true + }, + "vertex_ai/claude-3-opus": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-opus@20240229": { + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-sonnet": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-3-sonnet@20240229": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 4096, + "max_tokens": 4096, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-opus-4": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-opus-4-1": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "input_cost_per_token_batches": 7.5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "output_cost_per_token_batches": 3.75e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-opus-4-1@20250805": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "input_cost_per_token_batches": 7.5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "output_cost_per_token_batches": 3.75e-05, + "supports_assistant_prefill": true, + "supports_function_calling": true, + 
"supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-opus-4-5": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-opus-4-5@20251101": { + "cache_creation_input_token_cost": 6.25e-06, + "cache_read_input_token_cost": 5e-07, + "input_cost_per_token": 5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-sonnet-4-5": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "input_cost_per_token_batches": 1.5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_batches": 7.5e-06, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-sonnet-4-5@20250929": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "input_cost_per_token_batches": 1.5e-06, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_batches": 7.5e-06, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + 
"supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/claude-opus-4@20250514": { + "cache_creation_input_token_cost": 1.875e-05, + "cache_read_input_token_cost": 1.5e-06, + "input_cost_per_token": 1.5e-05, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 200000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 7.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-sonnet-4": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/claude-sonnet-4@20250514": { + "cache_creation_input_token_cost": 3.75e-06, + "cache_read_input_token_cost": 3e-07, + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_200k_tokens": 6e-06, + "output_cost_per_token_above_200k_tokens": 2.25e-05, + "cache_creation_input_token_cost_above_200k_tokens": 7.5e-06, + "cache_read_input_token_cost_above_200k_tokens": 6e-07, + "litellm_provider": "vertex_ai-anthropic_models", + "max_input_tokens": 1000000, + "max_output_tokens": 64000, + "max_tokens": 64000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "search_context_cost_per_query": { + "search_context_size_high": 0.01, + "search_context_size_low": 0.01, + "search_context_size_medium": 0.01 + }, + "supports_assistant_prefill": true, + "supports_computer_use": true, + "supports_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "tool_use_system_prompt_tokens": 159 + }, + "vertex_ai/mistralai/codestral-2@001": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/codestral-2": { + "input_cost_per_token": 3e-07, + "litellm_provider": 
"vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/codestral-2@001": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistralai/codestral-2": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 9e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/codestral-2501": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/codestral@2405": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/codestral@latest": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 6e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/deepseek-ai/deepseek-v3.1-maas": { + "input_cost_per_token": 1.35e-06, + "litellm_provider": "vertex_ai-deepseek_models", + "max_input_tokens": 163840, + "max_output_tokens": 32768, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 5.4e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_regions": [ + "us-west2" + ], + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "vertex_ai/deepseek-ai/deepseek-v3.2-maas": { + "input_cost_per_token": 5.6e-07, + "input_cost_per_token_batches": 2.8e-07, + "litellm_provider": "vertex_ai-deepseek_models", + "max_input_tokens": 163840, + "max_output_tokens": 32768, + "max_tokens": 163840, + "mode": "chat", + "output_cost_per_token": 1.68e-06, + "output_cost_per_token_batches": 8.4e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_regions": [ + "us-west2" + ], + "supports_assistant_prefill": true, + "supports_function_calling": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "vertex_ai/deepseek-ai/deepseek-r1-0528-maas": { + "input_cost_per_token": 1.35e-06, + "litellm_provider": "vertex_ai-deepseek_models", + "max_input_tokens": 65336, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 5.4e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_assistant_prefill": true, + "supports_function_calling": true, + 
"supports_prompt_caching": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "vertex_ai/gemini-2.5-flash-image": { + "cache_read_input_token_cost": 3e-08, + "input_cost_per_audio_token": 1e-06, + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-language-models", + "max_audio_length_hours": 8.4, + "max_audio_per_prompt": 1, + "max_images_per_prompt": 3000, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "max_pdf_size_mb": 30, + "max_video_length": 1, + "max_videos_per_prompt": 10, + "mode": "image_generation", + "output_cost_per_image": 0.039, + "output_cost_per_reasoning_token": 2.5e-06, + "output_cost_per_token": 2.5e-06, + "rpm": 100000, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/image-generation#edit-an-image", + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/completions", + "/v1/batch" + ], + "supported_modalities": [ + "text", + "image", + "audio", + "video" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_url_context": true, + "supports_vision": true, + "supports_web_search": false, + "tpm": 8000000 + }, + "vertex_ai/gemini-3-pro-image-preview": { + "input_cost_per_image": 0.0011, + "input_cost_per_token": 2e-06, + "input_cost_per_token_batches": 1e-06, + "litellm_provider": "vertex_ai-language-models", + "max_input_tokens": 65536, + "max_output_tokens": 32768, + "max_tokens": 65536, + "mode": "image_generation", + "output_cost_per_image": 0.134, + "output_cost_per_image_token": 1.2e-04, + "output_cost_per_token": 1.2e-05, + "output_cost_per_token_batches": 6e-06, + "source": "https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/3-pro-image" + }, + "vertex_ai/imagegeneration@006": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-3.0-fast-generate-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-3.0-generate-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-3.0-generate-002": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-3.0-capability-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/image/edit-insert-objects" + }, + "vertex_ai/imagen-4.0-fast-generate-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.02, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-4.0-generate-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + 
"output_cost_per_image": 0.04, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/imagen-4.0-ultra-generate-001": { + "litellm_provider": "vertex_ai-image-models", + "mode": "image_generation", + "output_cost_per_image": 0.06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing" + }, + "vertex_ai/jamba-1.5": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-ai21_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "vertex_ai/jamba-1.5-large": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-ai21_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "vertex_ai/jamba-1.5-large@001": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-ai21_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 8e-06, + "supports_tool_choice": true + }, + "vertex_ai/jamba-1.5-mini": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-ai21_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "vertex_ai/jamba-1.5-mini@001": { + "input_cost_per_token": 2e-07, + "litellm_provider": "vertex_ai-ai21_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 4e-07, + "supports_tool_choice": true + }, + "vertex_ai/meta/llama-3.1-405b-instruct-maas": { + "input_cost_per_token": 5e-06, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.6e-05, + "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas", + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/meta/llama-3.1-70b-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas", + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/meta/llama-3.1-8b-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "metadata": { + "notes": "VertexAI states that The Llama 3.1 API service for llama-3.1-70b-instruct-maas and llama-3.1-8b-instruct-maas are in public preview and at no cost." 
+ }, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas", + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/meta/llama-3.2-90b-vision-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 128000, + "max_output_tokens": 2048, + "max_tokens": 128000, + "metadata": { + "notes": "VertexAI states that the Llama 3.2 API service is at no cost during public preview and will be priced in dollars per 1M tokens at GA." + }, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama-3.2-90b-vision-instruct-maas", + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/meta/llama-4-maverick-17b-128e-instruct-maas": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 1.15e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/meta/llama-4-maverick-17b-16e-instruct-maas": { + "input_cost_per_token": 3.5e-07, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 1000000, + "max_output_tokens": 1000000, + "max_tokens": 1000000, + "mode": "chat", + "output_cost_per_token": 1.15e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/meta/llama-4-scout-17b-128e-instruct-maas": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 10000000, + "max_output_tokens": 10000000, + "max_tokens": 10000000, + "mode": "chat", + "output_cost_per_token": 7e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/meta/llama-4-scout-17b-16e-instruct-maas": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 10000000, + "max_output_tokens": 10000000, + "max_tokens": 10000000, + "mode": "chat", + "output_cost_per_token": 7e-07, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "code" + ], + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/meta/llama3-405b-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_tool_choice": true + }, + 
"vertex_ai/meta/llama3-70b-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_tool_choice": true + }, + "vertex_ai/meta/llama3-8b-instruct-maas": { + "input_cost_per_token": 0.0, + "litellm_provider": "vertex_ai-llama_models", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_tokens": 32000, + "mode": "chat", + "output_cost_per_token": 0.0, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_tool_choice": true + }, + "vertex_ai/minimaxai/minimax-m2-maas": { + "input_cost_per_token": 3e-07, + "litellm_provider": "vertex_ai-minimax_models", + "max_input_tokens": 196608, + "max_output_tokens": 196608, + "max_tokens": 196608, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/moonshotai/kimi-k2-thinking-maas": { + "input_cost_per_token": 6e-07, + "litellm_provider": "vertex_ai-moonshot_models", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 2.5e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing#partner-models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "vertex_ai/mistral-medium-3": { + "input_cost_per_token": 4e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-medium-3@001": { + "input_cost_per_token": 4e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistralai/mistral-medium-3": { + "input_cost_per_token": 4e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistralai/mistral-medium-3@001": { + "input_cost_per_token": 4e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 2e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-large-2411": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-large@2407": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 6e-06, + 
"supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-large@2411-001": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-large@latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 6e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-nemo@2407": { + "input_cost_per_token": 3e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-nemo@latest": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-07, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-small-2503": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true + }, + "vertex_ai/mistral-small-2503@001": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-mistral_models", + "max_input_tokens": 32000, + "max_output_tokens": 8191, + "max_tokens": 8191, + "mode": "chat", + "output_cost_per_token": 3e-06, + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/mistral-ocr-2505": { + "litellm_provider": "vertex_ai", + "mode": "ocr", + "ocr_cost_per_page": 5e-4, + "supported_endpoints": [ + "/v1/ocr" + ], + "source": "https://cloud.google.com/generative-ai-app-builder/pricing" + }, + "vertex_ai/openai/gpt-oss-120b-maas": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-openai_models", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 6e-07, + "source": "https://console.cloud.google.com/vertex-ai/publishers/openai/model-garden/gpt-oss-120b-maas", + "supports_reasoning": true + }, + "vertex_ai/openai/gpt-oss-20b-maas": { + "input_cost_per_token": 7.5e-08, + "litellm_provider": "vertex_ai-openai_models", + "max_input_tokens": 131072, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 3e-07, + "source": "https://console.cloud.google.com/vertex-ai/publishers/openai/model-garden/gpt-oss-120b-maas", + "supports_reasoning": true + }, + "vertex_ai/qwen/qwen3-235b-a22b-instruct-2507-maas": { + "input_cost_per_token": 2.5e-07, + "litellm_provider": "vertex_ai-qwen_models", + "max_input_tokens": 262144, + "max_output_tokens": 16384, + "max_tokens": 16384, + "mode": "chat", + "output_cost_per_token": 1e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + 
"vertex_ai/qwen/qwen3-coder-480b-a35b-instruct-maas": { + "input_cost_per_token": 1e-06, + "litellm_provider": "vertex_ai-qwen_models", + "max_input_tokens": 262144, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 4e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/qwen/qwen3-next-80b-a3b-instruct-maas": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-qwen_models", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/qwen/qwen3-next-80b-a3b-thinking-maas": { + "input_cost_per_token": 1.5e-07, + "litellm_provider": "vertex_ai-qwen_models", + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "max_tokens": 262144, + "mode": "chat", + "output_cost_per_token": 1.2e-06, + "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing", + "supports_function_calling": true, + "supports_tool_choice": true + }, + "vertex_ai/veo-2.0-generate-001": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.35, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.0-fast-generate-preview": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.15, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.0-generate-preview": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.4, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.0-fast-generate-001": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.15, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.0-generate-001": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.4, + "source": "https://ai.google.dev/gemini-api/docs/video", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.1-generate-preview": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.4, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "vertex_ai/veo-3.1-fast-generate-preview": { + "litellm_provider": "vertex_ai-video-models", + "max_input_tokens": 
1024, + "max_tokens": 1024, + "mode": "video_generation", + "output_cost_per_second": 0.15, + "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/veo", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ] + }, + "voyage/rerank-2": { + "input_cost_per_token": 5e-08, + "litellm_provider": "voyage", + "max_input_tokens": 16000, + "max_output_tokens": 16000, + "max_query_tokens": 16000, + "max_tokens": 16000, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "voyage/rerank-2-lite": { + "input_cost_per_token": 2e-08, + "litellm_provider": "voyage", + "max_input_tokens": 8000, + "max_output_tokens": 8000, + "max_query_tokens": 8000, + "max_tokens": 8000, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "voyage/rerank-2.5": { + "input_cost_per_token": 5e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_query_tokens": 32000, + "max_tokens": 32000, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "voyage/rerank-2.5-lite": { + "input_cost_per_token": 2e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "max_query_tokens": 32000, + "max_tokens": 32000, + "mode": "rerank", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-2": { + "input_cost_per_token": 1e-07, + "litellm_provider": "voyage", + "max_input_tokens": 4000, + "max_tokens": 4000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-3": { + "input_cost_per_token": 6e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-3-large": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-3-lite": { + "input_cost_per_token": 2e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-3.5": { + "input_cost_per_token": 6e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-3.5-lite": { + "input_cost_per_token": 2e-08, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-code-2": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "voyage", + "max_input_tokens": 16000, + "max_tokens": 16000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-code-3": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-context-3": { + "input_cost_per_token": 1.8e-07, + "litellm_provider": "voyage", + "max_input_tokens": 120000, + "max_tokens": 120000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-finance-2": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-large-2": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "voyage", + "max_input_tokens": 16000, + "max_tokens": 16000, + "mode": "embedding", + "output_cost_per_token": 0.0 
+ }, + "voyage/voyage-law-2": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "voyage", + "max_input_tokens": 16000, + "max_tokens": 16000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-lite-01": { + "input_cost_per_token": 1e-07, + "litellm_provider": "voyage", + "max_input_tokens": 4096, + "max_tokens": 4096, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-lite-02-instruct": { + "input_cost_per_token": 1e-07, + "litellm_provider": "voyage", + "max_input_tokens": 4000, + "max_tokens": 4000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "voyage/voyage-multimodal-3": { + "input_cost_per_token": 1.2e-07, + "litellm_provider": "voyage", + "max_input_tokens": 32000, + "max_tokens": 32000, + "mode": "embedding", + "output_cost_per_token": 0.0 + }, + "wandb/openai/gpt-oss-120b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 0.015, + "output_cost_per_token": 0.06, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/openai/gpt-oss-20b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 0.005, + "output_cost_per_token": 0.02, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/zai-org/GLM-4.5": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 0.055, + "output_cost_per_token": 0.2, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/Qwen/Qwen3-235B-A22B-Instruct-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 0.01, + "output_cost_per_token": 0.01, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/Qwen/Qwen3-Coder-480B-A35B-Instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 0.1, + "output_cost_per_token": 0.15, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/Qwen/Qwen3-235B-A22B-Thinking-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 0.01, + "output_cost_per_token": 0.01, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/moonshotai/Kimi-K2-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 6e-07, + "output_cost_per_token": 2.5e-06, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/meta-llama/Llama-3.1-8B-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.022, + "output_cost_per_token": 0.022, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/deepseek-ai/DeepSeek-V3.1": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.055, + "output_cost_per_token": 0.165, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/deepseek-ai/DeepSeek-R1-0528": { + "max_tokens": 161000, + "max_input_tokens": 161000, + "max_output_tokens": 161000, + "input_cost_per_token": 0.135, + "output_cost_per_token": 0.54, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/deepseek-ai/DeepSeek-V3-0324": { + "max_tokens": 161000, + "max_input_tokens": 161000, + "max_output_tokens": 161000, + "input_cost_per_token": 0.114, + "output_cost_per_token": 0.275, + "litellm_provider": "wandb", + "mode": "chat" + }, + 
"wandb/meta-llama/Llama-3.3-70B-Instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.071, + "output_cost_per_token": 0.071, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/meta-llama/Llama-4-Scout-17B-16E-Instruct": { + "max_tokens": 64000, + "max_input_tokens": 64000, + "max_output_tokens": 64000, + "input_cost_per_token": 0.017, + "output_cost_per_token": 0.066, + "litellm_provider": "wandb", + "mode": "chat" + }, + "wandb/microsoft/Phi-4-mini-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.008, + "output_cost_per_token": 0.035, + "litellm_provider": "wandb", + "mode": "chat" + }, + "watsonx/ibm/granite-3-8b-instruct": { + "input_cost_per_token": 0.2e-06, + "litellm_provider": "watsonx", + "max_input_tokens": 8192, + "max_output_tokens": 1024, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 0.2e-06, + "supports_audio_input": false, + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "watsonx/mistralai/mistral-large": { + "input_cost_per_token": 3e-06, + "litellm_provider": "watsonx", + "max_input_tokens": 131072, + "max_output_tokens": 16384, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 10e-06, + "supports_audio_input": false, + "supports_audio_output": false, + "supports_function_calling": true, + "supports_parallel_function_calling": false, + "supports_prompt_caching": true, + "supports_response_schema": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": false + }, + "watsonx/bigscience/mt0-xxl-13b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0005, + "output_cost_per_token": 0.002, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/core42/jais-13b-chat": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.0005, + "output_cost_per_token": 0.002, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/google/flan-t5-xl-3b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.6e-06, + "output_cost_per_token": 0.6e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-13b-chat-v2": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.6e-06, + "output_cost_per_token": 0.6e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-13b-instruct-v2": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.6e-06, + "output_cost_per_token": 0.6e-06, + "litellm_provider": "watsonx", + "mode": "chat", + 
"supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-3-3-8b-instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.2e-06, + "output_cost_per_token": 0.2e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/ibm/granite-4-h-small": { + "max_tokens": 20480, + "max_input_tokens": 20480, + "max_output_tokens": 20480, + "input_cost_per_token": 0.06e-06, + "output_cost_per_token": 0.25e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/ibm/granite-guardian-3-2-2b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.1e-06, + "output_cost_per_token": 0.1e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-guardian-3-3-8b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.2e-06, + "output_cost_per_token": 0.2e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-ttm-1024-96-r2": { + "max_tokens": 512, + "max_input_tokens": 512, + "max_output_tokens": 512, + "input_cost_per_token": 0.38e-06, + "output_cost_per_token": 0.38e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-ttm-1536-96-r2": { + "max_tokens": 512, + "max_input_tokens": 512, + "max_output_tokens": 512, + "input_cost_per_token": 0.38e-06, + "output_cost_per_token": 0.38e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-ttm-512-96-r2": { + "max_tokens": 512, + "max_input_tokens": 512, + "max_output_tokens": 512, + "input_cost_per_token": 0.38e-06, + "output_cost_per_token": 0.38e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/ibm/granite-vision-3-2-2b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.1e-06, + "output_cost_per_token": 0.1e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": true + }, + "watsonx/meta-llama/llama-3-2-11b-vision-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.35e-06, + "output_cost_per_token": 0.35e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "watsonx/meta-llama/llama-3-2-1b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.1e-06, + 
"output_cost_per_token": 0.1e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/meta-llama/llama-3-2-3b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.15e-06, + "output_cost_per_token": 0.15e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/meta-llama/llama-3-2-90b-vision-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-06, + "output_cost_per_token": 2e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": true + }, + "watsonx/meta-llama/llama-3-3-70b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.71e-06, + "output_cost_per_token": 0.71e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/meta-llama/llama-4-maverick-17b": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.35e-06, + "output_cost_per_token": 1.4e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/meta-llama/llama-guard-3-11b-vision": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.35e-06, + "output_cost_per_token": 0.35e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": true + }, + "watsonx/mistralai/mistral-medium-2505": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 3e-06, + "output_cost_per_token": 10e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/mistralai/mistral-small-2503": { + "max_tokens": 32000, + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "input_cost_per_token": 0.1e-06, + "output_cost_per_token": 0.3e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/mistralai/mistral-small-3-1-24b-instruct-2503": { + "max_tokens": 32000, + "max_input_tokens": 32000, + "max_output_tokens": 32000, + "input_cost_per_token": 0.1e-06, + "output_cost_per_token": 0.3e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": true, + "supports_parallel_function_calling": true, + "supports_vision": false + }, + "watsonx/mistralai/pixtral-12b-2409": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 0.35e-06, + "output_cost_per_token": 0.35e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": true + }, + 
"watsonx/openai/gpt-oss-120b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 0.15e-06, + "output_cost_per_token": 0.6e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/sdaia/allam-1-13b-instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 1.8e-06, + "output_cost_per_token": 1.8e-06, + "litellm_provider": "watsonx", + "mode": "chat", + "supports_function_calling": false, + "supports_parallel_function_calling": false, + "supports_vision": false + }, + "watsonx/whisper-large-v3-turbo": { + "input_cost_per_second": 0.0001, + "output_cost_per_second": 0.0001, + "litellm_provider": "watsonx", + "mode": "audio_transcription", + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "whisper-1": { + "input_cost_per_second": 0.0001, + "litellm_provider": "openai", + "mode": "audio_transcription", + "output_cost_per_second": 0.0001, + "supported_endpoints": [ + "/v1/audio/transcriptions" + ] + }, + "xai/grok-2": { + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-2-1212": { + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-2-latest": { + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-2-vision": { + "input_cost_per_image": 2e-06, + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-2-vision-1212": { + "input_cost_per_image": 2e-06, + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-2-vision-latest": { + "input_cost_per_image": 2e-06, + "input_cost_per_token": 2e-06, + "litellm_provider": "xai", + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "max_tokens": 32768, + "mode": "chat", + "output_cost_per_token": 1e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-3": { + "input_cost_per_token": 3e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": 
"https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-beta": { + "input_cost_per_token": 3e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-fast-beta": { + "input_cost_per_token": 5e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-fast-latest": { + "input_cost_per_token": 5e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 2.5e-05, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-latest": { + "input_cost_per_token": 3e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-mini": { + "input_cost_per_token": 3e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-mini-beta": { + "input_cost_per_token": 3e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-mini-fast": { + "input_cost_per_token": 6e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-06, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-mini-fast-beta": { + "input_cost_per_token": 6e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-06, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, 
+ "xai/grok-3-mini-fast-latest": { + "input_cost_per_token": 6e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 4e-06, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-3-mini-latest": { + "input_cost_per_token": 3e-07, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 5e-07, + "source": "https://x.ai/api#pricing", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": false, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4": { + "input_cost_per_token": 3e-06, + "litellm_provider": "xai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4-fast-reasoning": { + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "cache_read_input_token_cost": 0.05e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4-fast-non-reasoning": { + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "cache_read_input_token_cost": 0.05e-06, + "max_tokens": 2e6, + "mode": "chat", + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4-0709": { + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_128k_tokens": 6e-06, + "litellm_provider": "xai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_above_128k_tokens": 30e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4-latest": { + "input_cost_per_token": 3e-06, + "input_cost_per_token_above_128k_tokens": 6e-06, + "litellm_provider": "xai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "output_cost_per_token_above_128k_tokens": 30e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_web_search": true + }, + "xai/grok-4-1-fast": { + "cache_read_input_token_cost": 0.05e-06, + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + 
"output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models/grok-4-1-fast-reasoning", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-4-1-fast-reasoning": { + "cache_read_input_token_cost": 0.05e-06, + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models/grok-4-1-fast-reasoning", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-4-1-fast-reasoning-latest": { + "cache_read_input_token_cost": 0.05e-06, + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models/grok-4-1-fast-reasoning", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-4-1-fast-non-reasoning": { + "cache_read_input_token_cost": 0.05e-06, + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models/grok-4-1-fast-non-reasoning", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-4-1-fast-non-reasoning-latest": { + "cache_read_input_token_cost": 0.05e-06, + "input_cost_per_token": 0.2e-06, + "input_cost_per_token_above_128k_tokens": 0.4e-06, + "litellm_provider": "xai", + "max_input_tokens": 2e6, + "max_output_tokens": 2e6, + "max_tokens": 2e6, + "mode": "chat", + "output_cost_per_token": 0.5e-06, + "output_cost_per_token_above_128k_tokens": 1e-06, + "source": "https://docs.x.ai/docs/models/grok-4-1-fast-non-reasoning", + "supports_audio_input": true, + "supports_function_calling": true, + "supports_response_schema": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-beta": { + "input_cost_per_token": 5e-06, + "litellm_provider": "xai", + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "max_tokens": 131072, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "xai/grok-code-fast": { + "cache_read_input_token_cost": 2e-08, + "input_cost_per_token": 2e-07, + "litellm_provider": "xai", + 
"max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "xai/grok-code-fast-1": { + "cache_read_input_token_cost": 2e-08, + "input_cost_per_token": 2e-07, + "litellm_provider": "xai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "xai/grok-code-fast-1-0825": { + "cache_read_input_token_cost": 2e-08, + "input_cost_per_token": 2e-07, + "litellm_provider": "xai", + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "max_tokens": 256000, + "mode": "chat", + "output_cost_per_token": 1.5e-06, + "source": "https://docs.x.ai/docs/models", + "supports_function_calling": true, + "supports_reasoning": true, + "supports_tool_choice": true + }, + "xai/grok-vision-beta": { + "input_cost_per_image": 5e-06, + "input_cost_per_token": 5e-06, + "litellm_provider": "xai", + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "max_tokens": 8192, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "supports_web_search": true + }, + "zai/glm-4.6": { + "input_cost_per_token": 6e-07, + "output_cost_per_token": 2.2e-06, + "litellm_provider": "zai", + "max_input_tokens": 200000, + "max_output_tokens": 128000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5": { + "input_cost_per_token": 6e-07, + "output_cost_per_token": 2.2e-06, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5v": { + "input_cost_per_token": 6e-07, + "output_cost_per_token": 1.8e-06, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "supports_vision": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5-x": { + "input_cost_per_token": 2.2e-06, + "output_cost_per_token": 8.9e-06, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5-air": { + "input_cost_per_token": 2e-07, + "output_cost_per_token": 1.1e-06, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5-airx": { + "input_cost_per_token": 1.1e-06, + "output_cost_per_token": 4.5e-06, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4-32b-0414-128k": { + 
"input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "zai/glm-4.5-flash": { + "input_cost_per_token": 0, + "output_cost_per_token": 0, + "litellm_provider": "zai", + "max_input_tokens": 128000, + "max_output_tokens": 32000, + "mode": "chat", + "supports_function_calling": true, + "supports_tool_choice": true, + "source": "https://docs.z.ai/guides/overview/pricing" + }, + "vertex_ai/search_api": { + "input_cost_per_query": 1.5e-03, + "litellm_provider": "vertex_ai", + "mode": "vector_store" + }, + "openai/container": { + "code_interpreter_cost_per_session": 0.03, + "litellm_provider": "openai", + "mode": "chat" + }, + "openai/sora-2": { + "litellm_provider": "openai", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.10, + "source": "https://platform.openai.com/docs/api-reference/videos", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "720x1280", + "1280x720" + ] + }, + "openai/sora-2-pro": { + "litellm_provider": "openai", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.30, + "source": "https://platform.openai.com/docs/api-reference/videos", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "720x1280", + "1280x720" + ] + }, + "azure/sora-2": { + "litellm_provider": "azure", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.10, + "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "720x1280", + "1280x720" + ] + }, + "azure/sora-2-pro": { + "litellm_provider": "azure", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.30, + "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "720x1280", + "1280x720" + ] + }, + "azure/sora-2-pro-high-res": { + "litellm_provider": "azure", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.50, + "source": "https://azure.microsoft.com/en-us/products/ai-services/video-generation", + "supported_modalities": [ + "text" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "1024x1792", + "1792x1024" + ] + }, + "runwayml/gen4_turbo": { + "litellm_provider": "runwayml", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.05, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "1280x720", + "720x1280" + ], + "metadata": { + "comment": "5 credits per second @ $0.01 per credit = $0.05 per second" + } + }, + "runwayml/gen4_aleph": { + "litellm_provider": "runwayml", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.15, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "1280x720", + "720x1280" + ], + 
"metadata": { + "comment": "15 credits per second @ $0.01 per credit = $0.15 per second" + } + }, + "runwayml/gen3a_turbo": { + "litellm_provider": "runwayml", + "mode": "video_generation", + "output_cost_per_video_per_second": 0.05, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "video" + ], + "supported_resolutions": [ + "1280x720", + "720x1280" + ], + "metadata": { + "comment": "5 credits per second @ $0.01 per credit = $0.05 per second" + } + }, + "runwayml/gen4_image": { + "litellm_provider": "runwayml", + "mode": "image_generation", + "input_cost_per_image": 0.05, + "output_cost_per_image": 0.05, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "image" + ], + "supported_resolutions": [ + "1280x720", + "1920x1080" + ], + "metadata": { + "comment": "5 credits per 720p image or 8 credits per 1080p image @ $0.01 per credit. Using 5 credits ($0.05) as base cost" + } + }, + "runwayml/gen4_image_turbo": { + "litellm_provider": "runwayml", + "mode": "image_generation", + "input_cost_per_image": 0.02, + "output_cost_per_image": 0.02, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "image" + ], + "supported_resolutions": [ + "1280x720", + "1920x1080" + ], + "metadata": { + "comment": "2 credits per image (any resolution) @ $0.01 per credit = $0.02 per image" + } + }, + "runwayml/eleven_multilingual_v2": { + "litellm_provider": "runwayml", + "mode": "audio_speech", + "input_cost_per_character": 3e-07, + "source": "https://docs.dev.runwayml.com/guides/pricing/", + "metadata": { + "comment": "Estimated cost based on standard TTS pricing. RunwayML uses ElevenLabs models." 
+ } + }, + "fireworks_ai/accounts/fireworks/models/qwen3-coder-480b-a35b-instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 4.5e-07, + "output_cost_per_token": 1.8e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/flux-kontext-pro": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 4e-08, + "output_cost_per_token": 4e-08, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/SSD-1B": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1.3e-10, + "output_cost_per_token": 1.3e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/chronos-hermes-13b-v2": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-13b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-13b-instruct": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-13b-python": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-34b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-34b-instruct": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-34b-python": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-70b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-70b-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-70b-python": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + 
"litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-7b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-7b-instruct": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-llama-7b-python": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/code-qwen-1p5-7b": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/codegemma-2b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/codegemma-7b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-671b-v2-p1": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-v1-preview-llama-3b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-v1-preview-llama-70b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-v1-preview-llama-8b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-v1-preview-qwen-14b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/cogito-v1-preview-qwen-32b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/flux-kontext-max": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + 
"input_cost_per_token": 8e-08, + "output_cost_per_token": 8e-08, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/dbrx-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-1b-base": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-33b-instruct": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-7b-base": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-7b-base-v1p5": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-7b-instruct-v1p5": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-v2-lite-base": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-coder-v2-lite-instruct": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-prover-v2": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-0528-distill-qwen3-8b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-llama-70b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-llama-8b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + 
"fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-qwen-14b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-qwen-1p5b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-qwen-32b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-r1-distill-qwen-7b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v2-lite-chat": { + "max_tokens": 163840, + "max_input_tokens": 163840, + "max_output_tokens": 163840, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/deepseek-v2p5": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/devstral-small-2505": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/dobby-mini-unhinged-plus-llama-3-1-8b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/dobby-unhinged-llama-3-3-70b-new": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/dolphin-2-9-2-qwen2-72b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/dolphin-2p6-mixtral-8x7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/ernie-4p5-21b-a3b-pt": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/ernie-4p5-300b-a47b-pt": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + 
"input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/fare-20b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/firefunction-v1": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/firellava-13b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/firesearch-ocr-v6": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/fireworks-asr-large": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "audio_transcription" + }, + "fireworks_ai/accounts/fireworks/models/fireworks-asr-v2": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "audio_transcription" + }, + "fireworks_ai/accounts/fireworks/models/flux-1-dev": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/flux-1-dev-controlnet-union": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-09, + "output_cost_per_token": 1e-09, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/flux-1-dev-fp8": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 5e-10, + "output_cost_per_token": 5e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/flux-1-schnell": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/flux-1-schnell-fp8": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 3.5e-10, + "output_cost_per_token": 3.5e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/gemma-2b-it": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/gemma-3-27b-it": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + 
"input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/gemma-7b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/gemma-7b-it": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/gemma2-9b-it": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/glm-4p5v": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat", + "supports_reasoning": true + }, + "fireworks_ai/accounts/fireworks/models/gpt-oss-safeguard-120b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/gpt-oss-safeguard-20b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/hermes-2-pro-mistral-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/internvl3-38b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/internvl3-78b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/internvl3-8b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/japanese-stable-diffusion-xl": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1.3e-10, + "output_cost_per_token": 1.3e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/kat-coder": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/kat-dev-32b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + 
"max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/kat-dev-72b-exp": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-guard-2-8b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-guard-3-1b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-guard-3-8b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-13b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-13b-chat": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-70b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-70b-chat": { + "max_tokens": 2048, + "max_input_tokens": 2048, + "max_output_tokens": 2048, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-7b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v2-7b-chat": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct-hf": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3-8b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + 
"input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3-8b-instruct-hf": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-405b-instruct-long": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-70b-instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-70b-instruct-1b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p1-nemotron-70b-instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-1b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p2-3b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llama-v3p3-70b-instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llamaguard-7b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/llava-yi-34b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/minimax-m1-80k": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/minimax-m2": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 3e-07, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/ministral-3-14b-instruct-2512": { + "max_tokens": 256000, + "max_input_tokens": 256000, 
+ "max_output_tokens": 256000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/ministral-3-3b-instruct-2512": { + "max_tokens": 256000, + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/ministral-3-8b-instruct-2512": { + "max_tokens": 256000, + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-7b-instruct-4k": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-7b-instruct-v0p2": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-7b-instruct-v3": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-7b-v0p2": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-large-3-fp8": { + "max_tokens": 256000, + "max_input_tokens": 256000, + "max_output_tokens": 256000, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-nemo-base-2407": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-nemo-instruct-2407": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mistral-small-24b-instruct-2501": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mixtral-8x22b": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + 
"fireworks_ai/accounts/fireworks/models/mixtral-8x22b-instruct": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 1.2e-06, + "output_cost_per_token": 1.2e-06, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mixtral-8x7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mixtral-8x7b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mixtral-8x7b-instruct-hf": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/mythomax-l2-13b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nemotron-nano-v2-12b-vl": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-capybara-7b-v1p9": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-hermes-2-mixtral-8x7b-dpo": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-hermes-2-yi-34b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-hermes-llama2-13b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-hermes-llama2-70b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nous-hermes-llama2-7b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nvidia-nemotron-nano-12b-v2": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + 
"litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/nvidia-nemotron-nano-9b-v2": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/openchat-3p5-0106-7b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/openhermes-2-mistral-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/openhermes-2p5-mistral-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/openorca-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phi-2-3b": { + "max_tokens": 2048, + "max_input_tokens": 2048, + "max_output_tokens": 2048, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phi-3-mini-128k-instruct": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phi-3-vision-128k-instruct": { + "max_tokens": 32064, + "max_input_tokens": 32064, + "max_output_tokens": 32064, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phind-code-llama-34b-python-v1": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phind-code-llama-34b-v1": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/phind-code-llama-34b-v2": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/playground-v2-1024px-aesthetic": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1.3e-10, + "output_cost_per_token": 1.3e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/playground-v2-5-1024px-aesthetic": { + "max_tokens": 4096, + "max_input_tokens": 4096, + 
"max_output_tokens": 4096, + "input_cost_per_token": 1.3e-10, + "output_cost_per_token": 1.3e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/pythia-12b": { + "max_tokens": 2048, + "max_input_tokens": 2048, + "max_output_tokens": 2048, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen-qwq-32b-preview": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen-v2p5-14b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen-v2p5-7b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen1p5-72b-chat": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2-7b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2-vl-2b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2-vl-72b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2-vl-7b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-0p5b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-14b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-1p5b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-32b": { + "max_tokens": 131072, + 
"max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-32b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-72b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-72b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-7b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-0p5b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-0p5b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-14b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-14b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-1p5b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-1p5b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct-128k": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + 
"fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct-32k-rope": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-32b-instruct-64k": { + "max_tokens": 65536, + "max_input_tokens": 65536, + "max_output_tokens": 65536, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-3b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-3b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-coder-7b-instruct": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-math-72b-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-vl-32b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-vl-3b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-vl-72b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen2p5-vl-7b-instruct": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-0p6b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-14b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 
2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-1p7b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-1p7b-fp8-draft": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-1p7b-fp8-draft-131072": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-1p7b-fp8-draft-40960": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-235b-a22b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 2.2e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-235b-a22b-instruct-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 2.2e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-235b-a22b-thinking-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 2.2e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-30b-a3b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-30b-a3b-instruct-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 5e-07, + "output_cost_per_token": 5e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-30b-a3b-thinking-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-32b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-4b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-4b-instruct-2507": { + "max_tokens": 262144, + "max_input_tokens": 262144, + 
"max_output_tokens": 262144, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-8b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-coder-30b-a3b-instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-coder-480b-instruct-bf16": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-embedding-0p6b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "embedding" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-embedding-4b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "embedding" + }, + "fireworks_ai/accounts/fireworks/models/": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "embedding" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-next-80b-a3b-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-next-80b-a3b-thinking": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-reranker-0p6b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "rerank" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-reranker-4b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "rerank" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-reranker-8b": { + "max_tokens": 40960, + "max_input_tokens": 40960, + "max_output_tokens": 40960, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "rerank" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-235b-a22b-instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 2.2e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-235b-a22b-thinking": { + 
"max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 2.2e-07, + "output_cost_per_token": 8.8e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-30b-a3b-instruct": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-30b-a3b-thinking": { + "max_tokens": 262144, + "max_input_tokens": 262144, + "max_output_tokens": 262144, + "input_cost_per_token": 1.5e-07, + "output_cost_per_token": 6e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-32b-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwen3-vl-8b-instruct": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/qwq-32b": { + "max_tokens": 131072, + "max_input_tokens": 131072, + "max_output_tokens": 131072, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/rolm-ocr": { + "max_tokens": 128000, + "max_input_tokens": 128000, + "max_output_tokens": 128000, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/snorkel-mistral-7b-pairrm-dpo": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/stable-diffusion-xl-1024-v1-0": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1.3e-10, + "output_cost_per_token": 1.3e-10, + "litellm_provider": "fireworks_ai", + "mode": "image_generation" + }, + "fireworks_ai/accounts/fireworks/models/stablecode-3b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/starcoder-16b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/starcoder-7b": { + "max_tokens": 8192, + "max_input_tokens": 8192, + "max_output_tokens": 8192, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/starcoder2-15b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + 
"fireworks_ai/accounts/fireworks/models/starcoder2-3b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 1e-07, + "output_cost_per_token": 1e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/starcoder2-7b": { + "max_tokens": 16384, + "max_input_tokens": 16384, + "max_output_tokens": 16384, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/toppy-m-7b": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/whisper-v3": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "audio_transcription" + }, + "fireworks_ai/accounts/fireworks/models/whisper-v3-turbo": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0, + "output_cost_per_token": 0.0, + "litellm_provider": "fireworks_ai", + "mode": "audio_transcription" + }, + "fireworks_ai/accounts/fireworks/models/yi-34b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/yi-34b-200k-capybara": { + "max_tokens": 200000, + "max_input_tokens": 200000, + "max_output_tokens": 200000, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/yi-34b-chat": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 9e-07, + "output_cost_per_token": 9e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/yi-6b": { + "max_tokens": 4096, + "max_input_tokens": 4096, + "max_output_tokens": 4096, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + }, + "fireworks_ai/accounts/fireworks/models/zephyr-7b-beta": { + "max_tokens": 32768, + "max_input_tokens": 32768, + "max_output_tokens": 32768, + "input_cost_per_token": 2e-07, + "output_cost_per_token": 2e-07, + "litellm_provider": "fireworks_ai", + "mode": "chat" + } +} diff --git a/backend/tools.go b/backend/tools.go new file mode 100644 index 00000000..f06d2c78 --- /dev/null +++ b/backend/tools.go @@ -0,0 +1,9 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "entgo.io/ent/cmd/ent" + _ "github.com/google/wire/cmd/wire" +) diff --git a/build_image.sh b/build_image.sh new file mode 100644 index 00000000..2cea4925 --- /dev/null +++ b/build_image.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# 本地构建镜像的快速脚本,避免在命令行反复输入构建参数。 + +docker build -t sub2api:latest \ + --build-arg GOPROXY=https://goproxy.cn,direct \ + --build-arg GOSUMDB=sum.golang.google.cn \ + -f Dockerfile \ + . 
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 00000000..424ce9eb
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,506 @@
+# Sub2API Configuration File
+# Sub2API 配置文件
+#
+# Copy this file to /etc/sub2api/config.yaml and modify as needed
+# 复制此文件到 /etc/sub2api/config.yaml 并根据需要修改
+#
+# Documentation / 文档: https://github.com/Wei-Shaw/sub2api
+
+# =============================================================================
+# Server Configuration
+# 服务器配置
+# =============================================================================
+server:
+  # Bind address (0.0.0.0 for all interfaces)
+  # 绑定地址(0.0.0.0 表示监听所有网络接口)
+  host: "0.0.0.0"
+  # Port to listen on
+  # 监听端口
+  port: 8080
+  # Mode: "debug" for development, "release" for production
+  # 运行模式:"debug" 用于开发,"release" 用于生产环境
+  mode: "release"
+  # Trusted proxies for X-Forwarded-For parsing (CIDR/IP). Empty disables trusted proxies.
+  # 信任的代理地址(CIDR/IP 格式),用于解析 X-Forwarded-For 头。留空则禁用代理信任。
+  trusted_proxies: []
+
+# =============================================================================
+# Run Mode Configuration
+# 运行模式配置
+# =============================================================================
+# Run mode: "standard" (default) or "simple" (for internal use)
+# 运行模式:"standard"(默认)或 "simple"(内部使用)
+# - standard: Full SaaS features with billing/balance checks
+# - standard: 完整 SaaS 功能,包含计费和余额校验
+# - simple: Hides SaaS features and skips billing/balance checks
+# - simple: 隐藏 SaaS 功能,跳过计费和余额校验
+run_mode: "standard"
+
+# =============================================================================
+# CORS Configuration
+# 跨域资源共享 (CORS) 配置
+# =============================================================================
+cors:
+  # Allowed origins list. Leave empty to disable cross-origin requests.
+  # 允许的来源列表。留空则禁用跨域请求。
+  allowed_origins: []
+  # Allow credentials (cookies/authorization headers). Cannot be used with "*".
+  # 允许携带凭证(cookies/授权头)。不能与 "*" 通配符同时使用。
+  allow_credentials: true
+
+# =============================================================================
+# Security Configuration
+# 安全配置
+# =============================================================================
+security:
+  url_allowlist:
+    # Enable URL allowlist validation (disable to skip all URL checks)
+    # 启用 URL 白名单验证(禁用则跳过所有 URL 检查)
+    enabled: false
+    # Allowed upstream hosts for API proxying
+    # 允许代理的上游 API 主机列表
+    upstream_hosts:
+      - "api.openai.com"
+      - "api.anthropic.com"
+      - "api.kimi.com"
+      - "open.bigmodel.cn"
+      - "api.minimaxi.com"
+      - "generativelanguage.googleapis.com"
+      - "cloudcode-pa.googleapis.com"
+      - "*.openai.azure.com"
+    # Allowed hosts for pricing data download
+    # 允许下载定价数据的主机列表
+    pricing_hosts:
+      - "raw.githubusercontent.com"
+    # Allowed hosts for CRS sync (required when using CRS sync)
+    # 允许 CRS 同步的主机列表(使用 CRS 同步功能时必须配置)
+    crs_hosts: []
+    # Allow localhost/private IPs for upstream/pricing/CRS (use only in trusted networks)
+    # 允许本地/私有 IP 地址用于上游/定价/CRS(仅在可信网络中使用)
+    allow_private_hosts: true
+    # Allow http:// URLs when allowlist is disabled (default: false, require https)
+    # 白名单禁用时是否允许 http:// URL(默认: false,要求 https)
+    allow_insecure_http: true
+  response_headers:
+    # Enable configurable response header filtering (disable to use default allowlist)
+    # 启用可配置的响应头过滤(禁用则使用默认白名单)
+    enabled: false
+    # Extra allowed response headers from upstream
+    # 额外允许的上游响应头
+    additional_allowed: []
+    # Force-remove response headers from upstream
+    # 强制移除的上游响应头
+    force_remove: []
+  csp:
+    # Enable Content-Security-Policy header
+    # 启用内容安全策略 (CSP) 响应头
+    enabled: true
+    # Default CSP policy (override if you host assets on other domains)
+    # 默认 CSP 策略(如果静态资源托管在其他域名,请自行覆盖)
+    policy: "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-ancestors 'none'; base-uri 'self'; form-action 'self'"
+  proxy_probe:
+    # Allow skipping TLS verification for proxy probe (debug only)
+    # 允许代理探测时跳过 TLS 证书验证(仅用于调试)
+    insecure_skip_verify: false
+
+# =============================================================================
+# Gateway Configuration
+# 网关配置
+# =============================================================================
+gateway:
+  # Timeout for waiting upstream response headers (seconds)
+  # 等待上游响应头超时时间(秒)
+  response_header_timeout: 600
+  # Max request body size in bytes (default: 100MB)
+  # 请求体最大字节数(默认 100MB)
+  max_body_size: 104857600
+  # Connection pool isolation strategy:
+  # 连接池隔离策略:
+  # - proxy: Isolate by proxy, same proxy shares connection pool (suitable for few proxies, many accounts)
+  # - proxy: 按代理隔离,同一代理共享连接池(适合代理少、账户多)
+  # - account: Isolate by account, same account shares connection pool (suitable for few accounts, strict isolation)
+  # - account: 按账户隔离,同一账户共享连接池(适合账户少、需严格隔离)
+  # - account_proxy: Isolate by account+proxy combination (default, finest granularity)
+  # - account_proxy: 按账户+代理组合隔离(默认,最细粒度)
+  connection_pool_isolation: "account_proxy"
+  # HTTP upstream connection pool settings (HTTP/2 + multi-proxy scenario defaults)
+  # HTTP 上游连接池配置(HTTP/2 + 多代理场景默认值)
+  # Max idle connections across all hosts
+  # 所有主机的最大空闲连接数
+  max_idle_conns: 240
+  # Max idle connections per host
+  # 每个主机的最大空闲连接数
+  max_idle_conns_per_host: 120
+  # Max connections per host
+  # 每个主机的最大连接数
+  max_conns_per_host: 240
+  # Idle connection timeout (seconds)
+  # 空闲连接超时时间(秒)
+  idle_conn_timeout_seconds: 90
+  # Upstream client cache settings
+  # 上游连接池客户端缓存配置
+  # max_upstream_clients: Max cached clients, evicts least recently used when exceeded
+  # max_upstream_clients: 最大缓存客户端数量,超出后淘汰最久未使用的
+  max_upstream_clients: 5000
+  # client_idle_ttl_seconds: Client idle reclaim threshold (seconds), reclaimed when idle and no active requests
+  # client_idle_ttl_seconds: 客户端空闲回收阈值(秒),超时且无活跃请求时回收
+  client_idle_ttl_seconds: 900
+  # Concurrency slot expiration time (minutes)
+  # 并发槽位过期时间(分钟)
+  concurrency_slot_ttl_minutes: 30
+  # Stream data interval timeout (seconds), 0=disable
+  # 流数据间隔超时(秒),0=禁用
+  stream_data_interval_timeout: 180
+  # Stream keepalive interval (seconds), 0=disable
+  # 流式 keepalive 间隔(秒),0=禁用
+  stream_keepalive_interval: 10
+  # SSE max line size in bytes (default: 40MB)
+  # SSE 单行最大字节数(默认 40MB)
+  max_line_size: 41943040
+  # Log upstream error response body summary (safe/truncated; does not log request content)
+  # 记录上游错误响应体摘要(安全/截断;不记录请求内容)
+  log_upstream_error_body: true
+  # Max bytes to log from upstream error body
+  # 记录上游错误响应体的最大字节数
+  log_upstream_error_body_max_bytes: 2048
+  # Auto inject anthropic-beta header for API-key accounts when needed (default: off)
+  # 需要时自动为 API-key 账户注入 anthropic-beta 头(默认:关闭)
+  inject_beta_for_apikey: false
+  # Allow failover on selected 400 errors (default: off)
+  # 允许在特定 400 错误时进行故障转移(默认:关闭)
+  failover_on_400: false
+
+# =============================================================================
+# API Key Auth Cache Configuration
+# API Key 认证缓存配置
+# =============================================================================
+api_key_auth_cache:
+  # L1 cache size (entries), in-process LRU/TTL cache
+  # L1 缓存容量(条目数),进程内 LRU/TTL 缓存
+  l1_size: 65535
+  # L1 cache TTL (seconds)
+  # L1 缓存 TTL(秒)
+  l1_ttl_seconds: 15
+  # L2 cache TTL (seconds), stored in Redis
+  # L2 缓存 TTL(秒),Redis 中存储
+  l2_ttl_seconds: 300
+  # Negative cache TTL (seconds)
+  # 负缓存 TTL(秒)
+  negative_ttl_seconds: 30
+  # TTL jitter percent (0-100)
+  # TTL 抖动百分比(0-100)
+  jitter_percent: 10
+  # Enable singleflight for cache misses
+  # 缓存未命中时启用 singleflight 合并回源
+  singleflight: true
+
+# =============================================================================
+# Dashboard Cache Configuration
+# 仪表盘缓存配置
+# =============================================================================
+dashboard_cache:
+  # Enable dashboard cache
+  # 启用仪表盘缓存
+  enabled: true
+  # Redis key prefix for multi-environment isolation
+  # Redis key 前缀,用于多环境隔离
+  key_prefix: "sub2api:"
+  # Fresh TTL (seconds); within this window cached stats are considered fresh
+  # 新鲜阈值(秒);命中后处于该窗口视为新鲜数据
+  stats_fresh_ttl_seconds: 15
+  # Cache TTL (seconds) stored in Redis
+  # Redis 缓存 TTL(秒)
+  stats_ttl_seconds: 30
+  # Async refresh timeout (seconds)
+  # 异步刷新超时(秒)
+  stats_refresh_timeout_seconds: 30
+
+# =============================================================================
+# Dashboard Aggregation Configuration (takes effect after restart)
+# 仪表盘预聚合配置(重启生效)
+# =============================================================================
+dashboard_aggregation:
+  # Enable aggregation job
+  # 启用聚合作业
+  enabled: true
+  # Refresh interval (seconds)
+  # 刷新间隔(秒)
+  interval_seconds: 60
+  # Lookback window (seconds) for late-arriving data
+  # 回看窗口(秒),处理迟到数据
+  lookback_seconds: 120
+  # Allow manual backfill
+  # 允许手动回填
+  backfill_enabled: false
+  # Backfill max range (days)
+  # 回填最大跨度(天)
+  backfill_max_days: 31
+  # Recompute recent N days on startup
+  # 启动时重算最近 N 天
+  recompute_days: 2
+  # Retention windows (days)
+  # 保留窗口(天)
+  retention:
+ # Raw usage_logs retention + # 原始 usage_logs 保留天数 + usage_logs_days: 90 + # Hourly aggregation retention + # 小时聚合保留天数 + hourly_days: 180 + # Daily aggregation retention + # 日聚合保留天数 + daily_days: 730 + +# ============================================================================= +# Concurrency Wait Configuration +# 并发等待配置 +# ============================================================================= +concurrency: + # SSE ping interval during concurrency wait (seconds) + # 并发等待期间的 SSE ping 间隔(秒) + ping_interval: 10 + +# ============================================================================= +# Database Configuration (PostgreSQL) +# 数据库配置 (PostgreSQL) +# ============================================================================= +database: + # Database host address + # 数据库主机地址 + host: "localhost" + # Database port + # 数据库端口 + port: 5432 + # Database username + # 数据库用户名 + user: "postgres" + # Database password + # 数据库密码 + password: "your_secure_password_here" + # Database name + # 数据库名称 + dbname: "sub2api" + # SSL mode: disable, require, verify-ca, verify-full + # SSL 模式:disable(禁用), require(要求), verify-ca(验证CA), verify-full(完全验证) + sslmode: "disable" + +# ============================================================================= +# Redis Configuration +# Redis 配置 +# ============================================================================= +redis: + # Redis host address + # Redis 主机地址 + host: "localhost" + # Redis port + # Redis 端口 + port: 6379 + # Redis password (leave empty if no password is set) + # Redis 密码(如果未设置密码则留空) + password: "" + # Database number (0-15) + # 数据库编号(0-15) + db: 0 + +# ============================================================================= +# Ops Monitoring (Optional) +# 运维监控 (可选) +# ============================================================================= +ops: + # Hard switch: disable all ops background jobs and APIs when false + # 硬开关:为 false 时禁用所有 Ops 后台任务与接口 + enabled: true + + # Prefer pre-aggregated tables (ops_metrics_hourly/ops_metrics_daily) for long-window dashboard queries. + # 优先使用预聚合表(用于长时间窗口查询性能) + use_preaggregated_tables: false + + # Data cleanup configuration + # 数据清理配置(vNext 默认统一保留 30 天) + cleanup: + enabled: true + # Cron expression (minute hour dom month dow), e.g. "0 2 * * *" = daily at 2 AM + # Cron 表达式(分 时 日 月 周),例如 "0 2 * * *" = 每天凌晨 2 点 + schedule: "0 2 * * *" + error_log_retention_days: 30 + minute_metrics_retention_days: 30 + hourly_metrics_retention_days: 30 + + # Pre-aggregation configuration + # 预聚合任务配置 + aggregation: + enabled: true + + # OpsMetricsCollector Redis cache (reduces duplicate expensive window aggregation in multi-replica deployments) + # 指标采集 Redis 缓存(多副本部署时减少重复计算) + metrics_collector_cache: + enabled: true + ttl: 65s + +# ============================================================================= +# JWT Configuration +# JWT 配置 +# ============================================================================= +jwt: + # IMPORTANT: Change this to a random string in production! + # 重要:生产环境中请更改为随机字符串! 
+ # Generate with / 生成命令: openssl rand -hex 32 + secret: "change-this-to-a-secure-random-string" + # Token expiration time in hours (max 24) + # 令牌过期时间(小时,最大 24) + expire_hour: 24 + +# ============================================================================= +# Default Settings +# 默认设置 +# ============================================================================= +default: + # Initial admin account (created on first run) + # 初始管理员账户(首次运行时创建) + admin_email: "admin@example.com" + admin_password: "admin123" + + # Default settings for new users + # 新用户默认设置 + # Max concurrent requests per user + # 每用户最大并发请求数 + user_concurrency: 5 + # Initial balance for new users + # 新用户初始余额 + user_balance: 0 + + # API key settings + # API 密钥设置 + # Prefix for generated API keys + # 生成的 API 密钥前缀 + api_key_prefix: "sk-" + + # Rate multiplier (affects billing calculation) + # 费率倍数(影响计费计算) + rate_multiplier: 1.0 + +# ============================================================================= +# Rate Limiting +# 速率限制 +# ============================================================================= +rate_limit: + # Cooldown time (in minutes) when upstream returns 529 (overloaded) + # 上游返回 529(过载)时的冷却时间(分钟) + overload_cooldown_minutes: 10 + +# ============================================================================= +# Pricing Data Source (Optional) +# 定价数据源(可选) +# ============================================================================= +pricing: + # URL to fetch model pricing data (default: LiteLLM) + # 获取模型定价数据的 URL(默认:LiteLLM) + remote_url: "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json" + # Hash verification URL (optional) + # 哈希校验 URL(可选) + hash_url: "" + # Local data directory for caching + # 本地数据缓存目录 + data_dir: "./data" + # Fallback pricing file + # 备用定价文件 + fallback_file: "./resources/model-pricing/model_prices_and_context_window.json" + # Update interval in hours + # 更新间隔(小时) + update_interval_hours: 24 + # Hash check interval in minutes + # 哈希检查间隔(分钟) + hash_check_interval_minutes: 10 + +# ============================================================================= +# Billing Configuration +# 计费配置 +# ============================================================================= +billing: + circuit_breaker: + # Enable circuit breaker for billing service + # 启用计费服务熔断器 + enabled: true + # Number of failures before opening circuit + # 触发熔断的失败次数阈值 + failure_threshold: 5 + # Time to wait before attempting reset (seconds) + # 熔断后重试等待时间(秒) + reset_timeout_seconds: 30 + # Number of requests to allow in half-open state + # 半开状态允许通过的请求数 + half_open_requests: 3 + +# ============================================================================= +# Turnstile Configuration +# Turnstile 人机验证配置 +# ============================================================================= +turnstile: + # Require Turnstile in release mode (when enabled, login/register will fail if not configured) + # 在 release 模式下要求 Turnstile 验证(启用后,若未配置则登录/注册会失败) + required: false + +# ============================================================================= +# Gemini OAuth (Required for Gemini accounts) +# Gemini OAuth 配置(Gemini 账户必需) +# ============================================================================= +# Sub2API supports TWO Gemini OAuth modes: +# Sub2API 支持两种 Gemini OAuth 模式: +# +# 1. Code Assist OAuth (requires GCP project_id) +# 1. 
Code Assist OAuth(需要 GCP project_id)
+#    - Uses: cloudcode-pa.googleapis.com (Code Assist API)
+#    - 使用:cloudcode-pa.googleapis.com(Code Assist API)
+#
+# 2. AI Studio OAuth (no project_id needed)
+# 2. AI Studio OAuth(不需要 project_id)
+#    - Uses: generativelanguage.googleapis.com (AI Studio API)
+#    - 使用:generativelanguage.googleapis.com(AI Studio API)
+#
+# Default: Uses Gemini CLI's public OAuth credentials (same as Google's official CLI tool)
+# 默认:使用 Gemini CLI 的公开 OAuth 凭证(与 Google 官方 CLI 工具相同)
+gemini:
+  oauth:
+    # Gemini CLI public OAuth credentials (works for both Code Assist and AI Studio)
+    # Gemini CLI 公开 OAuth 凭证(适用于 Code Assist 和 AI Studio)
+    client_id: "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com"
+    client_secret: "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"
+    # Optional scopes (space-separated). Leave empty to auto-select based on oauth_type.
+    # 可选的权限范围(空格分隔)。留空则根据 oauth_type 自动选择。
+    scopes: ""
+  quota:
+    # Optional: local quota simulation for Gemini Code Assist (local billing).
+    # 可选:Gemini Code Assist 本地配额模拟(本地计费)。
+    # These values are used for UI progress + precheck scheduling, not official Google quotas.
+    # 这些值用于 UI 进度显示和预检调度,并非 Google 官方配额。
+    tiers:
+      LEGACY:
+        # Pro model requests per day
+        # Pro 模型每日请求数
+        pro_rpd: 50
+        # Flash model requests per day
+        # Flash 模型每日请求数
+        flash_rpd: 1500
+        # Cooldown time (minutes) after hitting quota
+        # 达到配额后的冷却时间(分钟)
+        cooldown_minutes: 30
+      PRO:
+        # Pro model requests per day
+        # Pro 模型每日请求数
+        pro_rpd: 1500
+        # Flash model requests per day
+        # Flash 模型每日请求数
+        flash_rpd: 4000
+        # Cooldown time (minutes) after hitting quota
+        # 达到配额后的冷却时间(分钟)
+        cooldown_minutes: 5
+      ULTRA:
+        # Pro model requests per day
+        # Pro 模型每日请求数
+        pro_rpd: 2000
+        # Flash model requests per day (0 = unlimited)
+        # Flash 模型每日请求数(0 = 无限制)
+        flash_rpd: 0
+        # Cooldown time (minutes) after hitting quota
+        # 达到配额后的冷却时间(分钟)
+        cooldown_minutes: 5
diff --git a/deploy/.env.example b/deploy/.env.example
new file mode 100644
index 00000000..f21a3c62
--- /dev/null
+++ b/deploy/.env.example
@@ -0,0 +1,218 @@
+# =============================================================================
+# Sub2API Docker Environment Configuration
+# =============================================================================
+# Copy this file to .env and modify as needed:
+#   cp .env.example .env
+#   nano .env
+#
+# Then start with: docker-compose up -d
+# =============================================================================
+
+# -----------------------------------------------------------------------------
+# Server Configuration
+# -----------------------------------------------------------------------------
+# Bind address for host port mapping
+BIND_HOST=0.0.0.0
+
+# Server port (exposed on host)
+SERVER_PORT=8080
+
+# Server mode: release or debug
+SERVER_MODE=release
+
+# Run mode: standard (default) or simple (internal use)
+# 运行模式: standard (默认) 或 simple (内部自用)
+# standard: full SaaS features with billing/balance checks; simple: hides SaaS features and skips billing/balance checks
+# standard: 完整 SaaS 功能,包含计费/余额校验;simple: 隐藏 SaaS 功能并跳过计费/余额校验
+RUN_MODE=standard
+
+# Timezone
+TZ=Asia/Shanghai
+
+# -----------------------------------------------------------------------------
+# PostgreSQL Configuration (REQUIRED)
+# -----------------------------------------------------------------------------
+POSTGRES_USER=sub2api
+POSTGRES_PASSWORD=change_this_secure_password
+POSTGRES_DB=sub2api
+
+# -----------------------------------------------------------------------------
+# Redis Configuration
+# -----------------------------------------------------------------------------
+# Leave empty for no password (default for local development)
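+# To require auth, set a value here and keep it in sync with your Redis server, e.g.
+#   REDIS_PASSWORD=your-strong-redis-password   (illustrative placeholder only)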
+REDIS_PASSWORD=
+REDIS_DB=0
+
+# -----------------------------------------------------------------------------
+# Admin Account
+# -----------------------------------------------------------------------------
+# Email for the admin account
+ADMIN_EMAIL=admin@sub2api.local
+
+# Password for admin account
+# Leave empty to auto-generate (will be shown in logs on first run)
+ADMIN_PASSWORD=
+
+# -----------------------------------------------------------------------------
+# JWT Configuration
+# -----------------------------------------------------------------------------
+# IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being
+# invalidated after container restarts. If left empty, a random secret will
+# be generated on each startup, causing all users to be logged out.
+# Generate a secure secret: openssl rand -hex 32
+JWT_SECRET=
+JWT_EXPIRE_HOUR=24
+
+# -----------------------------------------------------------------------------
+# Configuration File (Optional)
+# -----------------------------------------------------------------------------
+# Path to custom config file (relative to docker-compose.yml directory)
+# Copy config.example.yaml to config.yaml and modify as needed
+# Leave unset to use default ./config.yaml
+#CONFIG_FILE=./config.yaml
+
+# -----------------------------------------------------------------------------
+# Rate Limiting (Optional)
+# 速率限制(可选)
+# -----------------------------------------------------------------------------
+# Cooldown time (in minutes) when upstream returns 529 (overloaded)
+# 上游返回 529(过载)时的冷却时间(分钟)
+RATE_LIMIT_OVERLOAD_COOLDOWN_MINUTES=10
+
+# -----------------------------------------------------------------------------
+# Gateway Scheduling (Optional)
+# Scheduling cache and controlled DB fallback (no DB reads when the cache is ready and hit)
+# 调度缓存与受控回源配置(缓存就绪且命中时不读 DB)
+# -----------------------------------------------------------------------------
+# Sticky session max waiting queue size
+# 粘性会话最大排队长度
+GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING=3
+# Sticky session wait timeout (duration, e.g. 45s)
+# 粘性会话等待超时(时间段,例如 45s)
+GATEWAY_SCHEDULING_STICKY_SESSION_WAIT_TIMEOUT=120s
+# Fallback wait timeout (duration, e.g. 30s)
+# 兜底排队等待超时(时间段,例如 30s)
+GATEWAY_SCHEDULING_FALLBACK_WAIT_TIMEOUT=30s
+# Fallback max waiting queue size
+# 兜底最大排队长度
+GATEWAY_SCHEDULING_FALLBACK_MAX_WAITING=100
+# Enable batch load calculation for scheduling
+# 启用调度批量负载计算
+GATEWAY_SCHEDULING_LOAD_BATCH_ENABLED=true
+# Concurrency slot cleanup interval (duration, e.g. 30s)
+# 并发槽位清理周期(时间段,例如 30s)
+GATEWAY_SCHEDULING_SLOT_CLEANUP_INTERVAL=30s
+# Allow controlled fallback to DB (default true, preserves existing behavior)
+# 是否允许受控回源到 DB(默认 true,保持现有行为)
+GATEWAY_SCHEDULING_DB_FALLBACK_ENABLED=true
+# Controlled DB fallback timeout (seconds); 0 = no extra tightening
+# 受控回源超时(秒),0 表示不额外收紧超时
+GATEWAY_SCHEDULING_DB_FALLBACK_TIMEOUT_SECONDS=0
+# Controlled DB fallback rate limit (per-instance QPS); 0 = unlimited
+# 受控回源限流(实例级 QPS),0 表示不限制
+GATEWAY_SCHEDULING_DB_FALLBACK_MAX_QPS=0
+# Outbox poll interval (seconds)
+# outbox 轮询周期(秒)
+GATEWAY_SCHEDULING_OUTBOX_POLL_INTERVAL_SECONDS=1
+# Outbox lag warning threshold (seconds)
+# outbox 滞后告警阈值(秒)
+GATEWAY_SCHEDULING_OUTBOX_LAG_WARN_SECONDS=5
+# Outbox lag threshold that forces a rebuild (seconds)
+# outbox 触发强制重建阈值(秒)
+GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_SECONDS=10
+# Consecutive lag hits required to trigger a rebuild
+# outbox 连续滞后触发次数
+GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_FAILURES=3
+# Outbox backlog threshold that triggers a rebuild (rows)
+# outbox 积压触发重建阈值(行数)
+GATEWAY_SCHEDULING_OUTBOX_BACKLOG_REBUILD_ROWS=10000
+# Full rebuild interval (seconds)
+# 全量重建周期(秒)
+GATEWAY_SCHEDULING_FULL_REBUILD_INTERVAL_SECONDS=300
+
+# -----------------------------------------------------------------------------
+# Dashboard Aggregation (Optional)
+# -----------------------------------------------------------------------------
+# Enable aggregation job
+# 启用仪表盘预聚合
+DASHBOARD_AGGREGATION_ENABLED=true
+# Refresh interval (seconds)
+# 刷新间隔(秒)
+DASHBOARD_AGGREGATION_INTERVAL_SECONDS=60
+# Lookback window (seconds)
+# 回看窗口(秒)
+DASHBOARD_AGGREGATION_LOOKBACK_SECONDS=120
+# Allow manual backfill
+# 允许手动回填
+DASHBOARD_AGGREGATION_BACKFILL_ENABLED=false
+# Backfill max range (days)
+# 回填最大跨度(天)
+DASHBOARD_AGGREGATION_BACKFILL_MAX_DAYS=31
+# Recompute recent N days on startup
+# 启动时重算最近 N 天
+DASHBOARD_AGGREGATION_RECOMPUTE_DAYS=2
+# Retention windows (days)
+# 保留窗口(天)
+DASHBOARD_AGGREGATION_RETENTION_USAGE_LOGS_DAYS=90
+DASHBOARD_AGGREGATION_RETENTION_HOURLY_DAYS=180
+DASHBOARD_AGGREGATION_RETENTION_DAILY_DAYS=730
+
+# -----------------------------------------------------------------------------
+# Security Configuration
+# -----------------------------------------------------------------------------
+# URL Allowlist Configuration
+# Enable URL allowlist validation (false skips allowlist checks; only basic format validation is applied)
+# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验)
+SECURITY_URL_ALLOWLIST_ENABLED=false
+
+# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https)
+# ⚠️ WARNING: Allowing HTTP has security risks (plaintext transmission)
+#   Only recommended for dev/test environments or trusted networks
+# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://)
+# ⚠️ 警告:允许 HTTP 存在安全风险(明文传输),仅建议在开发/测试环境或可信内网中使用
+SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
+
+# Allow localhost/private IPs for upstream/pricing/CRS (use only in trusted networks)
+# 是否允许本地/私有 IP 地址用于上游/定价/CRS(仅在可信网络中使用)
+SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=true
+
+# -----------------------------------------------------------------------------
+# Gemini OAuth (OPTIONAL, required only for Gemini OAuth accounts)
+# -----------------------------------------------------------------------------
+# Sub2API supports TWO Gemini OAuth modes:
+#
+# 1. Code Assist OAuth (requires GCP project_id)
+#    - Uses: cloudcode-pa.googleapis.com (Code Assist API)
+#    - Auto scopes: cloud-platform + userinfo.email + userinfo.profile
+#    - OAuth Client: Can use the built-in Gemini CLI client (leave empty)
+#    - Requires: Google Cloud Platform project with Code Assist enabled
+#
+# 2. AI Studio OAuth (no project_id needed)
+#    - Uses: generativelanguage.googleapis.com (AI Studio API)
+#    - Default scopes: generative-language
+#    - OAuth Client: Requires your own OAuth 2.0 Client (the built-in Gemini CLI client cannot request the generative-language scope)
+#    - Requires: Create OAuth 2.0 Client in GCP Console + OAuth consent screen
+#    - Setup Guide: https://ai.google.dev/gemini-api/docs/oauth
+#    - ⚠️ IMPORTANT: the OAuth client must be published to Production.
+#      Testing-mode limits: max 100 test users, and refresh tokens expire after 7 days
+#      To publish: GCP Console → OAuth consent screen → PUBLISH APP
+#
+# Configuration:
+#   Leave empty to use the built-in Gemini CLI OAuth client (Code Assist OAuth only).
+#   To enable AI Studio OAuth, set your own OAuth client ID/secret here.
+GEMINI_OAUTH_CLIENT_ID=
+GEMINI_OAUTH_CLIENT_SECRET=
+# Optional; leave empty to auto-select scopes based on oauth_type
+GEMINI_OAUTH_SCOPES=
+
+# -----------------------------------------------------------------------------
+# Gemini Quota Policy (OPTIONAL, local simulation)
+# -----------------------------------------------------------------------------
+# JSON overrides for local quota simulation (Code Assist only).
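+# The JSON mirrors the gemini.quota.tiers block from config.example.yaml: a top-level
+# "tiers" object keyed by tier name (LEGACY/PRO/ULTRA), each tier carrying
+# pro_rpd, flash_rpd and cooldown_minutes (shape inferred from the example below).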
+# Example: +# GEMINI_QUOTA_POLICY={"tiers":{"LEGACY":{"pro_rpd":50,"flash_rpd":1500,"cooldown_minutes":30},"PRO":{"pro_rpd":1500,"flash_rpd":4000,"cooldown_minutes":5},"ULTRA":{"pro_rpd":2000,"flash_rpd":0,"cooldown_minutes":5}}} +GEMINI_QUOTA_POLICY= + +# ----------------------------------------------------------------------------- +# Ops Monitoring Configuration (运维监控配置) +# ----------------------------------------------------------------------------- +# Enable ops monitoring features (background jobs and APIs) +# 是否启用运维监控功能(后台任务和接口) +# Set to false to hide ops menu in sidebar and disable all ops features +# 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能 +OPS_ENABLED=true + +# ----------------------------------------------------------------------------- +# Update Configuration (在线更新配置) +# ----------------------------------------------------------------------------- +# Proxy URL for accessing GitHub (used for online updates and pricing data) +# 用于访问 GitHub 的代理地址(用于在线更新和定价数据获取) +# Supports: http, https, socks5, socks5h +# Examples: +# HTTP proxy: http://127.0.0.1:7890 +# SOCKS5 proxy: socks5://127.0.0.1:1080 +# With authentication: http://user:pass@proxy.example.com:8080 +# Leave empty for direct connection (recommended for overseas servers) +# 留空表示直连(适用于海外服务器) +UPDATE_PROXY_URL= diff --git a/deploy/Caddyfile b/deploy/Caddyfile new file mode 100644 index 00000000..3aeef51a --- /dev/null +++ b/deploy/Caddyfile @@ -0,0 +1,188 @@ +# ============================================================================= +# Sub2API Caddy Reverse Proxy Configuration (宿主机部署) +# ============================================================================= +# 使用方法: +# 1. 安装 Caddy: https://caddyserver.com/docs/install +# 2. 修改下方 example.com 为你的域名 +# 3. 确保域名 DNS 已指向服务器 +# 4. 复制配置: sudo cp Caddyfile /etc/caddy/Caddyfile +# 5. 
重载配置: sudo systemctl reload caddy +# +# Caddy 会自动申请和续期 Let's Encrypt SSL 证书 +# ============================================================================= + +# 全局配置 +{ + # Let's Encrypt 邮箱通知 + email admin@example.com + + # 服务器配置 + servers { + # 启用 HTTP/2 和 HTTP/3 + protocols h1 h2 h3 + + # 超时配置 + timeouts { + read_body 30s + read_header 10s + write 300s + idle 300s + } + } +} + +# 修改为你的域名 +example.com { + # ========================================================================= + # TLS 安全配置 + # ========================================================================= + tls { + # 仅使用 TLS 1.2 和 1.3 + protocols tls1.2 tls1.3 + + # 优先使用的加密套件 + ciphers TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + } + + # ========================================================================= + # 反向代理配置 + # ========================================================================= + reverse_proxy localhost:8080 { + # 健康检查 + health_uri /health + health_interval 30s + health_timeout 10s + health_status 200 + + # 负载均衡策略(单节点可忽略,多节点时有用) + lb_policy round_robin + lb_try_duration 5s + lb_try_interval 250ms + + # 传递真实客户端信息 + # 兼容 Cloudflare 和直连:后端应优先读取 CF-Connecting-IP,其次 X-Real-IP + header_up X-Real-IP {remote_host} + header_up X-Forwarded-For {remote_host} + header_up X-Forwarded-Proto {scheme} + header_up X-Forwarded-Host {host} + # 保留 Cloudflare 原始头(如果存在) + # 后端获取 IP 的优先级建议: CF-Connecting-IP → X-Real-IP → X-Forwarded-For + header_up CF-Connecting-IP {http.request.header.CF-Connecting-IP} + + # 连接池优化 + transport http { + keepalive 120s + keepalive_idle_conns 256 + read_buffer 16KB + write_buffer 16KB + compression off + } + + # SSE/流式传输优化:禁用响应缓冲,立即刷新数据给客户端 + flush_interval -1 + + # 故障转移 + fail_duration 30s + max_fails 3 + unhealthy_status 500 502 503 504 + } + + # ========================================================================= + # 压缩配置 + # ========================================================================= + encode { + zstd + gzip 6 + minimum_length 256 + match { + # SSE 请求通常会带 Accept: text/event-stream,需排除压缩 + not header Accept text/event-stream* + # 排除已知 SSE 路径(即便 Accept 缺失) + not path /v1/messages /v1/responses /responses /antigravity/v1/messages /v1beta/models/* /antigravity/v1beta/models/* + header Content-Type text/* + header Content-Type application/json* + header Content-Type application/javascript* + header Content-Type application/xml* + header Content-Type application/rss+xml* + header Content-Type image/svg+xml* + } + } + + # ========================================================================= + # 速率限制 (需要 caddy-ratelimit 插件) + # 如未安装插件,请注释掉此段 + # ========================================================================= + # rate_limit { + # zone api { + # key {remote_host} + # events 100 + # window 1m + # } + # } + + # ========================================================================= + # 安全响应头 + # ========================================================================= + header { + # 防止点击劫持 + X-Frame-Options "SAMEORIGIN" + + # XSS 保护 + X-XSS-Protection "1; mode=block" + + # 防止 MIME 类型嗅探 + X-Content-Type-Options "nosniff" + + # 引用策略 + Referrer-Policy "strict-origin-when-cross-origin" + + # HSTS - 强制 HTTPS (max-age=1年) + Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" + + # 内容安全策略 (根据需要调整) + # Content-Security-Policy "default-src 'self'; script-src 
'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:; connect-src 'self' https:;" + + # 权限策略 + Permissions-Policy "accelerometer=(), camera=(), geolocation=(), gyroscope=(), magnetometer=(), microphone=(), payment=(), usb=()" + + # 跨域资源策略 + Cross-Origin-Opener-Policy "same-origin" + Cross-Origin-Embedder-Policy "require-corp" + Cross-Origin-Resource-Policy "same-origin" + + # 移除敏感头 + -Server + -X-Powered-By + } + + # ========================================================================= + # 请求大小限制 (防止大文件攻击) + # ========================================================================= + request_body { + max_size 100MB + } + + # ========================================================================= + # 日志配置 + # ========================================================================= + log { + output file /var/log/caddy/sub2api.log { + roll_size 50mb + roll_keep 10 + roll_keep_for 720h + } + format json + level INFO + } + + # ========================================================================= + # 错误处理 + # ========================================================================= + handle_errors { + respond "{err.status_code} {err.status_text}" + } +} + +# ============================================================================= +# HTTP 重定向到 HTTPS (Caddy 默认自动处理,此处显式声明) +# ============================================================================= diff --git a/deploy/DOCKER.md b/deploy/DOCKER.md new file mode 100644 index 00000000..156b6c97 --- /dev/null +++ b/deploy/DOCKER.md @@ -0,0 +1,76 @@ +# Sub2API Docker Image + +Sub2API is an AI API Gateway Platform for distributing and managing AI product subscription API quotas. + +## Quick Start + +```bash +docker run -d \ + --name sub2api \ + -p 8080:8080 \ + -e DATABASE_URL="postgres://user:pass@host:5432/sub2api" \ + -e REDIS_URL="redis://host:6379" \ + weishaw/sub2api:latest +``` + +## Docker Compose + +```yaml +version: '3.8' + +services: + sub2api: + image: weishaw/sub2api:latest + ports: + - "8080:8080" + environment: + - DATABASE_URL=postgres://postgres:postgres@db:5432/sub2api?sslmode=disable + - REDIS_URL=redis://redis:6379 + depends_on: + - db + - redis + + db: + image: postgres:15-alpine + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=sub2api + volumes: + - postgres_data:/var/lib/postgresql/data + + redis: + image: redis:7-alpine + volumes: + - redis_data:/data + +volumes: + postgres_data: + redis_data: +``` + +## Environment Variables + +| Variable | Description | Required | Default | +|----------|-------------|----------|---------| +| `DATABASE_URL` | PostgreSQL connection string | Yes | - | +| `REDIS_URL` | Redis connection string | Yes | - | +| `PORT` | Server port | No | `8080` | +| `GIN_MODE` | Gin framework mode (`debug`/`release`) | No | `release` | + +## Supported Architectures + +- `linux/amd64` +- `linux/arm64` + +## Tags + +- `latest` - Latest stable release +- `x.y.z` - Specific version +- `x.y` - Latest patch of minor version +- `x` - Latest minor of major version + +## Links + +- [GitHub Repository](https://github.com/weishaw/sub2api) +- [Documentation](https://github.com/weishaw/sub2api#readme) diff --git a/deploy/Makefile b/deploy/Makefile new file mode 100644 index 00000000..2c6d5217 --- /dev/null +++ b/deploy/Makefile @@ -0,0 +1,41 @@ +.PHONY: wire build build-embed test-unit test-integration test-e2e test-cover-integration + +wire: + @echo "生成 Wire 代码..." 
+ @cd cmd/server && go generate + @echo "Wire 代码生成完成" + +build: + @echo "构建后端(不嵌入前端)..." + @go build -o bin/server ./cmd/server + @echo "构建完成: bin/server" + +build-embed: + @echo "构建后端(嵌入前端)..." + @go build -tags embed -o bin/server ./cmd/server + @echo "构建完成: bin/server (with embedded frontend)" + +test-unit: + @go test -tags unit ./... -count=1 + +test-integration: + @go test -tags integration ./... -count=1 -race -parallel=8 + +test-e2e: + @echo "运行 E2E 测试(需要本地服务器运行)..." + @go test -tags e2e ./internal/integration/... -count=1 -v + +test-cover-integration: + @echo "运行集成测试并生成覆盖率报告..." + @go test -tags=integration -cover -coverprofile=coverage.out -count=1 -race -parallel=8 ./... + @go tool cover -func=coverage.out | tail -1 + @go tool cover -html=coverage.out -o coverage.html + @echo "覆盖率报告已生成: coverage.html" + +clean-coverage: + @rm -f coverage.out coverage.html + @echo "覆盖率文件已清理" + +clean: clean-coverage + @rm -rf bin/ + @echo "构建产物已清理" \ No newline at end of file diff --git a/deploy/README.md b/deploy/README.md new file mode 100644 index 00000000..f697247d --- /dev/null +++ b/deploy/README.md @@ -0,0 +1,403 @@ +# Sub2API Deployment Files + +This directory contains files for deploying Sub2API on Linux servers. + +## Deployment Methods + +| Method | Best For | Setup Wizard | +|--------|----------|--------------| +| **Docker Compose** | Quick setup, all-in-one | Not needed (auto-setup) | +| **Binary Install** | Production servers, systemd | Web-based wizard | + +## Files + +| File | Description | +|------|-------------| +| `docker-compose.yml` | Docker Compose configuration | +| `.env.example` | Docker environment variables template | +| `DOCKER.md` | Docker Hub documentation | +| `install.sh` | One-click binary installation script | +| `sub2api.service` | Systemd service unit file | +| `config.example.yaml` | Example configuration file | + +--- + +## Docker Deployment (Recommended) + +### Quick Start + +```bash +# Clone repository +git clone https://github.com/Wei-Shaw/sub2api.git +cd sub2api/deploy + +# Configure environment +cp .env.example .env +nano .env # Set POSTGRES_PASSWORD (required) + +# Start all services +docker-compose up -d + +# View logs (check for auto-generated admin password) +docker-compose logs -f sub2api + +# Access Web UI +# http://localhost:8080 +``` + +### How Auto-Setup Works + +When using Docker Compose with `AUTO_SETUP=true`: + +1. On first run, the system automatically: + - Connects to PostgreSQL and Redis + - Applies database migrations (SQL files in `backend/migrations/*.sql`) and records them in `schema_migrations` + - Generates JWT secret (if not provided) + - Creates admin account (password auto-generated if not provided) + - Writes config.yaml + +2. No manual Setup Wizard needed - just configure `.env` and start + +3. If `ADMIN_PASSWORD` is not set, check logs for the generated password: + ```bash + docker-compose logs sub2api | grep "admin password" + ``` + +### Database Migration Notes (PostgreSQL) + +- Migrations are applied in lexicographic order (e.g. `001_...sql`, `002_...sql`). +- `schema_migrations` tracks applied migrations (filename + checksum). +- Migrations are forward-only; rollback requires a DB backup restore or a manual compensating SQL script. + +**Verify `users.allowed_groups` → `user_allowed_groups` backfill** + +During the incremental GORM→Ent migration, `users.allowed_groups` (legacy `BIGINT[]`) is being replaced by a normalized join table `user_allowed_groups(user_id, group_id)`. 
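+
+If the pair counts from the query below ever disagree, a pair-level anti-join can pinpoint which legacy pairs are missing from the join table. This is a sketch under the same assumptions (the legacy `users.allowed_groups` `BIGINT[]` column and the `user_allowed_groups` table described above):
+
+```sql
+-- Legacy pairs with no counterpart in user_allowed_groups
+-- (an empty result means the backfill is complete).
+SELECT o.user_id, o.group_id
+FROM (
+    SELECT DISTINCT u.id AS user_id, x.group_id
+    FROM users u
+    CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id)
+    WHERE u.allowed_groups IS NOT NULL
+) AS o
+LEFT JOIN user_allowed_groups n
+    ON n.user_id = o.user_id AND n.group_id = o.group_id
+WHERE n.user_id IS NULL;
+```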
+ +Run this query to compare the legacy data vs the join table: + +```sql +WITH old_pairs AS ( + SELECT DISTINCT u.id AS user_id, x.group_id + FROM users u + CROSS JOIN LATERAL unnest(u.allowed_groups) AS x(group_id) + WHERE u.allowed_groups IS NOT NULL +) +SELECT + (SELECT COUNT(*) FROM old_pairs) AS old_pair_count, + (SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count; +``` + +### Commands + +```bash +# Start services +docker-compose up -d + +# Stop services +docker-compose down + +# View logs +docker-compose logs -f sub2api + +# Restart Sub2API only +docker-compose restart sub2api + +# Update to latest version +docker-compose pull +docker-compose up -d + +# Remove all data (caution!) +docker-compose down -v +``` + +### Environment Variables + +| Variable | Required | Default | Description | +|----------|----------|---------|-------------| +| `POSTGRES_PASSWORD` | **Yes** | - | PostgreSQL password | +| `SERVER_PORT` | No | `8080` | Server port | +| `ADMIN_EMAIL` | No | `admin@sub2api.local` | Admin email | +| `ADMIN_PASSWORD` | No | *(auto-generated)* | Admin password | +| `JWT_SECRET` | No | *(auto-generated)* | JWT secret | +| `TZ` | No | `Asia/Shanghai` | Timezone | +| `GEMINI_OAUTH_CLIENT_ID` | No | *(builtin)* | Google OAuth client ID (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. | +| `GEMINI_OAUTH_CLIENT_SECRET` | No | *(builtin)* | Google OAuth client secret (Gemini OAuth). Leave empty to use the built-in Gemini CLI client. | +| `GEMINI_OAUTH_SCOPES` | No | *(default)* | OAuth scopes (Gemini OAuth) | +| `GEMINI_QUOTA_POLICY` | No | *(empty)* | JSON overrides for Gemini local quota simulation (Code Assist only). | + +See `.env.example` for all available options. + +--- + +## Gemini OAuth Configuration + +Sub2API supports three methods to connect to Gemini: + +### Method 1: Code Assist OAuth (Recommended for GCP Users) + +**No configuration needed** - always uses the built-in Gemini CLI OAuth client (public). + +1. Leave `GEMINI_OAUTH_CLIENT_ID` and `GEMINI_OAUTH_CLIENT_SECRET` empty +2. In the Admin UI, create a Gemini OAuth account and select **"Code Assist"** type +3. Complete the OAuth flow in your browser + +> Note: Even if you configure `GEMINI_OAUTH_CLIENT_ID` / `GEMINI_OAUTH_CLIENT_SECRET` for AI Studio OAuth, +> Code Assist OAuth will still use the built-in Gemini CLI client. + +**Requirements:** +- Google account with access to Google Cloud Platform +- A GCP project (auto-detected or manually specified) + +**How to get Project ID (if auto-detection fails):** +1. Go to [Google Cloud Console](https://console.cloud.google.com/) +2. Click the project dropdown at the top of the page +3. Copy the Project ID (not the project name) from the list +4. Common formats: `my-project-123456` or `cloud-ai-companion-xxxxx` + +### Method 2: AI Studio OAuth (For Regular Google Accounts) + +Requires your own OAuth client credentials. + +**Step 1: Create OAuth Client in Google Cloud Console** + +1. Go to [Google Cloud Console - Credentials](https://console.cloud.google.com/apis/credentials) +2. Create a new project or select an existing one +3. **Enable the Generative Language API:** + - Go to "APIs & Services" → "Library" + - Search for "Generative Language API" + - Click "Enable" +4. 
**Configure OAuth Consent Screen** (if not done): + - Go to "APIs & Services" → "OAuth consent screen" + - Choose "External" user type + - Fill in app name, user support email, developer contact + - Add scopes: `https://www.googleapis.com/auth/generative-language.retriever` (and optionally `https://www.googleapis.com/auth/cloud-platform`) + - Add test users (your Google account email) +5. **Create OAuth 2.0 credentials:** + - Go to "APIs & Services" → "Credentials" + - Click "Create Credentials" → "OAuth client ID" + - Application type: **Web application** (or **Desktop app**) + - Name: e.g., "Sub2API Gemini" + - Authorized redirect URIs: Add `http://localhost:1455/auth/callback` +6. Copy the **Client ID** and **Client Secret** +7. **⚠️ Publish to Production (IMPORTANT):** + - Go to "APIs & Services" → "OAuth consent screen" + - Click "PUBLISH APP" to move from Testing to Production + - **Testing mode limitations:** + - Only manually added test users can authenticate (max 100 users) + - Refresh tokens expire after 7 days + - Users must be re-added periodically + - **Production mode:** Any Google user can authenticate, tokens don't expire + - Note: For sensitive scopes, Google may require verification (demo video, privacy policy) + +**Step 2: Configure Environment Variables** + +```bash +GEMINI_OAUTH_CLIENT_ID=your-client-id.apps.googleusercontent.com +GEMINI_OAUTH_CLIENT_SECRET=GOCSPX-your-client-secret +``` + +**Step 3: Create Account in Admin UI** + +1. Create a Gemini OAuth account and select **"AI Studio"** type +2. Complete the OAuth flow + - After consent, your browser will be redirected to `http://localhost:1455/auth/callback?code=...&state=...` + - Copy the full callback URL (recommended) or just the `code` and paste it back into the Admin UI + +### Method 3: API Key (Simplest) + +1. Go to [Google AI Studio](https://aistudio.google.com/app/apikey) +2. Click "Create API key" +3. In Admin UI, create a Gemini **API Key** account +4. Paste your API key (starts with `AIza...`) + +### Comparison Table + +| Feature | Code Assist OAuth | AI Studio OAuth | API Key | +|---------|-------------------|-----------------|---------| +| Setup Complexity | Easy (no config) | Medium (OAuth client) | Easy | +| GCP Project Required | Yes | No | No | +| Custom OAuth Client | No (built-in) | Yes (required) | N/A | +| Rate Limits | GCP quota | Standard | Standard | +| Best For | GCP developers | Regular users needing OAuth | Quick testing | + +--- + +## Binary Installation + +For production servers using systemd. + +### One-Line Installation + +```bash +curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash +``` + +### Manual Installation + +1. Download the latest release from [GitHub Releases](https://github.com/Wei-Shaw/sub2api/releases) +2. Extract and copy the binary to `/opt/sub2api/` +3. Copy `sub2api.service` to `/etc/systemd/system/` +4. Run: + ```bash + sudo systemctl daemon-reload + sudo systemctl enable sub2api + sudo systemctl start sub2api + ``` +5. 
Open the Setup Wizard in your browser to complete configuration + +### Commands + +```bash +# Install +sudo ./install.sh + +# Upgrade +sudo ./install.sh upgrade + +# Uninstall +sudo ./install.sh uninstall +``` + +### Service Management + +```bash +# Start the service +sudo systemctl start sub2api + +# Stop the service +sudo systemctl stop sub2api + +# Restart the service +sudo systemctl restart sub2api + +# Check status +sudo systemctl status sub2api + +# View logs +sudo journalctl -u sub2api -f + +# Enable auto-start on boot +sudo systemctl enable sub2api +``` + +### Configuration + +#### Server Address and Port + +During installation, you will be prompted to configure the server listen address and port. These settings are stored in the systemd service file as environment variables. + +To change after installation: + +1. Edit the systemd service: + ```bash + sudo systemctl edit sub2api + ``` + +2. Add or modify: + ```ini + [Service] + Environment=SERVER_HOST=0.0.0.0 + Environment=SERVER_PORT=3000 + ``` + +3. Reload and restart: + ```bash + sudo systemctl daemon-reload + sudo systemctl restart sub2api + ``` + +#### Gemini OAuth Configuration + +If you need to use AI Studio OAuth for Gemini accounts, add the OAuth client credentials to the systemd service file: + +1. Edit the service file: + ```bash + sudo nano /etc/systemd/system/sub2api.service + ``` + +2. Add your OAuth credentials in the `[Service]` section (after the existing `Environment=` lines): + ```ini + Environment=GEMINI_OAUTH_CLIENT_ID=your-client-id.apps.googleusercontent.com + Environment=GEMINI_OAUTH_CLIENT_SECRET=GOCSPX-your-client-secret + ``` + +3. Reload and restart: + ```bash + sudo systemctl daemon-reload + sudo systemctl restart sub2api + ``` + +> **Note:** Code Assist OAuth does not require any configuration - it uses the built-in Gemini CLI client. +> See the [Gemini OAuth Configuration](#gemini-oauth-configuration) section above for detailed setup instructions. + +#### Application Configuration + +The main config file is at `/etc/sub2api/config.yaml` (created by Setup Wizard). + +### Prerequisites + +- Linux server (Ubuntu 20.04+, Debian 11+, CentOS 8+, etc.) +- PostgreSQL 14+ +- Redis 6+ +- systemd + +### Directory Structure + +``` +/opt/sub2api/ +├── sub2api # Main binary +├── sub2api.backup # Backup (after upgrade) +└── data/ # Runtime data + +/etc/sub2api/ +└── config.yaml # Configuration file +``` + +--- + +## Troubleshooting + +### Docker + +```bash +# Check container status +docker-compose ps + +# View detailed logs +docker-compose logs --tail=100 sub2api + +# Check database connection +docker-compose exec postgres pg_isready + +# Check Redis connection +docker-compose exec redis redis-cli ping + +# Restart all services +docker-compose restart +``` + +### Binary Install + +```bash +# Check service status +sudo systemctl status sub2api + +# View recent logs +sudo journalctl -u sub2api -n 50 + +# Check config file +sudo cat /etc/sub2api/config.yaml + +# Check PostgreSQL +sudo systemctl status postgresql + +# Check Redis +sudo systemctl status redis +``` + +### Common Issues + +1. **Port already in use**: Change `SERVER_PORT` in `.env` or systemd config +2. **Database connection failed**: Check PostgreSQL is running and credentials are correct +3. **Redis connection failed**: Check Redis is running and password is correct +4. 
**Permission denied**: Ensure proper file ownership for binary install diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml new file mode 100644 index 00000000..ce2439f4 --- /dev/null +++ b/deploy/config.example.yaml @@ -0,0 +1,563 @@ +# Sub2API Configuration File +# Sub2API 配置文件 +# +# Copy this file to /etc/sub2api/config.yaml and modify as needed +# 复制此文件到 /etc/sub2api/config.yaml 并根据需要修改 +# +# Documentation / 文档: https://github.com/Wei-Shaw/sub2api + +# ============================================================================= +# Server Configuration +# 服务器配置 +# ============================================================================= +server: + # Bind address (0.0.0.0 for all interfaces) + # 绑定地址(0.0.0.0 表示监听所有网络接口) + host: "0.0.0.0" + # Port to listen on + # 监听端口 + port: 8080 + # Mode: "debug" for development, "release" for production + # 运行模式:"debug" 用于开发,"release" 用于生产环境 + mode: "release" + # Trusted proxies for X-Forwarded-For parsing (CIDR/IP). Empty disables trusted proxies. + # 信任的代理地址(CIDR/IP 格式),用于解析 X-Forwarded-For 头。留空则禁用代理信任。 + trusted_proxies: [] + +# ============================================================================= +# Run Mode Configuration +# 运行模式配置 +# ============================================================================= +# Run mode: "standard" (default) or "simple" (for internal use) +# 运行模式:"standard"(默认)或 "simple"(内部使用) +# - standard: Full SaaS features with billing/balance checks +# - standard: 完整 SaaS 功能,包含计费和余额校验 +# - simple: Hides SaaS features and skips billing/balance checks +# - simple: 隐藏 SaaS 功能,跳过计费和余额校验 +run_mode: "standard" + +# ============================================================================= +# CORS Configuration +# 跨域资源共享 (CORS) 配置 +# ============================================================================= +cors: + # Allowed origins list. Leave empty to disable cross-origin requests. + # 允许的来源列表。留空则禁用跨域请求。 + allowed_origins: [] + # Allow credentials (cookies/authorization headers). Cannot be used with "*". 
+ # 允许携带凭证(cookies/授权头)。不能与 "*" 通配符同时使用。 + allow_credentials: true + +# ============================================================================= +# Security Configuration +# 安全配置 +# ============================================================================= +security: + url_allowlist: + # Enable URL allowlist validation (disable to skip all URL checks) + # 启用 URL 白名单验证(禁用则跳过所有 URL 检查) + enabled: false + # Allowed upstream hosts for API proxying + # 允许代理的上游 API 主机列表 + upstream_hosts: + - "api.openai.com" + - "api.anthropic.com" + - "api.kimi.com" + - "open.bigmodel.cn" + - "api.minimaxi.com" + - "generativelanguage.googleapis.com" + - "cloudcode-pa.googleapis.com" + - "*.openai.azure.com" + # Allowed hosts for pricing data download + # 允许下载定价数据的主机列表 + pricing_hosts: + - "raw.githubusercontent.com" + # Allowed hosts for CRS sync (required when using CRS sync) + # 允许 CRS 同步的主机列表(使用 CRS 同步功能时必须配置) + crs_hosts: [] + # Allow localhost/private IPs for upstream/pricing/CRS (use only in trusted networks) + # 允许本地/私有 IP 地址用于上游/定价/CRS(仅在可信网络中使用) + allow_private_hosts: true + # Allow http:// URLs when allowlist is disabled (default: false, require https) + # 白名单禁用时是否允许 http:// URL(默认: false,要求 https) + allow_insecure_http: true + response_headers: + # Enable configurable response header filtering (disable to use default allowlist) + # 启用可配置的响应头过滤(禁用则使用默认白名单) + enabled: false + # Extra allowed response headers from upstream + # 额外允许的上游响应头 + additional_allowed: [] + # Force-remove response headers from upstream + # 强制移除的上游响应头 + force_remove: [] + csp: + # Enable Content-Security-Policy header + # 启用内容安全策略 (CSP) 响应头 + enabled: true + # Default CSP policy (override if you host assets on other domains) + # 默认 CSP 策略(如果静态资源托管在其他域名,请自行覆盖) + policy: "default-src 'self'; script-src 'self' https://challenges.cloudflare.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" + proxy_probe: + # Allow skipping TLS verification for proxy probe (debug only) + # 允许代理探测时跳过 TLS 证书验证(仅用于调试) + insecure_skip_verify: false + +# ============================================================================= +# Gateway Configuration +# 网关配置 +# ============================================================================= +gateway: + # Timeout for waiting upstream response headers (seconds) + # 等待上游响应头超时时间(秒) + response_header_timeout: 600 + # Max request body size in bytes (default: 100MB) + # 请求体最大字节数(默认 100MB) + max_body_size: 104857600 + # Connection pool isolation strategy: + # 连接池隔离策略: + # - proxy: Isolate by proxy, same proxy shares connection pool (suitable for few proxies, many accounts) + # - proxy: 按代理隔离,同一代理共享连接池(适合代理少、账户多) + # - account: Isolate by account, same account shares connection pool (suitable for few accounts, strict isolation) + # - account: 按账户隔离,同一账户共享连接池(适合账户少、需严格隔离) + # - account_proxy: Isolate by account+proxy combination (default, finest granularity) + # - account_proxy: 按账户+代理组合隔离(默认,最细粒度) + connection_pool_isolation: "account_proxy" + # HTTP upstream connection pool settings (HTTP/2 + multi-proxy scenario defaults) + # HTTP 上游连接池配置(HTTP/2 + 多代理场景默认值) + # Max idle connections across all hosts + # 所有主机的最大空闲连接数 + max_idle_conns: 240 + # Max idle connections per host + # 每个主机的最大空闲连接数 + max_idle_conns_per_host: 120 + # Max connections per host + # 每个主机的最大连接数 + 
max_conns_per_host: 240
+  # Idle connection timeout (seconds)
+  # 空闲连接超时时间(秒)
+  idle_conn_timeout_seconds: 90
+  # Upstream client cache settings
+  # 上游连接池客户端缓存配置
+  # max_upstream_clients: Max cached clients, evicts least recently used when exceeded
+  # max_upstream_clients: 最大缓存客户端数量,超出后淘汰最久未使用的
+  max_upstream_clients: 5000
+  # client_idle_ttl_seconds: Client idle reclaim threshold (seconds), reclaimed when idle and no active requests
+  # client_idle_ttl_seconds: 客户端空闲回收阈值(秒),超时且无活跃请求时回收
+  client_idle_ttl_seconds: 900
+  # Concurrency slot expiration time (minutes)
+  # 并发槽位过期时间(分钟)
+  concurrency_slot_ttl_minutes: 30
+  # Stream data interval timeout (seconds), 0=disable
+  # 流数据间隔超时(秒),0=禁用
+  stream_data_interval_timeout: 180
+  # Stream keepalive interval (seconds), 0=disable
+  # 流式 keepalive 间隔(秒),0=禁用
+  stream_keepalive_interval: 10
+  # SSE max line size in bytes (default: 40MB)
+  # SSE 单行最大字节数(默认 40MB)
+  max_line_size: 41943040
+  # Log upstream error response body summary (safe/truncated; does not log request content)
+  # 记录上游错误响应体摘要(安全/截断;不记录请求内容)
+  log_upstream_error_body: true
+  # Max bytes to log from upstream error body
+  # 记录上游错误响应体的最大字节数
+  log_upstream_error_body_max_bytes: 2048
+  # Auto inject anthropic-beta header for API-key accounts when needed (default: off)
+  # 需要时自动为 API-key 账户注入 anthropic-beta 头(默认:关闭)
+  inject_beta_for_apikey: false
+  # Allow failover on selected 400 errors (default: off)
+  # 允许在特定 400 错误时进行故障转移(默认:关闭)
+  failover_on_400: false
+  # Scheduling configuration
+  # 调度配置
+  scheduling:
+    # Sticky session max waiting queue size
+    # 粘性会话最大排队长度
+    sticky_session_max_waiting: 3
+    # Sticky session wait timeout (duration)
+    # 粘性会话等待超时(时间段)
+    sticky_session_wait_timeout: 120s
+    # Fallback wait timeout (duration)
+    # 兜底排队等待超时(时间段)
+    fallback_wait_timeout: 30s
+    # Fallback max waiting queue size
+    # 兜底最大排队长度
+    fallback_max_waiting: 100
+    # Enable batch load calculation for scheduling
+    # 启用调度批量负载计算
+    load_batch_enabled: true
+    # Slot cleanup interval (duration)
+    # 并发槽位清理周期(时间段)
+    slot_cleanup_interval: 30s
+    # Allow controlled fallback to DB (default true, preserves existing behavior)
+    # 是否允许受控回源到 DB(默认 true,保持现有行为)
+    db_fallback_enabled: true
+    # Controlled DB fallback timeout (seconds); 0 = no extra tightening
+    # 受控回源超时(秒),0 表示不额外收紧超时
+    db_fallback_timeout_seconds: 0
+    # Controlled DB fallback rate limit (per-instance QPS); 0 = unlimited
+    # 受控回源限流(实例级 QPS),0 表示不限制
+    db_fallback_max_qps: 0
+    # Outbox poll interval (seconds)
+    # outbox 轮询周期(秒)
+    outbox_poll_interval_seconds: 1
+    # Outbox lag warning threshold (seconds)
+    # outbox 滞后告警阈值(秒)
+    outbox_lag_warn_seconds: 5
+    # Outbox lag threshold that forces a rebuild (seconds)
+    # outbox 触发强制重建阈值(秒)
+    outbox_lag_rebuild_seconds: 10
+    # Consecutive lag hits required to trigger a rebuild
+    # outbox 连续滞后触发次数
+    outbox_lag_rebuild_failures: 3
+    # Outbox backlog threshold that triggers a rebuild (rows)
+    # outbox 积压触发重建阈值(行数)
+    outbox_backlog_rebuild_rows: 10000
+    # Full rebuild interval (seconds); 0 = disabled
+    # 全量重建周期(秒),0 表示禁用
+    full_rebuild_interval_seconds: 300
+
+# =============================================================================
+# API Key Auth Cache Configuration
+# API Key 认证缓存配置
+# =============================================================================
+api_key_auth_cache:
+  # L1 cache size (entries), in-process LRU/TTL cache
+  # L1 缓存容量(条目数),进程内 LRU/TTL 缓存
+  l1_size: 65535
+  # L1 cache TTL (seconds)
+  # L1 缓存 TTL(秒)
+  l1_ttl_seconds: 15
+  # L2 cache TTL (seconds), stored in Redis
+  # L2 缓存 TTL(秒),Redis 中存储
+  l2_ttl_seconds: 300
+  # Negative cache TTL (seconds)
+  # 负缓存 TTL(秒)
+  negative_ttl_seconds: 30
+  # TTL jitter percent (0-100)
+  # TTL 抖动百分比(0-100)
+  jitter_percent: 10
+  # Enable singleflight for cache misses
+  # 缓存未命中时启用 singleflight 合并回源
+  singleflight: true
+
+# =============================================================================
+# Dashboard Cache Configuration
+# 仪表盘缓存配置
+# =============================================================================
+dashboard_cache:
+  # Enable dashboard 
cache + # 启用仪表盘缓存 + enabled: true + # Redis key prefix for multi-environment isolation + # Redis key 前缀,用于多环境隔离 + key_prefix: "sub2api:" + # Fresh TTL (seconds); within this window cached stats are considered fresh + # 新鲜阈值(秒);命中后处于该窗口视为新鲜数据 + stats_fresh_ttl_seconds: 15 + # Cache TTL (seconds) stored in Redis + # Redis 缓存 TTL(秒) + stats_ttl_seconds: 30 + # Async refresh timeout (seconds) + # 异步刷新超时(秒) + stats_refresh_timeout_seconds: 30 + +# ============================================================================= +# Dashboard Aggregation Configuration +# 仪表盘预聚合配置(重启生效) +# ============================================================================= +dashboard_aggregation: + # Enable aggregation job + # 启用聚合作业 + enabled: true + # Refresh interval (seconds) + # 刷新间隔(秒) + interval_seconds: 60 + # Lookback window (seconds) for late-arriving data + # 回看窗口(秒),处理迟到数据 + lookback_seconds: 120 + # Allow manual backfill + # 允许手动回填 + backfill_enabled: false + # Backfill max range (days) + # 回填最大跨度(天) + backfill_max_days: 31 + # Recompute recent N days on startup + # 启动时重算最近 N 天 + recompute_days: 2 + # Retention windows (days) + # 保留窗口(天) + retention: + # Raw usage_logs retention + # 原始 usage_logs 保留天数 + usage_logs_days: 90 + # Hourly aggregation retention + # 小时聚合保留天数 + hourly_days: 180 + # Daily aggregation retention + # 日聚合保留天数 + daily_days: 730 + +# ============================================================================= +# Concurrency Wait Configuration +# 并发等待配置 +# ============================================================================= +concurrency: + # SSE ping interval during concurrency wait (seconds) + # 并发等待期间的 SSE ping 间隔(秒) + ping_interval: 10 + +# ============================================================================= +# Database Configuration (PostgreSQL) +# 数据库配置 (PostgreSQL) +# ============================================================================= +database: + # Database host address + # 数据库主机地址 + host: "localhost" + # Database port + # 数据库端口 + port: 5432 + # Database username + # 数据库用户名 + user: "postgres" + # Database password + # 数据库密码 + password: "your_secure_password_here" + # Database name + # 数据库名称 + dbname: "sub2api" + # SSL mode: disable, require, verify-ca, verify-full + # SSL 模式:disable(禁用), require(要求), verify-ca(验证CA), verify-full(完全验证) + sslmode: "disable" + +# ============================================================================= +# Redis Configuration +# Redis 配置 +# ============================================================================= +redis: + # Redis host address + # Redis 主机地址 + host: "localhost" + # Redis port + # Redis 端口 + port: 6379 + # Redis password (leave empty if no password is set) + # Redis 密码(如果未设置密码则留空) + password: "" + # Database number (0-15) + # 数据库编号(0-15) + db: 0 + +# ============================================================================= +# Ops Monitoring (Optional) +# 运维监控 (可选) +# ============================================================================= +ops: + # Enable ops monitoring features (background jobs and APIs) + # 是否启用运维监控功能(后台任务和接口) + # Set to false to hide ops menu in sidebar and disable all ops features + # 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能 + # Other detailed settings (cleanup, aggregation, etc.) 
are configured in the ops settings dialog
+  # 其他详细设置(数据清理、预聚合等)在运维监控设置对话框中配置
+  enabled: true
+
+# =============================================================================
+# JWT Configuration
+# JWT 配置
+# =============================================================================
+jwt:
+  # IMPORTANT: Change this to a random string in production!
+  # 重要:生产环境中请更改为随机字符串!
+  # Generate with / 生成命令: openssl rand -hex 32
+  secret: "change-this-to-a-secure-random-string"
+  # Token expiration time in hours (max 24)
+  # 令牌过期时间(小时,最大 24)
+  expire_hour: 24
+
+# =============================================================================
+# LinuxDo Connect OAuth Login (SSO)
+# LinuxDo Connect OAuth 登录(用于 Sub2API 用户登录)
+# =============================================================================
+linuxdo_connect:
+  enabled: false
+  client_id: ""
+  client_secret: ""
+  authorize_url: "https://connect.linux.do/oauth2/authorize"
+  token_url: "https://connect.linux.do/oauth2/token"
+  userinfo_url: "https://connect.linux.do/api/user"
+  scopes: "user"
+  # Example / 示例: "https://your-domain.com/api/v1/auth/oauth/linuxdo/callback"
+  redirect_url: ""
+  # Security notes / 安全提示:
+  # - Prefer a same-origin relative path (starting with /) to avoid redirecting the token to an unexpected third-party domain
+  # - 建议使用同源相对路径(以 / 开头),避免把 token 重定向到意外的第三方域名
+  # - This URL should not contain a #fragment (the implementation passes the access_token via the URL fragment)
+  # - 该地址不应包含 #fragment(本实现使用 URL fragment 传递 access_token)
+  frontend_redirect_url: "/auth/linuxdo/callback"
+  token_auth_method: "client_secret_post" # client_secret_post | client_secret_basic | none
+  # Note: when token_auth_method=none (public client), PKCE must be enabled
+  # 注意:当 token_auth_method=none(public client)时,必须启用 PKCE
+  use_pkce: false
+  userinfo_email_path: ""
+  userinfo_id_path: ""
+  userinfo_username_path: ""
+
+# =============================================================================
+# Default Settings
+# 默认设置
+# =============================================================================
+default:
+  # Initial admin account (created on first run)
+  # 初始管理员账户(首次运行时创建)
+  admin_email: "admin@example.com"
+  admin_password: "admin123"
+
+  # Default settings for new users
+  # 新用户默认设置
+  # Max concurrent requests per user
+  # 每用户最大并发请求数
+  user_concurrency: 5
+  # Initial balance for new users
+  # 新用户初始余额
+  user_balance: 0
+
+  # API key settings
+  # API 密钥设置
+  # Prefix for generated API keys
+  # 生成的 API 密钥前缀
+  api_key_prefix: "sk-"
+
+  # Rate multiplier (affects billing calculation)
+  # 费率倍数(影响计费计算)
+  rate_multiplier: 1.0
+
+# =============================================================================
+# Rate Limiting
+# 速率限制
+# =============================================================================
+rate_limit:
+  # Cooldown time (in minutes) when upstream returns 529 (overloaded)
+  # 上游返回 529(过载)时的冷却时间(分钟)
+  overload_cooldown_minutes: 10
+
+# =============================================================================
+# Pricing Data Source (Optional)
+# 定价数据源(可选)
+# =============================================================================
+pricing:
+  # URL to fetch model pricing data (default: LiteLLM)
+  # 获取模型定价数据的 URL(默认:LiteLLM)
+  remote_url: "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+  # Hash verification URL (optional)
+  # 哈希校验 URL(可选)
+  hash_url: ""
+  # Local data directory for caching
+  # 本地数据缓存目录
+  data_dir: "./data"
+  # Fallback pricing file
+  # 备用定价文件
+  fallback_file: "./resources/model-pricing/model_prices_and_context_window.json"
+  # Update interval in hours
+  # 更新间隔(小时)
+  update_interval_hours: 24
+  # Hash check interval in minutes
+  # 哈希检查间隔(分钟)
+  hash_check_interval_minutes: 10
+
+# =============================================================================
+# Billing Configuration
+# 
计费配置 +# ============================================================================= +billing: + circuit_breaker: + # Enable circuit breaker for billing service + # 启用计费服务熔断器 + enabled: true + # Number of failures before opening circuit + # 触发熔断的失败次数阈值 + failure_threshold: 5 + # Time to wait before attempting reset (seconds) + # 熔断后重试等待时间(秒) + reset_timeout_seconds: 30 + # Number of requests to allow in half-open state + # 半开状态允许通过的请求数 + half_open_requests: 3 + +# ============================================================================= +# Turnstile Configuration +# Turnstile 人机验证配置 +# ============================================================================= +turnstile: + # Require Turnstile in release mode (when enabled, login/register will fail if not configured) + # 在 release 模式下要求 Turnstile 验证(启用后,若未配置则登录/注册会失败) + required: false + +# ============================================================================= +# Gemini OAuth (Required for Gemini accounts) +# Gemini OAuth 配置(Gemini 账户必需) +# ============================================================================= +# Sub2API supports TWO Gemini OAuth modes: +# Sub2API 支持两种 Gemini OAuth 模式: +# +# 1. Code Assist OAuth (requires GCP project_id) +# 1. Code Assist OAuth(需要 GCP project_id) +# - Uses: cloudcode-pa.googleapis.com (Code Assist API) +# - 使用:cloudcode-pa.googleapis.com(Code Assist API) +# +# 2. AI Studio OAuth (no project_id needed) +# 2. AI Studio OAuth(不需要 project_id) +# - Uses: generativelanguage.googleapis.com (AI Studio API) +# - 使用:generativelanguage.googleapis.com(AI Studio API) +# +# Default: Uses Gemini CLI's public OAuth credentials (same as Google's official CLI tool) +# 默认:使用 Gemini CLI 的公开 OAuth 凭证(与 Google 官方 CLI 工具相同) +gemini: + oauth: + # Gemini CLI public OAuth credentials (works for both Code Assist and AI Studio) + # Gemini CLI 公开 OAuth 凭证(适用于 Code Assist 和 AI Studio) + client_id: "681255809395-oo8ft2oprdrnp9e3aqf6av3hmdib135j.apps.googleusercontent.com" + client_secret: "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" + # Optional scopes (space-separated). Leave empty to auto-select based on oauth_type. + # 可选的权限范围(空格分隔)。留空则根据 oauth_type 自动选择。 + scopes: "" + quota: + # Optional: local quota simulation for Gemini Code Assist (local billing). + # 可选:Gemini Code Assist 本地配额模拟(本地计费)。 + # These values are used for UI progress + precheck scheduling, not official Google quotas. 
+ # 这些值用于 UI 进度显示和预检调度,并非 Google 官方配额。 + tiers: + LEGACY: + # Pro model requests per day + # Pro 模型每日请求数 + pro_rpd: 50 + # Flash model requests per day + # Flash 模型每日请求数 + flash_rpd: 1500 + # Cooldown time (minutes) after hitting quota + # 达到配额后的冷却时间(分钟) + cooldown_minutes: 30 + PRO: + # Pro model requests per day + # Pro 模型每日请求数 + pro_rpd: 1500 + # Flash model requests per day + # Flash 模型每日请求数 + flash_rpd: 4000 + # Cooldown time (minutes) after hitting quota + # 达到配额后的冷却时间(分钟) + cooldown_minutes: 5 + ULTRA: + # Pro model requests per day + # Pro 模型每日请求数 + pro_rpd: 2000 + # Flash model requests per day (0 = unlimited) + # Flash 模型每日请求数(0 = 无限制) + flash_rpd: 0 + # Cooldown time (minutes) after hitting quota + # 达到配额后的冷却时间(分钟) + cooldown_minutes: 5 + +# ============================================================================= +# Update Configuration (在线更新配置) +# ============================================================================= +update: + # Proxy URL for accessing GitHub (used for online updates and pricing data) + # 用于访问 GitHub 的代理地址(用于在线更新和定价数据获取) + # Supports: http, https, socks5, socks5h + # Examples: + # - HTTP proxy: "http://127.0.0.1:7890" + # - SOCKS5 proxy: "socks5://127.0.0.1:1080" + # - With authentication: "http://user:pass@proxy.example.com:8080" + # Leave empty for direct connection (recommended for overseas servers) + # 留空表示直连(适用于海外服务器) + proxy_url: "" diff --git a/deploy/docker-compose-test.yml b/deploy/docker-compose-test.yml new file mode 100644 index 00000000..bcda3141 --- /dev/null +++ b/deploy/docker-compose-test.yml @@ -0,0 +1,197 @@ +# ============================================================================= +# Sub2API Docker Compose Test Configuration (Local Build) +# ============================================================================= +# Quick Start: +# 1. Copy .env.example to .env and configure +# 2. docker-compose -f docker-compose-test.yml up -d --build +# 3. Check logs: docker-compose -f docker-compose-test.yml logs -f sub2api +# 4. Access: http://localhost:8080 +# +# This configuration builds the image from source (Dockerfile in project root). +# All configuration is done via environment variables. +# No Setup Wizard needed - the system auto-initializes on first run. +# ============================================================================= + +services: + # =========================================================================== + # Sub2API Application + # =========================================================================== + sub2api: + image: sub2api:latest + build: + context: .. 
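+      # Build context is the repository root (one level up from deploy/),
+      # where the project's root Dockerfile lives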
+ dockerfile: Dockerfile + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + # Data persistence (config.yaml will be auto-generated here) + - sub2api_data:/app/data + # Mount custom config.yaml (optional, overrides auto-generated config) + - ./config.yaml:/app/data/config.yaml:ro + environment: + # ======================================================================= + # Auto Setup (REQUIRED for Docker deployment) + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) + # ======================================================================= + - DATABASE_HOST=postgres + - DATABASE_PORT=5432 + - DATABASE_USER=${POSTGRES_USER:-sub2api} + - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} + - DATABASE_SSLMODE=disable + + # ======================================================================= + # Redis Configuration + # ======================================================================= + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + + # ======================================================================= + # Admin Account (auto-created on first run) + # ======================================================================= + - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + # Leave empty to auto-generate (recommended) + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # Timezone Configuration + # This affects ALL time operations in the application: + # - Database timestamps + # - Usage statistics "today" boundary + # - Subscription expiry times + # - Log timestamps + # Common values: Asia/Shanghai, America/New_York, Europe/London, UTC + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (for Gemini accounts) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + + # ======================================================================= + # Security Configuration (URL Allowlist) + # ======================================================================= + # Allow private IP addresses for CRS sync (for internal deployments) + - SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-true} + depends_on: + postgres: + condition: service_healthy + 
redis: + condition: service_healthy + networks: + - sub2api-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + + # =========================================================================== + # PostgreSQL Database + # =========================================================================== + postgres: + image: postgres:18-alpine + container_name: sub2api-postgres + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + - postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_USER:-sub2api} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - POSTGRES_DB=${POSTGRES_DB:-sub2api} + - TZ=${TZ:-Asia/Shanghai} + networks: + - sub2api-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + # 注意:不暴露端口到宿主机,应用通过内部网络连接 + # 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"] + + # =========================================================================== + # Redis Cache + # =========================================================================== + redis: + image: redis:7-alpine + container_name: sub2api-redis + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + - redis_data:/data + command: > + redis-server + --save 60 1 + --appendonly yes + --appendfsync everysec + ${REDIS_PASSWORD:+--requirepass ${REDIS_PASSWORD}} + environment: + - TZ=${TZ:-Asia/Shanghai} + # REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag) + - REDISCLI_AUTH=${REDIS_PASSWORD:-} + networks: + - sub2api-network + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 5s + +# ============================================================================= +# Volumes +# ============================================================================= +volumes: + sub2api_data: + driver: local + postgres_data: + driver: local + redis_data: + driver: local + +# ============================================================================= +# Networks +# ============================================================================= +networks: + sub2api-network: + driver: bridge diff --git a/deploy/docker-compose.override.yml.example b/deploy/docker-compose.override.yml.example new file mode 100644 index 00000000..297724f5 --- /dev/null +++ b/deploy/docker-compose.override.yml.example @@ -0,0 +1,137 @@ +# ============================================================================= +# Docker Compose Override Configuration Example +# ============================================================================= +# This file provides examples for customizing the Docker Compose setup. +# Copy this file to docker-compose.override.yml and modify as needed. +# +# Usage: +# cp docker-compose.override.yml.example docker-compose.override.yml +# # Edit docker-compose.override.yml with your settings +# docker-compose up -d +# +# IMPORTANT: docker-compose.override.yml is gitignored and will not be committed. 
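+#
+# Tip (optional): preview the merged configuration before starting anything:
+#   docker-compose config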
+# =============================================================================
+
+# =============================================================================
+# Scenario 1: Use External Database and Redis (Recommended for Production)
+# =============================================================================
+# Use this when you have PostgreSQL and Redis running on the host machine
+# or on separate servers.
+#
+# Prerequisites:
+#   - PostgreSQL running on the host (accessible via host.docker.internal)
+#   - Redis running on the host (accessible via host.docker.internal)
+#   - Update DATABASE_PORT and REDIS_PORT in the .env file if using non-standard ports
+#
+# Security Notes:
+#   - Ensure PostgreSQL's pg_hba.conf allows connections from the Docker network
+#   - Use strong passwords for the database and Redis
+#   - Consider using SSL/TLS for database connections in production
+# =============================================================================
+
+services:
+  sub2api:
+    # Remove dependencies on containerized postgres/redis
+    depends_on: []
+
+    # Enable access to host machine services
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+
+    # Override database and Redis connection settings
+    environment:
+      # PostgreSQL Configuration
+      DATABASE_HOST: host.docker.internal
+      DATABASE_PORT: "5678"  # Change to your PostgreSQL port
+      # DATABASE_USER: postgres  # Uncomment to override
+      # DATABASE_PASSWORD: your_password  # Uncomment to override
+      # DATABASE_DBNAME: sub2api  # Uncomment to override
+
+      # Redis Configuration
+      REDIS_HOST: host.docker.internal
+      REDIS_PORT: "6379"  # Change to your Redis port
+      # REDIS_PASSWORD: your_redis_password  # Uncomment if Redis requires auth
+      # REDIS_DB: 0  # Uncomment to override
+
+  # Disable containerized PostgreSQL
+  postgres:
+    deploy:
+      replicas: 0
+    scale: 0
+
+  # Disable containerized Redis
+  redis:
+    deploy:
+      replicas: 0
+    scale: 0
+
+# =============================================================================
+# Scenario 2: Development with Local Services (Alternative)
+# =============================================================================
+# Uncomment this section if you want to use the containerized postgres/redis
+# but expose their ports for local development tools.
+#
+# Usage: Comment out Scenario 1 above and uncomment this section.
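+#
+# With the ports exposed, host tools can talk to the containers directly,
+# e.g. (using the stack's default user/database names):
+#   psql -h 127.0.0.1 -p 5432 -U sub2api -d sub2api
+#   redis-cli -h 127.0.0.1 -p 6379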
+# =============================================================================
+
+# services:
+#   sub2api:
+#     # Keep default dependencies (empty override)
+#     {}
+#
+#   postgres:
+#     ports:
+#       - "127.0.0.1:5432:5432"  # Expose PostgreSQL on localhost
+#
+#   redis:
+#     ports:
+#       - "127.0.0.1:6379:6379"  # Expose Redis on localhost
+
+# =============================================================================
+# Scenario 3: Custom Network Configuration
+# =============================================================================
+# Uncomment if you need to connect to an existing Docker network
+# =============================================================================
+
+# networks:
+#   default:
+#     external: true
+#     name: your-existing-network
+
+# =============================================================================
+# Scenario 4: Resource Limits (Production)
+# =============================================================================
+# Uncomment to set resource limits for the sub2api container
+# =============================================================================
+
+# services:
+#   sub2api:
+#     deploy:
+#       resources:
+#         limits:
+#           cpus: '2.0'
+#           memory: 2G
+#         reservations:
+#           cpus: '1.0'
+#           memory: 1G
+
+# =============================================================================
+# Scenario 5: Custom Volumes
+# =============================================================================
+# Uncomment to mount additional volumes (e.g., for logs, backups)
+# =============================================================================
+
+# services:
+#   sub2api:
+#     volumes:
+#       - ./logs:/app/logs
+#       - ./backups:/app/backups
+
+# =============================================================================
+# Additional Notes
+# =============================================================================
+# - This file overrides settings in docker-compose.yml
+# - Environment variables in the .env file take precedence
+# - For more information, see: https://docs.docker.com/compose/extends/
+# - Check the main README.md for detailed configuration instructions
+# =============================================================================
diff --git a/deploy/docker-compose.standalone.yml b/deploy/docker-compose.standalone.yml
new file mode 100644
index 00000000..1bf247c7
--- /dev/null
+++ b/deploy/docker-compose.standalone.yml
@@ -0,0 +1,93 @@
+# =============================================================================
+# Sub2API Docker Compose - Standalone Configuration
+# =============================================================================
+# This configuration runs only the Sub2API application.
+# PostgreSQL and Redis must be provided externally.
+#
+# Usage:
+#   1. Copy .env.example to .env and configure database/redis connection
+#   2. docker-compose -f docker-compose.standalone.yml up -d
+#   3. 
Access: http://localhost:8080 +# ============================================================================= + +services: + sub2api: + image: weishaw/sub2api:latest + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + - sub2api_data:/app/data + extra_hosts: + - "host.docker.internal:host-gateway" + environment: + # ======================================================================= + # Auto Setup + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) - Required + # ======================================================================= + - DATABASE_HOST=${DATABASE_HOST:?DATABASE_HOST is required} + - DATABASE_PORT=${DATABASE_PORT:-5432} + - DATABASE_USER=${DATABASE_USER:-sub2api} + - DATABASE_PASSWORD=${DATABASE_PASSWORD:?DATABASE_PASSWORD is required} + - DATABASE_DBNAME=${DATABASE_DBNAME:-sub2api} + - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} + + # ======================================================================= + # Redis Configuration - Required + # ======================================================================= + - REDIS_HOST=${REDIS_HOST:?REDIS_HOST is required} + - REDIS_PORT=${REDIS_PORT:-6379} + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + + # ======================================================================= + # Admin Account (auto-created on first run) + # ======================================================================= + - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # Timezone Configuration + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (optional) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + +volumes: + sub2api_data: + driver: local diff --git a/deploy/docker-compose.yml b/deploy/docker-compose.yml new file mode 100644 index 00000000..484df3a8 --- /dev/null +++ b/deploy/docker-compose.yml @@ -0,0 +1,211 @@ +# ============================================================================= +# Sub2API Docker Compose Configuration +# ============================================================================= +# Quick Start: +# 1. 
Copy .env.example to .env and configure +# 2. docker-compose up -d +# 3. Check logs: docker-compose logs -f sub2api +# 4. Access: http://localhost:8080 +# +# All configuration is done via environment variables. +# No Setup Wizard needed - the system auto-initializes on first run. +# ============================================================================= + +services: + # =========================================================================== + # Sub2API Application + # =========================================================================== + sub2api: + image: weishaw/sub2api:latest + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + # Data persistence (config.yaml will be auto-generated here) + - sub2api_data:/app/data + # Optional: Mount custom config.yaml (uncomment and create the file first) + # Copy config.example.yaml to config.yaml, modify it, then uncomment: + # - ./config.yaml:/app/data/config.yaml:ro + environment: + # ======================================================================= + # Auto Setup (REQUIRED for Docker deployment) + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) + # ======================================================================= + - DATABASE_HOST=postgres + - DATABASE_PORT=5432 + - DATABASE_USER=${POSTGRES_USER:-sub2api} + - DATABASE_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - DATABASE_DBNAME=${POSTGRES_DB:-sub2api} + - DATABASE_SSLMODE=disable + + # ======================================================================= + # Redis Configuration + # ======================================================================= + - REDIS_HOST=redis + - REDIS_PORT=6379 + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + + # ======================================================================= + # Admin Account (auto-created on first run) + # ======================================================================= + - ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + # IMPORTANT: Set a fixed JWT_SECRET to prevent login sessions from being + # invalidated after container restarts. If left empty, a random secret + # will be generated on each startup. 
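+      # Note: rotating JWT_SECRET invalidates all previously issued tokens,
+      # so every logged-in user (including the admin) must sign in again.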
+ # Generate a secure secret: openssl rand -hex 32 + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # Timezone Configuration + # This affects ALL time operations in the application: + # - Database timestamps + # - Usage statistics "today" boundary + # - Subscription expiry times + # - Log timestamps + # Common values: Asia/Shanghai, America/New_York, Europe/London, UTC + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (for Gemini accounts) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + + # ======================================================================= + # Security Configuration (URL Allowlist) + # ======================================================================= + # Enable URL allowlist validation (false to skip allowlist checks) + - SECURITY_URL_ALLOWLIST_ENABLED=${SECURITY_URL_ALLOWLIST_ENABLED:-false} + # Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https) + - SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=${SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP:-false} + # Allow private IP addresses for upstream/pricing/CRS (for internal deployments) + - SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=${SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS:-false} + # Upstream hosts whitelist (comma-separated, only used when enabled=true) + - SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS=${SECURITY_URL_ALLOWLIST_UPSTREAM_HOSTS:-} + + # ======================================================================= + # Update Configuration (在线更新配置) + # ======================================================================= + # Proxy for accessing GitHub (online updates + pricing data) + # Examples: http://host:port, socks5://host:port + - UPDATE_PROXY_URL=${UPDATE_PROXY_URL:-} + depends_on: + postgres: + condition: service_healthy + redis: + condition: service_healthy + networks: + - sub2api-network + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + + # =========================================================================== + # PostgreSQL Database + # =========================================================================== + postgres: + image: postgres:18-alpine + container_name: sub2api-postgres + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + - postgres_data:/var/lib/postgresql/data + environment: + - POSTGRES_USER=${POSTGRES_USER:-sub2api} + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required} + - POSTGRES_DB=${POSTGRES_DB:-sub2api} + - TZ=${TZ:-Asia/Shanghai} + networks: + - sub2api-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-sub2api} -d ${POSTGRES_DB:-sub2api}"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 10s + # 注意:不暴露端口到宿主机,应用通过内部网络连接 + # 如需调试,可临时添加:ports: ["127.0.0.1:5433:5432"] + + # =========================================================================== + # Redis Cache + # =========================================================================== + redis: + image: 
redis:8-alpine + container_name: sub2api-redis + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + volumes: + - redis_data:/data + command: > + sh -c ' + redis-server + --save 60 1 + --appendonly yes + --appendfsync everysec + ${REDIS_PASSWORD:+--requirepass "$REDIS_PASSWORD"}' + environment: + - TZ=${TZ:-Asia/Shanghai} + # REDISCLI_AUTH is used by redis-cli for authentication (safer than -a flag) + - REDISCLI_AUTH=${REDIS_PASSWORD:-} + networks: + - sub2api-network + healthcheck: + test: ["CMD", "redis-cli", "ping"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 5s + +# ============================================================================= +# Volumes +# ============================================================================= +volumes: + sub2api_data: + driver: local + postgres_data: + driver: local + redis_data: + driver: local + +# ============================================================================= +# Networks +# ============================================================================= +networks: + sub2api-network: + driver: bridge diff --git a/deploy/flow.md b/deploy/flow.md new file mode 100644 index 00000000..0904c72f --- /dev/null +++ b/deploy/flow.md @@ -0,0 +1,222 @@ +```mermaid +flowchart TD + %% Master dispatch + A[HTTP Request] --> B{Route} + B -->|v1 messages| GA0 + B -->|openai v1 responses| OA0 + B -->|v1beta models model action| GM0 + B -->|v1 messages count tokens| GT0 + B -->|v1beta models list or get| GL0 + + %% ========================= + %% FLOW A: Claude Gateway + %% ========================= + subgraph FLOW_A["v1 messages Claude Gateway"] + GA0[Auth middleware] --> GA1[Read body] + GA1 -->|empty| GA1E[400 invalid_request_error] + GA1 --> GA2[ParseGatewayRequest] + GA2 -->|parse error| GA2E[400 invalid_request_error] + GA2 --> GA3{model present} + GA3 -->|no| GA3E[400 invalid_request_error] + GA3 --> GA4[streamStarted false] + GA4 --> GA5[IncrementWaitCount user] + GA5 -->|queue full| GA5E[429 rate_limit_error] + GA5 --> GA6[AcquireUserSlotWithWait] + GA6 -->|timeout or fail| GA6E[429 rate_limit_error] + GA6 --> GA7[BillingEligibility check post wait] + GA7 -->|fail| GA7E[403 billing_error] + GA7 --> GA8[Generate sessionHash] + GA8 --> GA9[Resolve platform] + GA9 --> GA10{platform gemini} + GA10 -->|yes| GA10Y[sessionKey gemini hash] + GA10 -->|no| GA10N[sessionKey hash] + GA10Y --> GA11 + GA10N --> GA11 + + GA11[SelectAccountWithLoadAwareness] -->|err and no failed| GA11E1[503 no available accounts] + GA11 -->|err and failed| GA11E2[map failover error] + GA11 --> GA12[Warmup intercept] + GA12 -->|yes| GA12Y[return mock and release if held] + GA12 -->|no| GA13[Acquire account slot or wait] + GA13 -->|wait queue full| GA13E1[429 rate_limit_error] + GA13 -->|wait timeout| GA13E2[429 concurrency limit] + GA13 --> GA14[BindStickySession if waited] + GA14 --> GA15{account platform antigravity} + GA15 -->|yes| GA15Y[ForwardGemini antigravity] + GA15 -->|no| GA15N[Forward Claude] + GA15Y --> GA16[Release account slot and dec account wait] + GA15N --> GA16 + GA16 --> GA17{UpstreamFailoverError} + GA17 -->|yes| GA18[mark failedAccountIDs and map error if exceed] + GA18 -->|loop| GA11 + GA17 -->|no| GA19[success async RecordUsage and return] + GA19 --> GA20[defer release user slot and dec wait count] + end + + %% ========================= + %% FLOW B: OpenAI + %% ========================= + subgraph FLOW_B["openai v1 responses"] + OA0[Auth middleware] --> OA1[Read body] + OA1 -->|empty| OA1E[400 
invalid_request_error] + OA1 --> OA2[json Unmarshal body] + OA2 -->|parse error| OA2E[400 invalid_request_error] + OA2 --> OA3{model present} + OA3 -->|no| OA3E[400 invalid_request_error] + OA3 --> OA4{User Agent Codex CLI} + OA4 -->|no| OA4N[set default instructions] + OA4 -->|yes| OA4Y[no change] + OA4N --> OA5 + OA4Y --> OA5 + OA5[streamStarted false] --> OA6[IncrementWaitCount user] + OA6 -->|queue full| OA6E[429 rate_limit_error] + OA6 --> OA7[AcquireUserSlotWithWait] + OA7 -->|timeout or fail| OA7E[429 rate_limit_error] + OA7 --> OA8[BillingEligibility check post wait] + OA8 -->|fail| OA8E[403 billing_error] + OA8 --> OA9[sessionHash sha256 session_id] + OA9 --> OA10[SelectAccountWithLoadAwareness] + OA10 -->|err and no failed| OA10E1[503 no available accounts] + OA10 -->|err and failed| OA10E2[map failover error] + OA10 --> OA11[Acquire account slot or wait] + OA11 -->|wait queue full| OA11E1[429 rate_limit_error] + OA11 -->|wait timeout| OA11E2[429 concurrency limit] + OA11 --> OA12[BindStickySession openai hash if waited] + OA12 --> OA13[Forward OpenAI upstream] + OA13 --> OA14[Release account slot and dec account wait] + OA14 --> OA15{UpstreamFailoverError} + OA15 -->|yes| OA16[mark failedAccountIDs and map error if exceed] + OA16 -->|loop| OA10 + OA15 -->|no| OA17[success async RecordUsage and return] + OA17 --> OA18[defer release user slot and dec wait count] + end + + %% ========================= + %% FLOW C: Gemini Native + %% ========================= + subgraph FLOW_C["v1beta models model action Gemini Native"] + GM0[Auth middleware] --> GM1[Validate platform] + GM1 -->|invalid| GM1E[400 googleError] + GM1 --> GM2[Parse path modelName action] + GM2 -->|invalid| GM2E[400 googleError] + GM2 --> GM3{action supported} + GM3 -->|no| GM3E[404 googleError] + GM3 --> GM4[Read body] + GM4 -->|empty| GM4E[400 googleError] + GM4 --> GM5[streamStarted false] + GM5 --> GM6[IncrementWaitCount user] + GM6 -->|queue full| GM6E[429 googleError] + GM6 --> GM7[AcquireUserSlotWithWait] + GM7 -->|timeout or fail| GM7E[429 googleError] + GM7 --> GM8[BillingEligibility check post wait] + GM8 -->|fail| GM8E[403 googleError] + GM8 --> GM9[Generate sessionHash] + GM9 --> GM10[sessionKey gemini hash] + GM10 --> GM11[SelectAccountWithLoadAwareness] + GM11 -->|err and no failed| GM11E1[503 googleError] + GM11 -->|err and failed| GM11E2[mapGeminiUpstreamError] + GM11 --> GM12[Acquire account slot or wait] + GM12 -->|wait queue full| GM12E1[429 googleError] + GM12 -->|wait timeout| GM12E2[429 googleError] + GM12 --> GM13[BindStickySession if waited] + GM13 --> GM14{account platform antigravity} + GM14 -->|yes| GM14Y[ForwardGemini antigravity] + GM14 -->|no| GM14N[ForwardNative] + GM14Y --> GM15[Release account slot and dec account wait] + GM14N --> GM15 + GM15 --> GM16{UpstreamFailoverError} + GM16 -->|yes| GM17[mark failedAccountIDs and map error if exceed] + GM17 -->|loop| GM11 + GM16 -->|no| GM18[success async RecordUsage and return] + GM18 --> GM19[defer release user slot and dec wait count] + end + + %% ========================= + %% FLOW D: CountTokens + %% ========================= + subgraph FLOW_D["v1 messages count tokens"] + GT0[Auth middleware] --> GT1[Read body] + GT1 -->|empty| GT1E[400 invalid_request_error] + GT1 --> GT2[ParseGatewayRequest] + GT2 -->|parse error| GT2E[400 invalid_request_error] + GT2 --> GT3{model present} + GT3 -->|no| GT3E[400 invalid_request_error] + GT3 --> GT4[BillingEligibility check] + GT4 -->|fail| GT4E[403 billing_error] + GT4 --> GT5[ForwardCountTokens] + end + 
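+    %% Note: SELECT, WAIT, AQ and CLEANUP further below are shared helpers
+    %% used by flows A, B and C; the backoff in node W3 works out to
+    %% delay(n) = min(100ms * 1.5^n, 2s) + jitter
+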
+ %% ========================= + %% FLOW E: Gemini Models List Get + %% ========================= + subgraph FLOW_E["v1beta models list or get"] + GL0[Auth middleware] --> GL1[Validate platform] + GL1 -->|invalid| GL1E[400 googleError] + GL1 --> GL2{force platform antigravity} + GL2 -->|yes| GL2Y[return static fallback models] + GL2 -->|no| GL3[SelectAccountForAIStudioEndpoints] + GL3 -->|no gemini and has antigravity| GL3Y[return fallback models] + GL3 -->|no accounts| GL3E[503 googleError] + GL3 --> GL4[ForwardAIStudioGET] + GL4 -->|error| GL4E[502 googleError] + GL4 --> GL5[Passthrough response or fallback] + end + + %% ========================= + %% SHARED: Account Selection + %% ========================= + subgraph SELECT["SelectAccountWithLoadAwareness detail"] + S0[Start] --> S1{concurrencyService nil OR load batch disabled} + S1 -->|yes| S2[SelectAccountForModelWithExclusions legacy] + S2 --> S3[tryAcquireAccountSlot] + S3 -->|acquired| S3Y[SelectionResult Acquired true ReleaseFunc] + S3 -->|not acquired| S3N[WaitPlan FallbackTimeout MaxWaiting] + S1 -->|no| S4[Resolve platform] + S4 --> S5[List schedulable accounts] + S5 --> S6[Layer1 Sticky session] + S6 -->|hit and valid| S6A[tryAcquireAccountSlot] + S6A -->|acquired| S6AY[SelectionResult Acquired true] + S6A -->|not acquired and waitingCount < StickyMax| S6AN[WaitPlan StickyTimeout Max] + S6 --> S7[Layer2 Load aware] + S7 --> S7A[Load batch concurrency plus wait to loadRate] + S7A --> S7B[Sort priority load LRU OAuth prefer for Gemini] + S7B --> S7C[tryAcquireAccountSlot in order] + S7C -->|first success| S7CY[SelectionResult Acquired true] + S7C -->|none| S8[Layer3 Fallback wait] + S8 --> S8A[Sort priority LRU] + S8A --> S8B[WaitPlan FallbackTimeout Max] + end + + %% ========================= + %% SHARED: Wait Acquire + %% ========================= + subgraph WAIT["AcquireXSlotWithWait detail"] + W0[Try AcquireXSlot immediately] -->|acquired| W1[return ReleaseFunc] + W0 -->|not acquired| W2[Wait loop with timeout] + W2 --> W3[Backoff 100ms x1.5 jitter max2s] + W2 --> W4[If streaming and ping format send SSE ping] + W2 --> W5[Retry AcquireXSlot on timer] + W5 -->|acquired| W1 + W2 -->|timeout| W6[ConcurrencyError IsTimeout true] + end + + %% ========================= + %% SHARED: Account Wait Queue + %% ========================= + subgraph AQ["Account Wait Queue Redis Lua"] + Q1[IncrementAccountWaitCount] --> Q2{current >= max} + Q2 -->|yes| Q2Y[return false] + Q2 -->|no| Q3[INCR and if first set TTL] + Q3 --> Q4[return true] + Q5[DecrementAccountWaitCount] --> Q6[if current > 0 then DECR] + end + + %% ========================= + %% SHARED: Background cleanup + %% ========================= + subgraph CLEANUP["Slot Cleanup Worker"] + C0[StartSlotCleanupWorker interval] --> C1[List schedulable accounts] + C1 --> C2[CleanupExpiredAccountSlots per account] + C2 --> C3[Repeat every interval] + end +``` diff --git a/deploy/install.sh b/deploy/install.sh new file mode 100644 index 00000000..6dcf4123 --- /dev/null +++ b/deploy/install.sh @@ -0,0 +1,1169 @@ +#!/bin/bash +# +# Sub2API Installation Script +# Sub2API 安装脚本 +# Usage: curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | bash +# + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +# Configuration +GITHUB_REPO="Wei-Shaw/sub2api" +INSTALL_DIR="/opt/sub2api" +SERVICE_NAME="sub2api" +SERVICE_USER="sub2api" +CONFIG_DIR="/etc/sub2api" + +# Server 
configuration (will be set by user) +SERVER_HOST="0.0.0.0" +SERVER_PORT="8080" + +# Language (default: zh = Chinese) +LANG_CHOICE="zh" + +# ============================================================ +# Language strings / 语言字符串 +# ============================================================ + +# Chinese strings +declare -A MSG_ZH=( + # General + ["info"]="信息" + ["success"]="成功" + ["warning"]="警告" + ["error"]="错误" + + # Language selection + ["select_lang"]="请选择语言 / Select language" + ["lang_zh"]="中文" + ["lang_en"]="English" + ["enter_choice"]="请输入选择 (默认: 1)" + + # Installation + ["install_title"]="Sub2API 安装脚本" + ["run_as_root"]="请使用 root 权限运行 (使用 sudo)" + ["detected_platform"]="检测到平台" + ["unsupported_arch"]="不支持的架构" + ["unsupported_os"]="不支持的操作系统" + ["missing_deps"]="缺少依赖" + ["install_deps_first"]="请先安装以下依赖" + ["fetching_version"]="正在获取最新版本..." + ["latest_version"]="最新版本" + ["failed_get_version"]="获取最新版本失败" + ["downloading"]="正在下载" + ["download_failed"]="下载失败" + ["verifying_checksum"]="正在校验文件..." + ["checksum_verified"]="校验通过" + ["checksum_failed"]="校验失败" + ["checksum_not_found"]="无法验证校验和(checksums.txt 未找到)" + ["extracting"]="正在解压..." + ["binary_installed"]="二进制文件已安装到" + ["user_exists"]="用户已存在" + ["creating_user"]="正在创建系统用户" + ["user_created"]="用户已创建" + ["setting_up_dirs"]="正在设置目录..." + ["dirs_configured"]="目录配置完成" + ["installing_service"]="正在安装 systemd 服务..." + ["service_installed"]="systemd 服务已安装" + ["ready_for_setup"]="准备就绪,可以启动设置向导" + + # Completion + ["install_complete"]="Sub2API 安装完成!" + ["install_dir"]="安装目录" + ["next_steps"]="后续步骤" + ["step1_check_services"]="确保 PostgreSQL 和 Redis 正在运行:" + ["step2_start_service"]="启动 Sub2API 服务:" + ["step3_enable_autostart"]="设置开机自启:" + ["step4_open_wizard"]="在浏览器中打开设置向导:" + ["wizard_guide"]="设置向导将引导您完成:" + ["wizard_db"]="数据库配置" + ["wizard_redis"]="Redis 配置" + ["wizard_admin"]="管理员账号创建" + ["useful_commands"]="常用命令" + ["cmd_status"]="查看状态" + ["cmd_logs"]="查看日志" + ["cmd_restart"]="重启服务" + ["cmd_stop"]="停止服务" + + # Upgrade + ["upgrading"]="正在升级 Sub2API..." + ["current_version"]="当前版本" + ["stopping_service"]="正在停止服务..." + ["backup_created"]="备份已创建" + ["starting_service"]="正在启动服务..." + ["upgrade_complete"]="升级完成!" + + # Version install + ["installing_version"]="正在安装指定版本" + ["version_not_found"]="指定版本不存在" + ["same_version"]="已经是该版本,无需操作" + ["rollback_complete"]="版本回退完成!" + ["install_version_complete"]="指定版本安装完成!" + ["validating_version"]="正在验证版本..." + ["available_versions"]="可用版本列表" + ["fetching_versions"]="正在获取可用版本..." + ["not_installed"]="Sub2API 尚未安装,请先执行全新安装" + ["fresh_install_hint"]="用法" + + # Uninstall + ["uninstall_confirm"]="这将从系统中移除 Sub2API。" + ["are_you_sure"]="确定要继续吗?(y/N)" + ["uninstall_cancelled"]="卸载已取消" + ["removing_files"]="正在移除文件..." + ["removing_install_dir"]="正在移除安装目录..." + ["removing_user"]="正在移除用户..." + ["config_not_removed"]="配置目录未被移除" + ["remove_manually"]="如不再需要,请手动删除" + ["removing_install_lock"]="正在移除安装锁文件..." + ["install_lock_removed"]="安装锁文件已移除,重新安装时将进入设置向导" + ["purge_prompt"]="是否同时删除配置目录?这将清除所有配置和数据 [y/N]: " + ["removing_config_dir"]="正在移除配置目录..." 
+ ["uninstall_complete"]="Sub2API 已卸载" + + # Help + ["usage"]="用法" + ["cmd_none"]="(无参数)" + ["cmd_install"]="安装 Sub2API" + ["cmd_upgrade"]="升级到最新版本" + ["cmd_uninstall"]="卸载 Sub2API" + ["cmd_install_version"]="安装/回退到指定版本" + ["cmd_list_versions"]="列出可用版本" + ["opt_version"]="指定要安装的版本号 (例如: v1.0.0)" + + # Server configuration + ["server_config_title"]="服务器配置" + ["server_config_desc"]="配置 Sub2API 服务监听地址" + ["server_host_prompt"]="服务器监听地址" + ["server_host_hint"]="0.0.0.0 表示监听所有网卡,127.0.0.1 仅本地访问" + ["server_port_prompt"]="服务器端口" + ["server_port_hint"]="建议使用 1024-65535 之间的端口" + ["server_config_summary"]="服务器配置" + ["invalid_port"]="无效端口号,请输入 1-65535 之间的数字" + + # Service management + ["starting_service"]="正在启动服务..." + ["service_started"]="服务已启动" + ["service_start_failed"]="服务启动失败,请检查日志" + ["enabling_autostart"]="正在设置开机自启..." + ["autostart_enabled"]="开机自启已启用" + ["getting_public_ip"]="正在获取公网 IP..." + ["public_ip_failed"]="无法获取公网 IP,使用本地 IP" +) + +# English strings +declare -A MSG_EN=( + # General + ["info"]="INFO" + ["success"]="SUCCESS" + ["warning"]="WARNING" + ["error"]="ERROR" + + # Language selection + ["select_lang"]="请选择语言 / Select language" + ["lang_zh"]="中文" + ["lang_en"]="English" + ["enter_choice"]="Enter your choice (default: 1)" + + # Installation + ["install_title"]="Sub2API Installation Script" + ["run_as_root"]="Please run as root (use sudo)" + ["detected_platform"]="Detected platform" + ["unsupported_arch"]="Unsupported architecture" + ["unsupported_os"]="Unsupported OS" + ["missing_deps"]="Missing dependencies" + ["install_deps_first"]="Please install them first" + ["fetching_version"]="Fetching latest version..." + ["latest_version"]="Latest version" + ["failed_get_version"]="Failed to get latest version" + ["downloading"]="Downloading" + ["download_failed"]="Download failed" + ["verifying_checksum"]="Verifying checksum..." + ["checksum_verified"]="Checksum verified" + ["checksum_failed"]="Checksum verification failed" + ["checksum_not_found"]="Could not verify checksum (checksums.txt not found)" + ["extracting"]="Extracting..." + ["binary_installed"]="Binary installed to" + ["user_exists"]="User already exists" + ["creating_user"]="Creating system user" + ["user_created"]="User created" + ["setting_up_dirs"]="Setting up directories..." + ["dirs_configured"]="Directories configured" + ["installing_service"]="Installing systemd service..." + ["service_installed"]="Systemd service installed" + ["ready_for_setup"]="Ready for Setup Wizard" + + # Completion + ["install_complete"]="Sub2API installation completed!" + ["install_dir"]="Installation directory" + ["next_steps"]="NEXT STEPS" + ["step1_check_services"]="Make sure PostgreSQL and Redis are running:" + ["step2_start_service"]="Start Sub2API service:" + ["step3_enable_autostart"]="Enable auto-start on boot:" + ["step4_open_wizard"]="Open the Setup Wizard in your browser:" + ["wizard_guide"]="The Setup Wizard will guide you through:" + ["wizard_db"]="Database configuration" + ["wizard_redis"]="Redis configuration" + ["wizard_admin"]="Admin account creation" + ["useful_commands"]="USEFUL COMMANDS" + ["cmd_status"]="Check status" + ["cmd_logs"]="View logs" + ["cmd_restart"]="Restart" + ["cmd_stop"]="Stop" + + # Upgrade + ["upgrading"]="Upgrading Sub2API..." + ["current_version"]="Current version" + ["stopping_service"]="Stopping service..." + ["backup_created"]="Backup created" + ["starting_service"]="Starting service..." + ["upgrade_complete"]="Upgrade completed!" 
+ + # Version install + ["installing_version"]="Installing specified version" + ["version_not_found"]="Specified version not found" + ["same_version"]="Already at this version, no action needed" + ["rollback_complete"]="Version rollback completed!" + ["install_version_complete"]="Specified version installed!" + ["validating_version"]="Validating version..." + ["available_versions"]="Available versions" + ["fetching_versions"]="Fetching available versions..." + ["not_installed"]="Sub2API is not installed. Please run a fresh install first" + ["fresh_install_hint"]="Usage" + + # Uninstall + ["uninstall_confirm"]="This will remove Sub2API from your system." + ["are_you_sure"]="Are you sure? (y/N)" + ["uninstall_cancelled"]="Uninstall cancelled" + ["removing_files"]="Removing files..." + ["removing_install_dir"]="Removing installation directory..." + ["removing_user"]="Removing user..." + ["config_not_removed"]="Config directory was NOT removed." + ["remove_manually"]="Remove it manually if you no longer need it." + ["removing_install_lock"]="Removing install lock file..." + ["install_lock_removed"]="Install lock removed. Setup wizard will appear on next install." + ["purge_prompt"]="Also remove config directory? This will delete all config and data [y/N]: " + ["removing_config_dir"]="Removing config directory..." + ["uninstall_complete"]="Sub2API has been uninstalled" + + # Help + ["usage"]="Usage" + ["cmd_none"]="(none)" + ["cmd_install"]="Install Sub2API" + ["cmd_upgrade"]="Upgrade to the latest version" + ["cmd_uninstall"]="Remove Sub2API" + ["cmd_install_version"]="Install/rollback to a specific version" + ["cmd_list_versions"]="List available versions" + ["opt_version"]="Specify version to install (e.g., v1.0.0)" + + # Server configuration + ["server_config_title"]="Server Configuration" + ["server_config_desc"]="Configure Sub2API server listen address" + ["server_host_prompt"]="Server listen address" + ["server_host_hint"]="0.0.0.0 listens on all interfaces, 127.0.0.1 for local only" + ["server_port_prompt"]="Server port" + ["server_port_hint"]="Recommended range: 1024-65535" + ["server_config_summary"]="Server configuration" + ["invalid_port"]="Invalid port number, please enter a number between 1-65535" + + # Service management + ["starting_service"]="Starting service..." + ["service_started"]="Service started" + ["service_start_failed"]="Service failed to start, please check logs" + ["enabling_autostart"]="Enabling auto-start on boot..." + ["autostart_enabled"]="Auto-start enabled" + ["getting_public_ip"]="Getting public IP..." + ["public_ip_failed"]="Failed to get public IP, using local IP" +) + +# Get message based on current language +msg() { + local key="$1" + if [ "$LANG_CHOICE" = "en" ]; then + echo "${MSG_EN[$key]}" + else + echo "${MSG_ZH[$key]}" + fi +} + +# Print functions +print_info() { + echo -e "${BLUE}[$(msg 'info')]${NC} $1" +} + +print_success() { + echo -e "${GREEN}[$(msg 'success')]${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}[$(msg 'warning')]${NC} $1" +} + +print_error() { + echo -e "${RED}[$(msg 'error')]${NC} $1" +} + +# Check if running interactively (can access terminal) +# When piped (curl | bash), stdin is not a terminal, but /dev/tty may still be available +is_interactive() { + # Check if /dev/tty is available (works even when piped) + [ -e /dev/tty ] && [ -r /dev/tty ] && [ -w /dev/tty ] +} + +# Select language +select_language() { + # If not interactive (piped), use default language + if ! 
is_interactive; then + LANG_CHOICE="zh" + return + fi + + echo "" + echo -e "${CYAN}==============================================" + echo " $(msg 'select_lang')" + echo "==============================================${NC}" + echo "" + echo " 1) $(msg 'lang_zh') (默认/default)" + echo " 2) $(msg 'lang_en')" + echo "" + + read -p "$(msg 'enter_choice'): " lang_input < /dev/tty + + case "$lang_input" in + 2|en|EN|english|English) + LANG_CHOICE="en" + ;; + *) + LANG_CHOICE="zh" + ;; + esac + + echo "" +} + +# Validate port number +validate_port() { + local port="$1" + if [[ "$port" =~ ^[0-9]+$ ]] && [ "$port" -ge 1 ] && [ "$port" -le 65535 ]; then + return 0 + fi + return 1 +} + +# Configure server settings +configure_server() { + # If not interactive (piped), use default settings + if ! is_interactive; then + print_info "$(msg 'server_config_summary'): ${SERVER_HOST}:${SERVER_PORT} (default)" + return + fi + + echo "" + echo -e "${CYAN}==============================================" + echo " $(msg 'server_config_title')" + echo "==============================================${NC}" + echo "" + echo -e "${BLUE}$(msg 'server_config_desc')${NC}" + echo "" + + # Server host + echo -e "${YELLOW}$(msg 'server_host_hint')${NC}" + read -p "$(msg 'server_host_prompt') [${SERVER_HOST}]: " input_host < /dev/tty + if [ -n "$input_host" ]; then + SERVER_HOST="$input_host" + fi + + echo "" + + # Server port + echo -e "${YELLOW}$(msg 'server_port_hint')${NC}" + while true; do + read -p "$(msg 'server_port_prompt') [${SERVER_PORT}]: " input_port < /dev/tty + if [ -z "$input_port" ]; then + # Use default + break + elif validate_port "$input_port"; then + SERVER_PORT="$input_port" + break + else + print_error "$(msg 'invalid_port')" + fi + done + + echo "" + print_info "$(msg 'server_config_summary'): ${SERVER_HOST}:${SERVER_PORT}" + echo "" +} + +# Check if running as root +check_root() { + # Use 'id -u' instead of $EUID for better compatibility + # $EUID may not work reliably when script is piped to bash + if [ "$(id -u)" -ne 0 ]; then + print_error "$(msg 'run_as_root')" + exit 1 + fi +} + +# Detect OS and architecture +detect_platform() { + OS=$(uname -s | tr '[:upper:]' '[:lower:]') + ARCH=$(uname -m) + + case "$ARCH" in + x86_64) + ARCH="amd64" + ;; + aarch64|arm64) + ARCH="arm64" + ;; + *) + print_error "$(msg 'unsupported_arch'): $ARCH" + exit 1 + ;; + esac + + case "$OS" in + linux) + OS="linux" + ;; + darwin) + OS="darwin" + ;; + *) + print_error "$(msg 'unsupported_os'): $OS" + exit 1 + ;; + esac + + print_info "$(msg 'detected_platform'): ${OS}_${ARCH}" +} + +# Check dependencies +check_dependencies() { + local missing=() + + if ! command -v curl &> /dev/null; then + missing+=("curl") + fi + + if ! command -v tar &> /dev/null; then + missing+=("tar") + fi + + if [ ${#missing[@]} -gt 0 ]; then + print_error "$(msg 'missing_deps'): ${missing[*]}" + print_info "$(msg 'install_deps_first')" + exit 1 + fi +} + +# Get latest release version +get_latest_version() { + print_info "$(msg 'fetching_version')" + LATEST_VERSION=$(curl -s --connect-timeout 10 --max-time 30 "https://api.github.com/repos/${GITHUB_REPO}/releases/latest" 2>/dev/null | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/') + + if [ -z "$LATEST_VERSION" ]; then + print_error "$(msg 'failed_get_version')" + print_info "Please check your network connection or try again later." 
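+        # Note: unauthenticated GitHub API requests are rate-limited
+        # (roughly 60/hour per IP), which can also cause this lookup to fail.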
+ exit 1 + fi + + print_info "$(msg 'latest_version'): $LATEST_VERSION" +} + +# List available versions +list_versions() { + print_info "$(msg 'fetching_versions')" + + local versions + versions=$(curl -s --connect-timeout 10 --max-time 30 "https://api.github.com/repos/${GITHUB_REPO}/releases" 2>/dev/null | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/' | head -20) + + if [ -z "$versions" ]; then + print_error "$(msg 'failed_get_version')" + print_info "Please check your network connection or try again later." + exit 1 + fi + + echo "" + echo "$(msg 'available_versions'):" + echo "----------------------------------------" + echo "$versions" | while read -r version; do + echo " $version" + done + echo "----------------------------------------" + echo "" +} + +# Validate if a version exists +validate_version() { + local version="$1" + + # Check for empty version + if [ -z "$version" ]; then + print_error "$(msg 'opt_version')" >&2 + exit 1 + fi + + # Ensure version starts with 'v' + if [[ ! "$version" =~ ^v ]]; then + version="v$version" + fi + + print_info "$(msg 'validating_version') $version" >&2 + + # Check if the release exists + local http_code + http_code=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 10 --max-time 30 "https://api.github.com/repos/${GITHUB_REPO}/releases/tags/${version}" 2>/dev/null) + + # Check for network errors (empty or non-numeric response) + if [ -z "$http_code" ] || ! [[ "$http_code" =~ ^[0-9]+$ ]]; then + print_error "Network error: Failed to connect to GitHub API" >&2 + exit 1 + fi + + if [ "$http_code" != "200" ]; then + print_error "$(msg 'version_not_found'): $version" >&2 + echo "" >&2 + list_versions >&2 + exit 1 + fi + + # Return the normalized version (to stdout) + echo "$version" +} + +# Get current installed version +get_current_version() { + if [ -f "$INSTALL_DIR/sub2api" ]; then + # Use grep -E for better compatibility (works on macOS and Linux) + "$INSTALL_DIR/sub2api" --version 2>/dev/null | grep -oE 'v?[0-9]+\.[0-9]+\.[0-9]+' | head -1 || echo "unknown" + else + echo "not_installed" + fi +} + +# Download and extract +download_and_extract() { + local version_num=${LATEST_VERSION#v} + local archive_name="sub2api_${version_num}_${OS}_${ARCH}.tar.gz" + local download_url="https://github.com/${GITHUB_REPO}/releases/download/${LATEST_VERSION}/${archive_name}" + local checksum_url="https://github.com/${GITHUB_REPO}/releases/download/${LATEST_VERSION}/checksums.txt" + + print_info "$(msg 'downloading') ${archive_name}..." + + # Create temp directory + TEMP_DIR=$(mktemp -d) + trap "rm -rf $TEMP_DIR" EXIT + + # Download archive + if ! 
curl -sL "$download_url" -o "$TEMP_DIR/$archive_name"; then + print_error "$(msg 'download_failed')" + exit 1 + fi + + # Download and verify checksum + print_info "$(msg 'verifying_checksum')" + if curl -sL "$checksum_url" -o "$TEMP_DIR/checksums.txt" 2>/dev/null; then + local expected_checksum=$(grep "$archive_name" "$TEMP_DIR/checksums.txt" | awk '{print $1}') + local actual_checksum=$(sha256sum "$TEMP_DIR/$archive_name" | awk '{print $1}') + + if [ "$expected_checksum" != "$actual_checksum" ]; then + print_error "$(msg 'checksum_failed')" + print_error "Expected: $expected_checksum" + print_error "Actual: $actual_checksum" + exit 1 + fi + print_success "$(msg 'checksum_verified')" + else + print_warning "$(msg 'checksum_not_found')" + fi + + # Extract + print_info "$(msg 'extracting')" + tar -xzf "$TEMP_DIR/$archive_name" -C "$TEMP_DIR" + + # Create install directory + mkdir -p "$INSTALL_DIR" + + # Copy binary + cp "$TEMP_DIR/sub2api" "$INSTALL_DIR/sub2api" + chmod +x "$INSTALL_DIR/sub2api" + + # Copy deploy files if they exist in the archive + if [ -d "$TEMP_DIR/deploy" ]; then + cp -r "$TEMP_DIR/deploy/"* "$INSTALL_DIR/" 2>/dev/null || true + fi + + print_success "$(msg 'binary_installed') $INSTALL_DIR/sub2api" +} + +# Create system user +create_user() { + if id "$SERVICE_USER" &>/dev/null; then + print_info "$(msg 'user_exists'): $SERVICE_USER" + # Fix: Ensure existing user has /bin/sh shell for sudo to work + # Previous versions used /bin/false which prevents sudo execution + local current_shell + current_shell=$(getent passwd "$SERVICE_USER" 2>/dev/null | cut -d: -f7) + if [ "$current_shell" = "/bin/false" ] || [ "$current_shell" = "/sbin/nologin" ]; then + print_info "Fixing user shell for sudo compatibility..." + if usermod -s /bin/sh "$SERVICE_USER" 2>/dev/null; then + print_success "User shell updated to /bin/sh" + else + print_warning "Failed to update user shell. Service restart may not work automatically." + print_warning "Manual fix: sudo usermod -s /bin/sh $SERVICE_USER" + fi + fi + else + print_info "$(msg 'creating_user') $SERVICE_USER..." 
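+        # (-r allocates a UID from the system range and does not create a home
+        # directory; -d only records $INSTALL_DIR as the home path)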
+ # Use /bin/sh instead of /bin/false to allow sudo execution + # The user still cannot login interactively (no password set) + useradd -r -s /bin/sh -d "$INSTALL_DIR" "$SERVICE_USER" + print_success "$(msg 'user_created')" + fi +} + +# Setup directories and permissions +setup_directories() { + print_info "$(msg 'setting_up_dirs')" + + # Create directories + mkdir -p "$INSTALL_DIR" + mkdir -p "$INSTALL_DIR/data" + mkdir -p "$CONFIG_DIR" + + # Set ownership + chown -R "$SERVICE_USER:$SERVICE_USER" "$INSTALL_DIR" + chown -R "$SERVICE_USER:$SERVICE_USER" "$CONFIG_DIR" + + print_success "$(msg 'dirs_configured')" +} + +# Install systemd service +install_service() { + print_info "$(msg 'installing_service')" + + # Create service file with configured host and port + cat > /etc/systemd/system/sub2api.service << EOF +[Unit] +Description=Sub2API - AI API Gateway Platform +Documentation=https://github.com/Wei-Shaw/sub2api +After=network.target postgresql.service redis.service +Wants=postgresql.service redis.service + +[Service] +Type=simple +User=sub2api +Group=sub2api +WorkingDirectory=/opt/sub2api +ExecStart=/opt/sub2api/sub2api +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=sub2api + +# Security hardening +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +PrivateTmp=true +ReadWritePaths=/opt/sub2api + +# Environment - Server configuration +Environment=GIN_MODE=release +Environment=SERVER_HOST=${SERVER_HOST} +Environment=SERVER_PORT=${SERVER_PORT} + +[Install] +WantedBy=multi-user.target +EOF + + # Reload systemd + systemctl daemon-reload + + print_success "$(msg 'service_installed')" +} + +# Prepare for setup wizard (no config file needed - setup wizard will create it) +prepare_for_setup() { + print_success "$(msg 'ready_for_setup')" +} + +# Get public IP address +get_public_ip() { + print_info "$(msg 'getting_public_ip')" + + # Try to get public IP from ipinfo.io + local response + response=$(curl -s --connect-timeout 5 --max-time 10 "https://ipinfo.io/json" 2>/dev/null) + + if [ -n "$response" ]; then + # Extract IP from JSON response using grep and sed (no jq dependency) + PUBLIC_IP=$(echo "$response" | grep -o '"ip": *"[^"]*"' | sed 's/"ip": *"\([^"]*\)"/\1/') + if [ -n "$PUBLIC_IP" ]; then + print_success "Public IP: $PUBLIC_IP" + return 0 + fi + fi + + # Fallback to local IP + print_warning "$(msg 'public_ip_failed')" + PUBLIC_IP=$(hostname -I 2>/dev/null | awk '{print $1}' || echo "YOUR_SERVER_IP") + return 1 +} + +# Start service +start_service() { + print_info "$(msg 'starting_service')" + + if systemctl start sub2api; then + print_success "$(msg 'service_started')" + return 0 + else + print_error "$(msg 'service_start_failed')" + print_info "sudo journalctl -u sub2api -n 50" + return 1 + fi +} + +# Enable service auto-start +enable_autostart() { + print_info "$(msg 'enabling_autostart')" + + if systemctl enable sub2api 2>/dev/null; then + print_success "$(msg 'autostart_enabled')" + return 0 + else + print_warning "Failed to enable auto-start" + return 1 + fi +} + +# Print completion message +print_completion() { + # Use PUBLIC_IP which was set by get_public_ip() + # Determine display address + local display_host="${PUBLIC_IP:-YOUR_SERVER_IP}" + if [ "$SERVER_HOST" = "127.0.0.1" ]; then + display_host="127.0.0.1" + fi + + echo "" + echo "==============================================" + print_success "$(msg 'install_complete')" + echo "==============================================" + echo "" + echo "$(msg 'install_dir'): 
$INSTALL_DIR" + echo "$(msg 'server_config_summary'): ${SERVER_HOST}:${SERVER_PORT}" + echo "" + echo "==============================================" + echo " $(msg 'step4_open_wizard')" + echo "==============================================" + echo "" + print_info " http://${display_host}:${SERVER_PORT}" + echo "" + echo " $(msg 'wizard_guide')" + echo " - $(msg 'wizard_db')" + echo " - $(msg 'wizard_redis')" + echo " - $(msg 'wizard_admin')" + echo "" + echo "==============================================" + echo " $(msg 'useful_commands')" + echo "==============================================" + echo "" + echo " $(msg 'cmd_status'): sudo systemctl status sub2api" + echo " $(msg 'cmd_logs'): sudo journalctl -u sub2api -f" + echo " $(msg 'cmd_restart'): sudo systemctl restart sub2api" + echo " $(msg 'cmd_stop'): sudo systemctl stop sub2api" + echo "" + echo "==============================================" +} + +# Upgrade function +upgrade() { + # Check if Sub2API is installed + if [ ! -f "$INSTALL_DIR/sub2api" ]; then + print_error "$(msg 'not_installed')" + print_info "$(msg 'fresh_install_hint'): $0 install" + exit 1 + fi + + print_info "$(msg 'upgrading')" + + # Get current version + CURRENT_VERSION=$("$INSTALL_DIR/sub2api" --version 2>/dev/null | grep -oE 'v?[0-9]+\.[0-9]+\.[0-9]+' || echo "unknown") + print_info "$(msg 'current_version'): $CURRENT_VERSION" + + # Stop service + if systemctl is-active --quiet sub2api; then + print_info "$(msg 'stopping_service')" + systemctl stop sub2api + fi + + # Backup current binary + cp "$INSTALL_DIR/sub2api" "$INSTALL_DIR/sub2api.backup" + print_info "$(msg 'backup_created'): $INSTALL_DIR/sub2api.backup" + + # Download and install new version + get_latest_version + download_and_extract + + # Set permissions + chown "$SERVICE_USER:$SERVICE_USER" "$INSTALL_DIR/sub2api" + + # Start service + print_info "$(msg 'starting_service')" + systemctl start sub2api + + print_success "$(msg 'upgrade_complete')" +} + +# Install specific version (for upgrade or rollback) +# Requires: Sub2API must already be installed +install_version() { + local target_version="$1" + + # Check if Sub2API is installed + if [ ! 
-f "$INSTALL_DIR/sub2api" ]; then + print_error "$(msg 'not_installed')" + print_info "$(msg 'fresh_install_hint'): $0 install -v $target_version" + exit 1 + fi + + # Validate and normalize version + target_version=$(validate_version "$target_version") + + print_info "$(msg 'installing_version'): $target_version" + + # Get current version + local current_version + current_version=$(get_current_version) + print_info "$(msg 'current_version'): $current_version" + + # Check if same version + if [ "$current_version" = "$target_version" ] || [ "$current_version" = "${target_version#v}" ]; then + print_warning "$(msg 'same_version')" + exit 0 + fi + + # Stop service if running + if systemctl is-active --quiet sub2api; then + print_info "$(msg 'stopping_service')" + systemctl stop sub2api + fi + + # Backup current binary (for potential recovery) + if [ -f "$INSTALL_DIR/sub2api" ]; then + local backup_name + if [ "$current_version" != "unknown" ] && [ "$current_version" != "not_installed" ]; then + backup_name="sub2api.backup.${current_version}" + else + backup_name="sub2api.backup.$(date +%Y%m%d%H%M%S)" + fi + cp "$INSTALL_DIR/sub2api" "$INSTALL_DIR/$backup_name" + print_info "$(msg 'backup_created'): $INSTALL_DIR/$backup_name" + fi + + # Set LATEST_VERSION to the target version for download_and_extract + LATEST_VERSION="$target_version" + + # Download and install + download_and_extract + + # Set permissions + chown "$SERVICE_USER:$SERVICE_USER" "$INSTALL_DIR/sub2api" + + # Start service + print_info "$(msg 'starting_service')" + if systemctl start sub2api; then + print_success "$(msg 'service_started')" + else + print_error "$(msg 'service_start_failed')" + print_info "sudo journalctl -u sub2api -n 50" + fi + + # Print completion message + local new_version + new_version=$(get_current_version) + echo "" + echo "==============================================" + print_success "$(msg 'install_version_complete')" + echo "==============================================" + echo "" + echo " $(msg 'current_version'): $new_version" + echo "" +} + +# Uninstall function +uninstall() { + print_warning "$(msg 'uninstall_confirm')" + + # If not interactive (piped), require -y flag or skip confirmation + if ! is_interactive; then + if [ "${FORCE_YES:-}" != "true" ]; then + print_error "Non-interactive mode detected. Use 'curl ... | bash -s -- uninstall -y' to confirm." + exit 1 + fi + else + read -p "$(msg 'are_you_sure') " -n 1 -r < /dev/tty + echo + if [[ ! 
+            print_info "$(msg 'uninstall_cancelled')"
+            exit 0
+        fi
+    fi
+
+    print_info "$(msg 'stopping_service')"
+    systemctl stop sub2api 2>/dev/null || true
+    systemctl disable sub2api 2>/dev/null || true
+
+    print_info "$(msg 'removing_files')"
+    rm -f /etc/systemd/system/sub2api.service
+    systemctl daemon-reload
+
+    print_info "$(msg 'removing_install_dir')"
+    rm -rf "$INSTALL_DIR"
+
+    print_info "$(msg 'removing_user')"
+    userdel "$SERVICE_USER" 2>/dev/null || true
+
+    # Remove install lock file (.installed) to allow fresh setup on reinstall
+    print_info "$(msg 'removing_install_lock')"
+    rm -f "$CONFIG_DIR/.installed" 2>/dev/null || true
+    rm -f "$INSTALL_DIR/.installed" 2>/dev/null || true
+    print_success "$(msg 'install_lock_removed')"
+
+    # Ask about config directory removal (interactive mode only)
+    local remove_config=false
+    if [ "${PURGE:-}" = "true" ]; then
+        remove_config=true
+    elif is_interactive; then
+        read -p "$(msg 'purge_prompt')" -n 1 -r < /dev/tty
+        echo
+        if [[ $REPLY =~ ^[Yy]$ ]]; then
+            remove_config=true
+        fi
+    fi
+
+    if [ "$remove_config" = true ]; then
+        print_info "$(msg 'removing_config_dir')"
+        rm -rf "$CONFIG_DIR"
+    else
+        print_warning "$(msg 'config_not_removed'): $CONFIG_DIR"
+        print_warning "$(msg 'remove_manually')"
+    fi
+
+    print_success "$(msg 'uninstall_complete')"
+}
+
+# Main
+main() {
+    # Parse flags first
+    local target_version=""
+    local positional_args=()
+
+    while [[ $# -gt 0 ]]; do
+        case "$1" in
+            -y|--yes)
+                FORCE_YES="true"
+                shift
+                ;;
+            --purge)
+                PURGE="true"
+                shift
+                ;;
+            -v|--version)
+                if [ -n "${2:-}" ] && [[ ! "$2" =~ ^- ]]; then
+                    target_version="$2"
+                    shift 2
+                else
+                    echo "Error: --version requires a version argument"
+                    exit 1
+                fi
+                ;;
+            --version=*)
+                target_version="${1#*=}"
+                if [ -z "$target_version" ]; then
+                    echo "Error: --version requires a version argument"
+                    exit 1
+                fi
+                shift
+                ;;
+            *)
+                positional_args+=("$1")
+                shift
+                ;;
+        esac
+    done
+
+    # Restore positional arguments
+    set -- "${positional_args[@]}"
+
+    # Select language first
+    select_language
+
+    echo ""
+    echo "=============================================="
+    echo "  $(msg 'install_title')"
+    echo "=============================================="
+    echo ""
+
+    # Parse commands
+    case "${1:-}" in
+        upgrade|update)
+            check_root
+            detect_platform
+            check_dependencies
+            if [ -n "$target_version" ]; then
+                # Upgrade to specific version
+                install_version "$target_version"
+            else
+                # Upgrade to latest
+                upgrade
+            fi
+            exit 0
+            ;;
+        install)
+            # Install with optional version
+            check_root
+            detect_platform
+            check_dependencies
+            if [ -n "$target_version" ]; then
+                # Install specific version (fresh install or rollback)
+                if [ -f "$INSTALL_DIR/sub2api" ]; then
+                    # Already installed, treat as version change
+                    install_version "$target_version"
+                else
+                    # Fresh install with specific version
+                    configure_server
+                    LATEST_VERSION=$(validate_version "$target_version")
+                    download_and_extract
+                    create_user
+                    setup_directories
+                    install_service
+                    prepare_for_setup
+                    get_public_ip
+                    start_service
+                    enable_autostart
+                    print_completion
+                fi
+            else
+                # Fresh install with latest version
+                configure_server
+                get_latest_version
+                download_and_extract
+                create_user
+                setup_directories
+                install_service
+                prepare_for_setup
+                get_public_ip
+                start_service
+                enable_autostart
+                print_completion
+            fi
+            exit 0
+            ;;
+        rollback)
+            # Rollback to a specific version (alias for install with version)
+            if [ -z "$target_version" ] && [ -n "${2:-}" ]; then
+                target_version="$2"
+            fi
+ if [ -z "$target_version" ]; then + print_error "$(msg 'opt_version')" + echo "" + echo "Usage: $0 rollback -v " + echo " $0 rollback " + echo "" + list_versions + exit 1 + fi + check_root + detect_platform + check_dependencies + install_version "$target_version" + exit 0 + ;; + list-versions|versions) + list_versions + exit 0 + ;; + uninstall|remove) + check_root + uninstall + exit 0 + ;; + --help|-h) + echo "$(msg 'usage'): $0 [command] [options]" + echo "" + echo "Commands:" + echo " $(msg 'cmd_none') $(msg 'cmd_install')" + echo " install $(msg 'cmd_install')" + echo " upgrade $(msg 'cmd_upgrade')" + echo " rollback $(msg 'cmd_install_version')" + echo " list-versions $(msg 'cmd_list_versions')" + echo " uninstall $(msg 'cmd_uninstall')" + echo "" + echo "Options:" + echo " -v, --version $(msg 'opt_version')" + echo " -y, --yes Skip confirmation prompts (for uninstall)" + echo "" + echo "Examples:" + echo " $0 # Install latest version" + echo " $0 install -v v0.1.0 # Install specific version" + echo " $0 upgrade # Upgrade to latest" + echo " $0 upgrade -v v0.2.0 # Upgrade to specific version" + echo " $0 rollback v0.1.0 # Rollback to v0.1.0" + echo " $0 list-versions # List available versions" + echo "" + exit 0 + ;; + esac + + # Default: Fresh install with latest version + check_root + detect_platform + check_dependencies + + if [ -n "$target_version" ]; then + # Install specific version + if [ -f "$INSTALL_DIR/sub2api" ]; then + install_version "$target_version" + else + configure_server + LATEST_VERSION=$(validate_version "$target_version") + download_and_extract + create_user + setup_directories + install_service + prepare_for_setup + get_public_ip + start_service + enable_autostart + print_completion + fi + else + # Install latest version + configure_server + get_latest_version + download_and_extract + create_user + setup_directories + install_service + prepare_for_setup + get_public_ip + start_service + enable_autostart + print_completion + fi +} + +main "$@" diff --git a/deploy/sub2api.service b/deploy/sub2api.service new file mode 100644 index 00000000..1a59ad03 --- /dev/null +++ b/deploy/sub2api.service @@ -0,0 +1,33 @@ +[Unit] +Description=Sub2API - AI API Gateway Platform +Documentation=https://github.com/Wei-Shaw/sub2api +After=network.target postgresql.service redis.service +Wants=postgresql.service redis.service + +[Service] +Type=simple +User=sub2api +Group=sub2api +WorkingDirectory=/opt/sub2api +ExecStart=/opt/sub2api/sub2api +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal +SyslogIdentifier=sub2api + +# Security hardening +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +PrivateTmp=true +ReadWritePaths=/opt/sub2api + +# Environment - Server configuration +# Modify these values to change listen address and port +Environment=GIN_MODE=release +Environment=SERVER_HOST=0.0.0.0 +Environment=SERVER_PORT=8080 + +[Install] +WantedBy=multi-user.target diff --git a/frontend/.eslintignore b/frontend/.eslintignore new file mode 100644 index 00000000..d8682246 --- /dev/null +++ b/frontend/.eslintignore @@ -0,0 +1,14 @@ +# 忽略编译后的文件 +vite.config.js +vite.config.d.ts + +# 忽略依赖 +node_modules/ + +# 忽略构建输出 +dist/ +../backend/internal/web/dist/ + +# 忽略缓存 +.cache/ +.vite/ diff --git a/frontend/.eslintrc.cjs b/frontend/.eslintrc.cjs new file mode 100644 index 00000000..e5dba6d4 --- /dev/null +++ b/frontend/.eslintrc.cjs @@ -0,0 +1,36 @@ +module.exports = { + root: true, + env: { + browser: true, + es2021: true, + node: true, + }, + parser: 
"vue-eslint-parser", + parserOptions: { + parser: "@typescript-eslint/parser", + ecmaVersion: "latest", + sourceType: "module", + extraFileExtensions: [".vue"], + }, + plugins: ["vue", "@typescript-eslint"], + extends: [ + "eslint:recommended", + "plugin:vue/vue3-essential", + "plugin:@typescript-eslint/recommended", + ], + rules: { + "no-constant-condition": "off", + "no-mixed-spaces-and-tabs": "off", + "no-useless-escape": "off", + "no-unused-vars": "off", + "@typescript-eslint/no-unused-vars": [ + "warn", + { argsIgnorePattern: "^_", varsIgnorePattern: "^_" }, + ], + "@typescript-eslint/ban-types": "off", + "@typescript-eslint/ban-ts-comment": "off", + "@typescript-eslint/no-explicit-any": "off", + "vue/multi-word-component-names": "off", + "vue/no-use-v-if-with-v-for": "off", + }, +}; diff --git a/frontend/.npmrc b/frontend/.npmrc new file mode 100644 index 00000000..fe6bf117 --- /dev/null +++ b/frontend/.npmrc @@ -0,0 +1,4 @@ +legacy-peer-deps=true +# 允许运行所有包的构建脚本 +# esbuild 和 vue-demi 是已知安全的包,需要 postinstall 脚本才能正常工作 +ignore-scripts=false diff --git a/frontend/audit.json b/frontend/audit.json new file mode 100644 index 00000000..18831c33 --- /dev/null +++ b/frontend/audit.json @@ -0,0 +1,118 @@ +{ + "actions": [ + { + "action": "review", + "module": "xlsx", + "resolves": [ + { + "id": 1108110, + "path": ".>xlsx", + "dev": false, + "bundled": false, + "optional": false + }, + { + "id": 1108111, + "path": ".>xlsx", + "dev": false, + "bundled": false, + "optional": false + } + ] + } + ], + "advisories": { + "1108110": { + "findings": [ + { + "version": "0.18.5", + "paths": [ + ".>xlsx" + ] + } + ], + "found_by": null, + "deleted": null, + "references": "- https://nvd.nist.gov/vuln/detail/CVE-2023-30533\n- https://cdn.sheetjs.com/advisories/CVE-2023-30533\n- https://git.sheetjs.com/sheetjs/sheetjs/src/branch/master/CHANGELOG.md\n- https://git.sheetjs.com/sheetjs/sheetjs/issues/2667\n- https://git.sheetjs.com/sheetjs/sheetjs/issues/2986\n- https://cdn.sheetjs.com\n- https://github.com/advisories/GHSA-4r6h-8v6p-xvw6", + "created": "2023-04-24T09:30:19.000Z", + "id": 1108110, + "npm_advisory_id": null, + "overview": "All versions of SheetJS CE through 0.19.2 are vulnerable to \"Prototype Pollution\" when reading specially crafted files. Workflows that do not read arbitrary files (for example, exporting data to spreadsheet files) are unaffected.\n\nA non-vulnerable version cannot be found via npm, as the repository hosted on GitHub and the npm package `xlsx` are no longer maintained. 
+      "reported_by": null,
+      "title": "Prototype Pollution in sheetJS",
+      "metadata": null,
+      "cves": [
+        "CVE-2023-30533"
+      ],
+      "access": "public",
+      "severity": "high",
+      "module_name": "xlsx",
+      "vulnerable_versions": "<0.19.3",
+      "github_advisory_id": "GHSA-4r6h-8v6p-xvw6",
+      "recommendation": "None",
+      "patched_versions": "<0.0.0",
+      "updated": "2025-09-19T15:23:41.000Z",
+      "cvss": {
+        "score": 7.8,
+        "vectorString": "CVSS:3.1/AV:L/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:H"
+      },
+      "cwe": [
+        "CWE-1321"
+      ],
+      "url": "https://github.com/advisories/GHSA-4r6h-8v6p-xvw6"
+    },
+    "1108111": {
+      "findings": [
+        {
+          "version": "0.18.5",
+          "paths": [
+            ".>xlsx"
+          ]
+        }
+      ],
+      "found_by": null,
+      "deleted": null,
+      "references": "- https://nvd.nist.gov/vuln/detail/CVE-2024-22363\n- https://cdn.sheetjs.com/advisories/CVE-2024-22363\n- https://cwe.mitre.org/data/definitions/1333.html\n- https://git.sheetjs.com/sheetjs/sheetjs/src/tag/v0.20.2\n- https://cdn.sheetjs.com\n- https://github.com/advisories/GHSA-5pgg-2g8v-p4x9",
+      "created": "2024-04-05T06:30:46.000Z",
+      "id": 1108111,
+      "npm_advisory_id": null,
+      "overview": "SheetJS Community Edition before 0.20.2 is vulnerable to Regular Expression Denial of Service (ReDoS).\n\nA non-vulnerable version cannot be found via npm, as the repository hosted on GitHub and the npm package `xlsx` are no longer maintained. Version 0.20.2 can be downloaded via https://cdn.sheetjs.com/.",
+      "reported_by": null,
+      "title": "SheetJS Regular Expression Denial of Service (ReDoS)",
+      "metadata": null,
+      "cves": [
+        "CVE-2024-22363"
+      ],
+      "access": "public",
+      "severity": "high",
+      "module_name": "xlsx",
+      "vulnerable_versions": "<0.20.2",
+      "github_advisory_id": "GHSA-5pgg-2g8v-p4x9",
+      "recommendation": "None",
+      "patched_versions": "<0.0.0",
+      "updated": "2025-09-19T15:23:26.000Z",
+      "cvss": {
+        "score": 7.5,
+        "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H"
+      },
+      "cwe": [
+        "CWE-1333"
+      ],
+      "url": "https://github.com/advisories/GHSA-5pgg-2g8v-p4x9"
+    }
+  },
+  "muted": [],
+  "metadata": {
+    "vulnerabilities": {
+      "info": 0,
+      "low": 0,
+      "moderate": 0,
+      "high": 2,
+      "critical": 0
+    },
+    "dependencies": 639,
+    "devDependencies": 0,
+    "optionalDependencies": 0,
+    "totalDependencies": 639
+  }
+}
diff --git a/frontend/index.html b/frontend/index.html
new file mode 100644
index 00000000..ce54c687
--- /dev/null
+++ b/frontend/index.html
@@ -0,0 +1,13 @@
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" href="/favicon.ico" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>StarFireAPI - AI API Gateway</title>
+  </head>
+  <body>
+    <div id="app"></div>
+    <script type="module" src="/src/main.ts"></script>
+  </body>
+</html>
+ + + diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 00000000..e7e1288d --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,5304 @@ +{ + "name": "sub2api-frontend", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "sub2api-frontend", + "version": "1.0.0", + "dependencies": { + "@lobehub/icons": "^4.0.2", + "@vueuse/core": "^10.7.0", + "axios": "^1.6.2", + "chart.js": "^4.4.1", + "driver.js": "^1.4.0", + "file-saver": "^2.0.5", + "pinia": "^2.1.7", + "vue": "^3.4.0", + "vue-chartjs": "^5.3.0", + "vue-i18n": "^9.14.5", + "vue-router": "^4.2.5", + "xlsx": "^0.18.5" + }, + "devDependencies": { + "@types/file-saver": "^2.0.7", + "@types/mdx": "^2.0.13", + "@types/node": "^20.10.5", + "@typescript-eslint/eslint-plugin": "^7.18.0", + "@typescript-eslint/parser": "^7.18.0", + "@vitejs/plugin-vue": "^5.2.3", + "autoprefixer": "^10.4.16", + "eslint": "^8.57.0", + "eslint-plugin-vue": "^9.25.0", + "postcss": "^8.4.32", + "tailwindcss": "^3.4.0", + "typescript": "~5.6.0", + "vite": "^5.0.10", + "vite-plugin-checker": "^0.9.1", + "vue-tsc": "^2.2.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-2.0.2.tgz", + "integrity": "sha512-7KDVIigtqlamOLtJ0hbjECX/sDGDaJXsM/KHala8I/1E4lpl9RAO585kbVvh/k1rIrFAV6JeGkXmdWyYj9XvuA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + "@rc-component/util": "^1.4.0", + "clsx": "^2.1.1", + "csstype": "^3.1.3", + "stylis": "^4.3.4" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@emotion/babel-plugin": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz", + "integrity": "sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==", + "license": "MIT", + 
"dependencies": { + "@babel/helper-module-imports": "^7.16.7", + "@babel/runtime": "^7.18.3", + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/serialize": "^1.3.3", + "babel-plugin-macros": "^3.1.0", + "convert-source-map": "^1.5.0", + "escape-string-regexp": "^4.0.0", + "find-root": "^1.1.0", + "source-map": "^0.5.7", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/babel-plugin/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/babel-plugin/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + "node_modules/@emotion/cache": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz", + "integrity": "sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==", + "license": "MIT", + "dependencies": { + "@emotion/memoize": "^0.9.0", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/cache/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + "node_modules/@emotion/css": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/css/-/css-11.13.5.tgz", + "integrity": "sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==", + "license": "MIT", + "dependencies": { + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.13.5", + "@emotion/serialize": "^1.3.3", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", + "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", + "license": "MIT" + }, + "node_modules/@emotion/memoize": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz", + "integrity": "sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==", + "license": "MIT" + }, + "node_modules/@emotion/react": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz", + "integrity": "sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.14.0", + "@emotion/serialize": "^1.3.3", + "@emotion/use-insertion-effect-with-fallbacks": "^1.2.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "hoist-non-react-statics": "^3.3.1" + }, + "peerDependencies": { + "react": ">=16.8.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@emotion/serialize": { + "version": "1.3.3", + "resolved": 
"https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz", + "integrity": "sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==", + "license": "MIT", + "dependencies": { + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/unitless": "^0.10.0", + "@emotion/utils": "^1.4.2", + "csstype": "^3.0.2" + } + }, + "node_modules/@emotion/serialize/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/serialize/node_modules/@emotion/unitless": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz", + "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==", + "license": "MIT" + }, + "node_modules/@emotion/sheet": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz", + "integrity": "sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", + "integrity": "sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", + "license": "MIT" + }, + "node_modules/@emotion/use-insertion-effect-with-fallbacks": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz", + "integrity": "sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==", + "license": "MIT", + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@emotion/utils": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz", + "integrity": "sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==", + "license": "MIT" + }, + "node_modules/@emotion/weak-memoize": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz", + "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": 
"sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" 
+ } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": 
"sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + 
"node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@intlify/core-base": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.14.5.tgz", + "integrity": "sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==", + "license": "MIT", + "dependencies": { + "@intlify/message-compiler": "9.14.5", + "@intlify/shared": "9.14.5" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/message-compiler": { + "version": "9.14.5", + "resolved": 
"https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.14.5.tgz", + "integrity": "sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==", + "license": "MIT", + "dependencies": { + "@intlify/shared": "9.14.5", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/shared": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.14.5.tgz", + "integrity": "sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@kurkle/color": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz", + "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", + "license": "MIT" + }, + "node_modules/@lobehub/icons": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@lobehub/icons/-/icons-4.0.2.tgz", + "integrity": "sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==", + "license": "MIT", + "workspaces": [ + "packages/*" + ], + "dependencies": { + "antd-style": "^4.1.0", + "lucide-react": "^0.469.0", + "polished": "^4.3.1" + }, + "peerDependencies": { + "@lobehub/ui": "^4.3.3", + "antd": "^6.1.1", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + 
"resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@rc-component/util": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@rc-component/util/-/util-1.7.0.tgz", + "integrity": "sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==", + "license": "MIT", + "dependencies": { + "is-mobile": "^5.0.0", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", + "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", + "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", + "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", + "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", + "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", + "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + 
"node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", + "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", + "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", + "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", + "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", + "integrity": "sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", + "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", + "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", + "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", + "integrity": 
"sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", + "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", + "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", + "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", + "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", + "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", + "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", + "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", + "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.55.1", 
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", + "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", + "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/file-saver": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/file-saver/-/file-saver-2.0.7.tgz", + "integrity": "sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.27", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.27.tgz", + "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", + "license": "MIT" + }, + "node_modules/@types/web-bluetooth": { + "version": "0.0.20", + "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", + "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==", + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz", + "integrity": "sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/type-utils": "7.18.0", + "@typescript-eslint/utils": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "graphemer": "^1.4.0", + "ignore": "^5.3.1", + "natural-compare": "^1.4.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^7.0.0", + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + 
"node_modules/@typescript-eslint/parser": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz", + "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz", + "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz", + "integrity": "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/utils": "7.18.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz", + "integrity": "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz", + "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { 
+ "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.18.0.tgz", + "integrity": "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz", + "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "dev": true, + "license": "ISC" + }, + "node_modules/@vitejs/plugin-vue": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-5.2.4.tgz", + "integrity": "sha512-7Yx/SXSOcQq5HiiV3orevHUFn+pmMB4cgbEkDYgnkUWb0WfeQ/wa2yFv6D5ICiCQOVpjA7vYDXrC7AGO8yjDHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "vite": "^5.0.0 || ^6.0.0", + "vue": "^3.2.25" + } + }, + "node_modules/@volar/language-core": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.15.tgz", + "integrity": "sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/source-map": "2.4.15" + } + }, + "node_modules/@volar/source-map": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-2.4.15.tgz", + "integrity": "sha512-CPbMWlUN6hVZJYGcU/GSoHu4EnCHiLaXI9n8c9la6RaI9W5JHX+NqG+GSQcB0JdC2FIBLdZJwGsfKyBB71VlTg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@volar/typescript": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-2.4.15.tgz", + "integrity": "sha512-2aZ8i0cqPGjXb4BhkMsPYDkkuc2ZQ6yOpqwAuNwUoncELqoy5fRgOQtLR9gB0g902iS0NAkvpIzs27geVyVdPg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "2.4.15", + "path-browserify": "^1.0.1", + "vscode-uri": "^3.0.8" + } + }, + "node_modules/@vue/compiler-core": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.26.tgz", + "integrity": "sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@vue/shared": "3.5.26", + "entities": "^7.0.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.1" + } + }, + 
"node_modules/@vue/compiler-dom": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.26.tgz", + "integrity": "sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.26.tgz", + "integrity": "sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@vue/compiler-core": "3.5.26", + "@vue/compiler-dom": "3.5.26", + "@vue/compiler-ssr": "3.5.26", + "@vue/shared": "3.5.26", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.21", + "postcss": "^8.5.6", + "source-map-js": "^1.2.1" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.26.tgz", + "integrity": "sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/compiler-vue2": { + "version": "2.7.16", + "resolved": "https://registry.npmjs.org/@vue/compiler-vue2/-/compiler-vue2-2.7.16.tgz", + "integrity": "sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==", + "dev": true, + "license": "MIT", + "dependencies": { + "de-indent": "^1.0.2", + "he": "^1.2.0" + } + }, + "node_modules/@vue/devtools-api": { + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", + "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", + "license": "MIT" + }, + "node_modules/@vue/language-core": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/@vue/language-core/-/language-core-2.2.12.tgz", + "integrity": "sha512-IsGljWbKGU1MZpBPN+BvPAdr55YPkj2nB/TBNGNC32Vy2qLG25DYu/NBN2vNtZqdRbTRjaoYrahLrToim2NanA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "2.4.15", + "@vue/compiler-dom": "^3.5.0", + "@vue/compiler-vue2": "^2.7.16", + "@vue/shared": "^3.5.0", + "alien-signals": "^1.0.3", + "minimatch": "^9.0.3", + "muggle-string": "^0.4.1", + "path-browserify": "^1.0.1" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@vue/reactivity": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.26.tgz", + "integrity": "sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.26.tgz", + "integrity": "sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/runtime-dom": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.26.tgz", + "integrity": 
"sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.26", + "@vue/runtime-core": "3.5.26", + "@vue/shared": "3.5.26", + "csstype": "^3.2.3" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.26.tgz", + "integrity": "sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.5.26", + "@vue/shared": "3.5.26" + }, + "peerDependencies": { + "vue": "3.5.26" + } + }, + "node_modules/@vue/shared": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.26.tgz", + "integrity": "sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==", + "license": "MIT" + }, + "node_modules/@vueuse/core": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-10.11.1.tgz", + "integrity": "sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==", + "license": "MIT", + "dependencies": { + "@types/web-bluetooth": "^0.0.20", + "@vueuse/metadata": "10.11.1", + "@vueuse/shared": "10.11.1", + "vue-demi": ">=0.14.8" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@vueuse/metadata": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-10.11.1.tgz", + "integrity": "sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@vueuse/shared": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-10.11.1.tgz", + "integrity": "sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==", + "license": "MIT", + "dependencies": { + "vue-demi": ">=0.14.8" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/adler-32": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/adler-32/-/adler-32-1.3.1.tgz", + "integrity": "sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + 
"json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/alien-signals": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-1.0.13.tgz", + "integrity": "sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/antd-style": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/antd-style/-/antd-style-4.1.0.tgz", + "integrity": "sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^2.0.0", + "@babel/runtime": "^7.24.1", + "@emotion/cache": "^11.11.0", + "@emotion/css": "^11.11.2", + "@emotion/react": "^11.11.4", + "@emotion/serialize": "^1.1.3", + "@emotion/utils": "^1.2.1", + "use-merge-value": "^1.2.0" + }, + "peerDependencies": { + "antd": ">=6.0.0", + "react": ">=18" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": 
"sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.23", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", + "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001760", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-plugin-macros": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", + "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5", + "cosmiconfig": "^7.0.0", + "resolve": "^1.19.0" + }, + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.14", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz", + "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "dev": true, + "license": "ISC" + }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001763", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001763.tgz", + "integrity": "sha512-mh/dGtq56uN98LlNX9qdbKnzINhX0QzhiWBFEkFfsFO4QyCvL8YegrJAazCwXIeqkIob8BlZPGM3xdnY+sgmvQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/cfb": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cfb/-/cfb-1.2.2.tgz", + "integrity": "sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==", + "license": "Apache-2.0", + "dependencies": { + "adler-32": "~1.3.0", + "crc-32": "~1.2.0" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chart.js": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz", + "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", + "license": "MIT", + "dependencies": { + "@kurkle/color": "^0.3.0" + }, + "engines": { + "pnpm": ">=8" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/codepage": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/codepage/-/codepage-1.15.0.tgz", + "integrity": "sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "license": "MIT" + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/crc-32": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", + "integrity": "sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==", + "license": "Apache-2.0", + "bin": { + "crc32": "bin/crc32.njs" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/de-indent": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", + "integrity": "sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": 
"sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/driver.js": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/driver.js/-/driver.js-1.4.0.tgz", + "integrity": "sha512-Gm64jm6PmcU+si21sQhBrTAM1JvUrR0QhNmjkprNLxohOBzul9+pNHXgQaT9lW84gwg9GMLB3NZGuGolsz5uew==", + "license": "MIT" + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + "dev": true, + "license": "ISC" + }, + "node_modules/entities": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.0.tgz", + "integrity": "sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": 
"https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-vue": { + "version": "9.33.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.33.0.tgz", + "integrity": "sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "globals": "^13.24.0", + "natural-compare": "^1.4.0", + "nth-check": "^2.1.1", + "postcss-selector-parser": "^6.0.15", + "semver": "^7.6.3", + "vue-eslint-parser": "^9.4.3", + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": 
"3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-saver": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", + "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==", + "license": "MIT" + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-root": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", + "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", + "license": "MIT" + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": 
"sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/frac": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/frac/-/frac-1.1.2.tgz", + "integrity": "sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + 
"dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, 
+ "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hoist-non-react-statics/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": 
"sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-mobile": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-5.0.0.tgz", + "integrity": "sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==", + "license": "MIT" + }, + 
"node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": 
"https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lucide-react": { + "version": "0.469.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.469.0.tgz", + "integrity": "sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": 
"sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/muggle-string": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", + "integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": 
"sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": 
"^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", 
+ "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pinia": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.3.1.tgz", + "integrity": "sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.6.3", + "vue-demi": "^0.14.10" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "typescript": ">=4.4.4", + "vue": "^2.7.0 || ^3.5.11" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/polished": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", + "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.17.8" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + 
} + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": 
"sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", + "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.55.1", + "@rollup/rollup-android-arm64": 
"4.55.1", + "@rollup/rollup-darwin-arm64": "4.55.1", + "@rollup/rollup-darwin-x64": "4.55.1", + "@rollup/rollup-freebsd-arm64": "4.55.1", + "@rollup/rollup-freebsd-x64": "4.55.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", + "@rollup/rollup-linux-arm-musleabihf": "4.55.1", + "@rollup/rollup-linux-arm64-gnu": "4.55.1", + "@rollup/rollup-linux-arm64-musl": "4.55.1", + "@rollup/rollup-linux-loong64-gnu": "4.55.1", + "@rollup/rollup-linux-loong64-musl": "4.55.1", + "@rollup/rollup-linux-ppc64-gnu": "4.55.1", + "@rollup/rollup-linux-ppc64-musl": "4.55.1", + "@rollup/rollup-linux-riscv64-gnu": "4.55.1", + "@rollup/rollup-linux-riscv64-musl": "4.55.1", + "@rollup/rollup-linux-s390x-gnu": "4.55.1", + "@rollup/rollup-linux-x64-gnu": "4.55.1", + "@rollup/rollup-linux-x64-musl": "4.55.1", + "@rollup/rollup-openbsd-x64": "4.55.1", + "@rollup/rollup-openharmony-arm64": "4.55.1", + "@rollup/rollup-win32-arm64-msvc": "4.55.1", + "@rollup/rollup-win32-ia32-msvc": "4.55.1", + "@rollup/rollup-win32-x64-gnu": "4.55.1", + "@rollup/rollup-win32-x64-msvc": "4.55.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": 
"sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ssf": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/ssf/-/ssf-0.11.2.tgz", + "integrity": "sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==", + "license": "Apache-2.0", + "dependencies": { + "frac": "~1.1.2" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", 
+ "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", + "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": 
"https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", + "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unicorn-magic": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-merge-value": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-merge-value/-/use-merge-value-1.2.0.tgz", + "integrity": "sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==", + "license": "MIT", + "peerDependencies": { + "react": ">= 16.x" + } + 
}, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-plugin-checker": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/vite-plugin-checker/-/vite-plugin-checker-0.9.3.tgz", + "integrity": "sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "chokidar": "^4.0.3", + "npm-run-path": "^6.0.0", + "picocolors": "^1.1.1", + "picomatch": "^4.0.2", + "strip-ansi": "^7.1.0", + "tiny-invariant": "^1.3.3", + "tinyglobby": "^0.2.13", + "vscode-uri": "^3.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "peerDependencies": { + "@biomejs/biome": ">=1.7", + "eslint": ">=7", + "meow": "^13.2.0", + "optionator": "^0.9.4", + "stylelint": ">=16", + "typescript": "*", + "vite": ">=2.0.0", + "vls": "*", + "vti": "*", + "vue-tsc": "~2.2.10" + }, + "peerDependenciesMeta": { + "@biomejs/biome": { + "optional": true + }, + "eslint": { + "optional": true + }, + "meow": { + "optional": true + }, + "optionator": { + "optional": true + }, + "stylelint": { + "optional": true + }, + "typescript": { + "optional": true + }, + "vls": { + "optional": true + }, + "vti": { + "optional": true + }, + "vue-tsc": { + "optional": true + } + } + }, + "node_modules/vite-plugin-checker/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/vite-plugin-checker/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": 
"https://paulmillr.com/funding/" + } + }, + "node_modules/vite-plugin-checker/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/vite-plugin-checker/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/vite-plugin-checker/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/vscode-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", + "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/vue": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.26.tgz", + "integrity": "sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.26", + "@vue/compiler-sfc": "3.5.26", + "@vue/runtime-dom": "3.5.26", + "@vue/server-renderer": "3.5.26", + "@vue/shared": "3.5.26" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/vue-chartjs": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/vue-chartjs/-/vue-chartjs-5.3.3.tgz", + "integrity": "sha512-jqxtL8KZ6YJ5NTv6XzrzLS7osyegOi28UGNZW0h9OkDL7Sh1396ht4Dorh04aKrl2LiSalQ84WtqiG0RIJb0tA==", + "license": "MIT", + "peerDependencies": { + "chart.js": "^4.1.1", + "vue": "^3.0.0-0 || ^2.7.0" + } + }, + "node_modules/vue-demi": { + "version": "0.14.10", + "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.10.tgz", + "integrity": "sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/vue-eslint-parser": { + "version": "9.4.3", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", + "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "eslint-scope": "^7.1.1", + "eslint-visitor-keys": "^3.3.0", + "espree": "^9.3.1", + "esquery": "^1.4.0", + "lodash": "^4.17.21", + "semver": "^7.3.6" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=6.0.0" + } + }, + "node_modules/vue-i18n": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-9.14.5.tgz", + "integrity": "sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==", + "deprecated": "v9 and v10 no longer supported. please migrate to v11. about maintenance status, see https://vue-i18n.intlify.dev/guide/maintenance.html", + "license": "MIT", + "dependencies": { + "@intlify/core-base": "9.14.5", + "@intlify/shared": "9.14.5", + "@vue/devtools-api": "^6.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, + "node_modules/vue-router": { + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", + "integrity": "sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.6.4" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "vue": "^3.5.0" + } + }, + "node_modules/vue-tsc": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-2.2.12.tgz", + "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/typescript": "2.4.15", + "@vue/language-core": "2.2.12" + }, + "bin": { + "vue-tsc": "bin/vue-tsc.js" + }, + "peerDependencies": { + "typescript": ">=5.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wmf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wmf/-/wmf-1.0.2.tgz", + "integrity": "sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/word": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/word/-/word-0.3.0.tgz", + "integrity": "sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": 
true, + "license": "ISC" + }, + "node_modules/xlsx": { + "version": "0.18.5", + "resolved": "https://registry.npmjs.org/xlsx/-/xlsx-0.18.5.tgz", + "integrity": "sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==", + "license": "Apache-2.0", + "dependencies": { + "adler-32": "~1.3.0", + "cfb": "~1.2.1", + "codepage": "~1.15.0", + "crc-32": "~1.2.1", + "ssf": "~0.11.2", + "wmf": "~1.0.1", + "word": "~0.3.0" + }, + "bin": { + "xlsx": "bin/xlsx.njs" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12" + } + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json new file mode 100644 index 00000000..2a85f585 --- /dev/null +++ b/frontend/package.json @@ -0,0 +1,45 @@ +{ + "name": "sub2api-frontend", + "private": true, + "version": "1.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "vue-tsc -b && vite build", + "preview": "vite preview", + "lint": "eslint . --ext .vue,.js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix", + "lint:check": "eslint . 
+    "typecheck": "vue-tsc --noEmit"
+  },
+  "dependencies": {
+    "@lobehub/icons": "^4.0.2",
+    "@vueuse/core": "^10.7.0",
+    "axios": "^1.6.2",
+    "chart.js": "^4.4.1",
+    "driver.js": "^1.4.0",
+    "file-saver": "^2.0.5",
+    "pinia": "^2.1.7",
+    "vue": "^3.4.0",
+    "vue-chartjs": "^5.3.0",
+    "vue-i18n": "^9.14.5",
+    "vue-router": "^4.2.5",
+    "xlsx": "^0.18.5"
+  },
+  "devDependencies": {
+    "@types/file-saver": "^2.0.7",
+    "@types/mdx": "^2.0.13",
+    "@types/node": "^20.10.5",
+    "@vitejs/plugin-vue": "^5.2.3",
+    "autoprefixer": "^10.4.16",
+    "@typescript-eslint/eslint-plugin": "^7.18.0",
+    "@typescript-eslint/parser": "^7.18.0",
+    "eslint": "^8.57.0",
+    "eslint-plugin-vue": "^9.25.0",
+    "postcss": "^8.4.32",
+    "tailwindcss": "^3.4.0",
+    "typescript": "~5.6.0",
+    "vite": "^5.0.10",
+    "vite-plugin-checker": "^0.9.1",
+    "vue-tsc": "^2.2.0"
+  }
+}
diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml
new file mode 100644
index 00000000..c295165d
--- /dev/null
+++ b/frontend/pnpm-lock.yaml
@@ -0,0 +1,8419 @@
+lockfileVersion: '9.0'
+
+settings:
+  autoInstallPeers: true
+  excludeLinksFromLockfile: false
+
+importers:
+
+  .:
+    dependencies:
+      '@lobehub/icons':
+        specifier: ^4.0.2
+        version: 4.0.2(@lobehub/ui@4.9.2)(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
+      '@vueuse/core':
+        specifier: ^10.7.0
+        version: 10.11.1(vue@3.5.26(typescript@5.6.3))
+      axios:
+        specifier: ^1.6.2
+        version: 1.13.2
+      chart.js:
+        specifier: ^4.4.1
+        version: 4.5.1
+      driver.js:
+        specifier: ^1.4.0
+        version: 1.4.0
+      file-saver:
+        specifier: ^2.0.5
+        version: 2.0.5
+      pinia:
+        specifier: ^2.1.7
+        version: 2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3))
+      vue:
+        specifier: ^3.4.0
+        version: 3.5.26(typescript@5.6.3)
+      vue-chartjs:
+        specifier: ^5.3.0
+        version: 5.3.3(chart.js@4.5.1)(vue@3.5.26(typescript@5.6.3))
+      vue-i18n:
+        specifier: ^9.14.5
+        version: 9.14.5(vue@3.5.26(typescript@5.6.3))
+      vue-router:
+        specifier: ^4.2.5
+        version: 4.6.4(vue@3.5.26(typescript@5.6.3))
+      xlsx:
+        specifier: ^0.18.5
+        version: 0.18.5
+    devDependencies:
+      '@types/file-saver':
+        specifier: ^2.0.7
+        version: 2.0.7
+      '@types/mdx':
+        specifier: ^2.0.13
+        version: 2.0.13
+      '@types/node':
+        specifier: ^20.10.5
+        version: 20.19.27
+      '@typescript-eslint/eslint-plugin':
+        specifier: ^7.18.0
+        version: 7.18.0(@typescript-eslint/parser@7.18.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3)
+      '@typescript-eslint/parser':
+        specifier: ^7.18.0
+        version: 7.18.0(eslint@8.57.1)(typescript@5.6.3)
+      '@vitejs/plugin-vue':
+        specifier: ^5.2.3
+        version: 5.2.4(vite@5.4.21(@types/node@20.19.27))(vue@3.5.26(typescript@5.6.3))
+      autoprefixer:
+        specifier: ^10.4.16
+        version: 10.4.23(postcss@8.5.6)
+      eslint:
+        specifier: ^8.57.0
+        version: 8.57.1
+      eslint-plugin-vue:
+        specifier: ^9.25.0
+        version: 9.33.0(eslint@8.57.1)
+      postcss:
+        specifier: ^8.4.32
+        version: 8.5.6
+      tailwindcss:
+        specifier: ^3.4.0
+        version: 3.4.19
+      typescript:
+        specifier: ~5.6.0
+        version: 5.6.3
+      vite:
+        specifier: ^5.0.10
+        version: 5.4.21(@types/node@20.19.27)
+      vite-plugin-checker:
+        specifier: ^0.9.1
+        version: 0.9.3(eslint@8.57.1)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.21(@types/node@20.19.27))(vue-tsc@2.2.12(typescript@5.6.3))
+      vue-tsc:
+        specifier: ^2.2.0
+        version: 2.2.12(typescript@5.6.3)
+
+packages:
+
+  '@alloc/quick-lru@5.2.0':
+    resolution: {integrity:
sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ant-design/colors@8.0.0': + resolution: {integrity: sha512-6YzkKCw30EI/E9kHOIXsQDHmMvTllT8STzjMb4K2qzit33RW2pqCJP0sk+hidBntXxE+Vz4n1+RvCTfBw6OErw==} + + '@ant-design/cssinjs-utils@2.0.2': + resolution: {integrity: sha512-Mq3Hm6fJuQeFNKSp3+yT4bjuhVbdrsyXE2RyfpJFL0xiYNZdaJ6oFaE3zFrzmHbmvTd2Wp3HCbRtkD4fU+v2ZA==} + peerDependencies: + react: '>=18' + react-dom: '>=18' + + '@ant-design/cssinjs@2.0.1': + resolution: {integrity: sha512-Lw1Z4cUQxdMmTNir67gU0HCpTl5TtkKCJPZ6UBvCqzcOTl/QmMFB6qAEoj8qFl0CuZDX9qQYa3m9+rEKfaBSbA==} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + '@ant-design/fast-color@3.0.0': + resolution: {integrity: sha512-eqvpP7xEDm2S7dUzl5srEQCBTXZMmY3ekf97zI+M2DHOYyKdJGH0qua0JACHTqbkRnD/KHFQP9J1uMJ/XWVzzA==} + engines: {node: '>=8.x'} + + '@ant-design/icons-svg@4.4.2': + resolution: {integrity: sha512-vHbT+zJEVzllwP+CM+ul7reTEfBR0vgxFe7+lREAsAA7YGsYpboiq2sQNeQeRvh09GfQgs/GyFEvZpJ9cLXpXA==} + + '@ant-design/icons@6.1.0': + resolution: {integrity: sha512-KrWMu1fIg3w/1F2zfn+JlfNDU8dDqILfA5Tg85iqs1lf8ooyGlbkA+TkwfOKKgqpUmAiRY1PTFpuOU2DAIgSUg==} + engines: {node: '>=8'} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + '@ant-design/react-slick@2.0.0': + resolution: {integrity: sha512-HMS9sRoEmZey8LsE/Yo6+klhlzU12PisjrVcydW3So7RdklyEd2qehyU6a7Yp+OYN72mgsYs3NFCyP2lCPFVqg==} + peerDependencies: + react: ^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^0.14.0 || ^15.0.1 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + '@antfu/install-pkg@1.1.0': + resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + + '@babel/code-frame@7.27.1': + resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.28.5': + resolution: {integrity: sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-globals@7.28.0': + resolution: {integrity: sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.27.1': + resolution: {integrity: sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.27.1': + resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.28.5': + resolution: {integrity: sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/runtime@7.28.4': + resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==} + engines: {node: '>=6.9.0'} + + '@babel/template@7.27.2': + resolution: {integrity: sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.28.5': + resolution: {integrity: 
sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.28.5': + resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==} + engines: {node: '>=6.9.0'} + + '@base-ui/react@1.0.0': + resolution: {integrity: sha512-4USBWz++DUSLTuIYpbYkSgy1F9ZmNG9S/lXvlUN6qMK0P0RlW+6eQmDUB4DgZ7HVvtXl4pvi4z5J2fv6Z3+9hg==} + engines: {node: '>=14.0.0'} + peerDependencies: + '@types/react': ^17 || ^18 || ^19 + react: ^17 || ^18 || ^19 + react-dom: ^17 || ^18 || ^19 + peerDependenciesMeta: + '@types/react': + optional: true + + '@base-ui/utils@0.2.3': + resolution: {integrity: sha512-/CguQ2PDaOzeVOkllQR8nocJ0FFIDqsWIcURsVmm53QGo8NhFNpePjNlyPIB41luxfOqnG7PU0xicMEw3ls7XQ==} + peerDependencies: + '@types/react': ^17 || ^18 || ^19 + react: ^17 || ^18 || ^19 + react-dom: ^17 || ^18 || ^19 + peerDependenciesMeta: + '@types/react': + optional: true + + '@braintree/sanitize-url@7.1.1': + resolution: {integrity: sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==} + + '@chevrotain/cst-dts-gen@11.0.3': + resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + + '@chevrotain/gast@11.0.3': + resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} + + '@chevrotain/regexp-to-ast@11.0.3': + resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + + '@chevrotain/types@11.0.3': + resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + + '@chevrotain/utils@11.0.3': + resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} + + '@dnd-kit/accessibility@3.1.1': + resolution: {integrity: sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==} + peerDependencies: + react: '>=16.8.0' + + '@dnd-kit/core@6.3.1': + resolution: {integrity: sha512-xkGBRQQab4RLwgXxoqETICr6S5JlogafbhNsidmrkVv2YRs5MLwpjoF2qpiGjQt8S9AoxtIV603s0GIUpY5eYQ==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@dnd-kit/modifiers@9.0.0': + resolution: {integrity: sha512-ybiLc66qRGuZoC20wdSSG6pDXFikui/dCNGthxv4Ndy8ylErY0N3KVxY2bgo7AWwIbxDmXDg3ylAFmnrjcbVvw==} + peerDependencies: + '@dnd-kit/core': ^6.3.0 + react: '>=16.8.0' + + '@dnd-kit/sortable@10.0.0': + resolution: {integrity: sha512-+xqhmIIzvAYMGfBYYnbKuNicfSsk4RksY2XdmJhT+HAC01nix6fHCztU68jooFiMUB01Ky3F0FyOvhG/BZrWkg==} + peerDependencies: + '@dnd-kit/core': ^6.3.0 + react: '>=16.8.0' + + '@dnd-kit/utilities@3.2.2': + resolution: {integrity: sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==} + peerDependencies: + react: '>=16.8.0' + + '@emoji-mart/data@1.2.1': + resolution: {integrity: sha512-no2pQMWiBy6gpBEiqGeU77/bFejDqUTRY7KX+0+iur13op3bqUsXdnwoZs6Xb1zbv0gAj5VvS1PWoUUckSr5Dw==} + + '@emoji-mart/react@1.1.1': + resolution: {integrity: sha512-NMlFNeWgv1//uPsvLxvGQoIerPuVdXwK/EUek8OOkJ6wVOWPUizRBJU0hDqWZCOROVpfBgCemaC3m6jDOXi03g==} + peerDependencies: + emoji-mart: ^5.2 + react: ^16.8 || ^17 || ^18 + + '@emotion/babel-plugin@11.13.5': + resolution: {integrity: sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==} + + '@emotion/cache@11.14.0': + 
resolution: {integrity: sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==} + + '@emotion/css@11.13.5': + resolution: {integrity: sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==} + + '@emotion/hash@0.8.0': + resolution: {integrity: sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==} + + '@emotion/hash@0.9.2': + resolution: {integrity: sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==} + + '@emotion/is-prop-valid@1.4.0': + resolution: {integrity: sha512-QgD4fyscGcbbKwJmqNvUMSE02OsHUa+lAWKdEUIJKgqe5IwRSKd7+KhibEWdaKwgjLj0DRSHA9biAIqGBk05lw==} + + '@emotion/memoize@0.9.0': + resolution: {integrity: sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==} + + '@emotion/react@11.14.0': + resolution: {integrity: sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==} + peerDependencies: + '@types/react': '*' + react: '>=16.8.0' + peerDependenciesMeta: + '@types/react': + optional: true + + '@emotion/serialize@1.3.3': + resolution: {integrity: sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==} + + '@emotion/sheet@1.4.0': + resolution: {integrity: sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==} + + '@emotion/unitless@0.10.0': + resolution: {integrity: sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==} + + '@emotion/unitless@0.7.5': + resolution: {integrity: sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==} + + '@emotion/use-insertion-effect-with-fallbacks@1.2.0': + resolution: {integrity: sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==} + peerDependencies: + react: '>=16.8.0' + + '@emotion/utils@1.4.2': + resolution: {integrity: sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==} + + '@emotion/weak-memoize@0.4.0': + resolution: {integrity: sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==} + + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} + engines: {node: '>=12'} + 
cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.21.5': + resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.21.5': + resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: 
sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.9.1': + resolution: {integrity: sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.12.2': + resolution: {integrity: sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/eslintrc@2.1.4': + resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@eslint/js@8.57.1': + resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@floating-ui/core@1.7.3': + resolution: {integrity: sha512-sGnvb5dmrJaKEZ+LDIpguvdX3bDlEllmv4/ClQ9awcmCZrlx5jQyyMWFM5kBI+EyNOCDDiKk8il0zeuX3Zlg/w==} + + '@floating-ui/dom@1.7.4': + resolution: {integrity: sha512-OOchDgh4F2CchOX94cRVqhvy7b3AFb+/rQXyswmzmGakRfkMgoWVjfnLWkRirfLEfuD4ysVW16eXzwt3jHIzKA==} + + '@floating-ui/react-dom@2.1.6': + resolution: {integrity: sha512-4JX6rEatQEvlmgU80wZyq9RT96HZJa88q8hp0pBd+LrczeDI4o6uA2M+uvxngVHo4Ihr8uibXxH6+70zhAFrVw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + '@floating-ui/react@0.27.16': + resolution: {integrity: sha512-9O8N4SeG2z++TSM8QA/KTeKFBVCNEz/AGS7gWPJf6KFRzmRWixFRnCnkPHRDwSVZW6QPDO6uT0P2SpWNKCc9/g==} + peerDependencies: + react: '>=17.0.0' + react-dom: '>=17.0.0' + + '@floating-ui/utils@0.2.10': + resolution: {integrity: sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ==} + + '@giscus/react@3.1.0': + resolution: {integrity: sha512-0TCO2TvL43+oOdyVVGHDItwxD1UMKP2ZYpT6gXmhFOqfAJtZxTzJ9hkn34iAF/b6YzyJ4Um89QIt9z/ajmAEeg==} + peerDependencies: + react: ^16 || ^17 || ^18 || ^19 + react-dom: ^16 || ^17 || ^18 || ^19 + + '@humanwhocodes/config-array@0.13.0': + resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==} + engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@2.0.3': + resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead + + '@iconify/types@2.0.0': + resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + + '@iconify/utils@3.1.0': + resolution: {integrity: sha512-Zlzem1ZXhI1iHeeERabLNzBHdOa4VhQbqAcOQaMKuTuyZCpwKbC2R4Dd0Zo3g9EAc+Y4fiarO8HIHRAth7+skw==} + + '@intlify/core-base@9.14.5': + resolution: {integrity: sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==} + engines: {node: '>= 16'} + + '@intlify/message-compiler@9.14.5': + resolution: {integrity: sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==} + engines: {node: '>= 16'} + + '@intlify/shared@9.14.5': + 
resolution: {integrity: sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==} + engines: {node: '>= 16'} + + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@kurkle/color@0.3.4': + resolution: {integrity: sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==} + + '@lit-labs/ssr-dom-shim@1.5.0': + resolution: {integrity: sha512-HLomZXMmrCFHSRKESF5vklAKsDY7/fsT/ZhqCu3V0UoW/Qbv8wxmO4W9bx4KnCCF2Zak4yuk+AGraK/bPmI4kA==} + + '@lit/reactive-element@2.1.2': + resolution: {integrity: sha512-pbCDiVMnne1lYUIaYNN5wrwQXDtHaYtg7YEFPeW+hws6U47WeFvISGUWekPGKWOP1ygrs0ef0o1VJMk1exos5A==} + + '@lobehub/emojilib@1.0.0': + resolution: {integrity: sha512-s9KnjaPjsEefaNv150G3aifvB+J3P4eEKG+epY9zDPS2BeB6+V2jELWqAZll+nkogMaVovjEE813z3V751QwGw==} + + '@lobehub/fluent-emoji@4.1.0': + resolution: {integrity: sha512-R1MB2lfUkDvB7XAQdRzY75c1dx/tB7gEvBPaEEMarzKfCJWmXm7rheS6caVzmgwAlq5sfmTbxPL+un99sp//Yw==} + peerDependencies: + react: ^19.0.0 + react-dom: ^19.0.0 + + '@lobehub/icons@4.0.2': + resolution: {integrity: sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==} + peerDependencies: + '@lobehub/ui': ^4.3.3 + antd: ^6.1.1 + react: ^19.0.0 + react-dom: ^19.0.0 + + '@lobehub/ui@4.9.2': + resolution: {integrity: sha512-PT9PWXgT/PoIAyAPOaxF25enofBeeWL3zPD6CqlO3lSw1A1ENHC3+lG4lZsSquD+zBf3ATKLOqp5FuyoVWPUpA==} + peerDependencies: + '@lobehub/fluent-emoji': ^4.0.0 + '@lobehub/icons': ^4.0.0 + antd: ^6.1.1 + motion: ^12.0.0 + react: ^19.0.0 + react-dom: ^19.0.0 + + '@mdx-js/mdx@3.1.1': + resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==} + + '@mdx-js/react@3.1.1': + resolution: {integrity: sha512-f++rKLQgUVYDAtECQ6fn/is15GkEH9+nZPM3MS0RcxVqoTfawHvDlSCH7JbMhAM6uJ32v3eXLvLmLvjGu7PTQw==} + peerDependencies: + '@types/react': '>=16' + react: '>=16' + + '@mermaid-js/parser@0.6.3': + resolution: {integrity: sha512-lnjOhe7zyHjc+If7yT4zoedx2vo4sHaTmtkl1+or8BRTnCtDmcTpAjpzDSfCZrshM5bCoz0GyidzadJAH1xobA==} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@primer/octicons@19.21.1': + resolution: {integrity: sha512-7tgtBkCNcg75YJnckinzvES+uxysYQCe+CHSEnzr3VYgxttzKRvfmrnVogl3aEuHCQP4xhiE9k2lFDhYwGtTzQ==} + + '@radix-ui/primitive@1.1.3': + resolution: {integrity: 
sha512-JTF99U/6XIjCBo0wqkU5sK10glYe27MRRsfwoiq5zzOEZLHU3A3KCMa5X/azekYRCJ0HlwI0crAXS/5dEHTzDg==} + + '@radix-ui/react-arrow@1.1.7': + resolution: {integrity: sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-compose-refs@1.1.2': + resolution: {integrity: sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-context@1.1.2': + resolution: {integrity: sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-dismissable-layer@1.1.11': + resolution: {integrity: sha512-Nqcp+t5cTB8BinFkZgXiMJniQH0PsUt2k51FUhbdfeKvc4ACcG2uQniY/8+h1Yv6Kza4Q7lD7PQV0z0oicE0Mg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-id@1.1.1': + resolution: {integrity: sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-popper@1.2.8': + resolution: {integrity: sha512-0NJQ4LFFUuWkE7Oxf0htBKS6zLkkjBH+hM1uk7Ng705ReR8m/uelduy1DBo0PyBXPKVnBA6YBlU94MBGXrSBCw==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-portal@1.1.10': + resolution: {integrity: sha512-4kY9IVa6+9nJPsYmngK5Uk2kUmZnv7ChhHAFeQ5oaj8jrR1bIi3xww8nH71pz1/Ve4d/cXO3YxT8eikt1B0a8w==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-portal@1.1.9': + resolution: {integrity: sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-presence@1.1.5': + resolution: {integrity: sha512-/jfEwNDdQVBCNvjkGit4h6pMOzq8bHkopq458dPt2lMjx+eBQUohZNG9A7DtO/O5ukSbxuaNGXMjHicgwy6rQQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + 
react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-primitive@2.1.3': + resolution: {integrity: sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-primitive@2.1.4': + resolution: {integrity: sha512-9hQc4+GNVtJAIEPEqlYqW5RiYdrr8ea5XQ0ZOnD6fgru+83kqT15mq2OCcbe8KnjRZl5vF3ks69AKz3kh1jrhg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-slot@1.2.3': + resolution: {integrity: sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-slot@1.2.4': + resolution: {integrity: sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-tooltip@1.2.8': + resolution: {integrity: sha512-tY7sVt1yL9ozIxvmbtN5qtmH2krXcBCfjEiCgKGLqunJHvgvZG2Pcl2oQ3kbcZARb1BGEHdkLzcYGO8ynVlieg==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/react-use-callback-ref@1.1.1': + resolution: {integrity: sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-controllable-state@1.2.2': + resolution: {integrity: sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-effect-event@0.0.2': + resolution: {integrity: sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-escape-keydown@1.1.1': + resolution: {integrity: sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-layout-effect@1.1.1': + resolution: {integrity: 
sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-rect@1.1.1': + resolution: {integrity: sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-use-size@1.1.1': + resolution: {integrity: sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ==} + peerDependencies: + '@types/react': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + + '@radix-ui/react-visually-hidden@1.2.3': + resolution: {integrity: sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug==} + peerDependencies: + '@types/react': '*' + '@types/react-dom': '*' + react: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + react-dom: ^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc + peerDependenciesMeta: + '@types/react': + optional: true + '@types/react-dom': + optional: true + + '@radix-ui/rect@1.1.1': + resolution: {integrity: sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw==} + + '@rc-component/async-validator@5.0.4': + resolution: {integrity: sha512-qgGdcVIF604M9EqjNF0hbUTz42bz/RDtxWdWuU5EQe3hi7M8ob54B6B35rOsvX5eSvIHIzT9iH1R3n+hk3CGfg==} + engines: {node: '>=14.x'} + + '@rc-component/cascader@1.10.0': + resolution: {integrity: sha512-D1XOKvbhdo9kX+cG1p8qJOnSq+sMK3L84iVYjGQIx950kJt0ixN+Xac75ykyK/AC8V3GUanjNK14Qkv149RrEw==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/checkbox@1.0.1': + resolution: {integrity: sha512-08yTH8m+bSm8TOqbybbJ9KiAuIATti6bDs2mVeSfu4QfEnyeF6X0enHVvD1NEAyuBWEAo56QtLe++MYs2D9XiQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/collapse@1.1.2': + resolution: {integrity: sha512-ilBYk1dLLJHu5Q74dF28vwtKUYQ42ZXIIDmqTuVy4rD8JQVvkXOs+KixVNbweyuIEtJYJ7+t+9GVD9dPc6N02w==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/color-picker@3.0.3': + resolution: {integrity: sha512-V7gFF9O7o5XwIWafdbOtqI4BUUkEUkgdBwp6favy3xajMX/2dDqytFaiXlcwrpq6aRyPLp5dKLAG5RFKLXMeGA==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/context@2.0.1': + resolution: {integrity: sha512-HyZbYm47s/YqtP6pKXNMjPEMaukyg7P0qVfgMLzr7YiFNMHbK2fKTAGzms9ykfGHSfyf75nBbgWw+hHkp+VImw==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/dialog@1.5.1': + resolution: {integrity: sha512-by4Sf/a3azcb89WayWuwG19/Y312xtu8N81HoVQQtnsBDylfs+dog98fTAvLinnpeoWG52m/M7QLRW6fXR3l1g==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/drawer@1.3.0': + resolution: {integrity: sha512-rE+sdXEmv2W25VBQ9daGbnb4J4hBIEKmdbj0b3xpY+K7TUmLXDIlSnoXraIbFZdGyek9WxxGKK887uRnFgI+pQ==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/dropdown@1.0.2': + resolution: {integrity: sha512-6PY2ecUSYhDPhkNHHb4wfeAya04WhpmUSKzdR60G+kMNVUCX2vjT/AgTS0Lz0I/K6xrPMJ3enQbwVpeN3sHCgg==} + peerDependencies: + react: '>=16.11.0' + react-dom: '>=16.11.0' + + '@rc-component/form@1.6.0': + resolution: {integrity: 
sha512-A7vrN8kExtw4sW06mrsgCb1rowhvBFFvQU6Bk/NL0Fj6Wet/5GF0QnGCxBu/sG3JI9FEhsJWES0D44BW2d0hzg==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/image@1.5.3': + resolution: {integrity: sha512-/NR7QW9uCN8Ugar+xsHZOPvzPySfEhcW2/vLcr7VPRM+THZMrllMRv7LAUgW7ikR+Z67Ab67cgPp5K5YftpJsQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/input-number@1.6.2': + resolution: {integrity: sha512-Gjcq7meZlCOiWN1t1xCC+7/s85humHVokTBI7PJgTfoyw5OWF74y3e6P8PHX104g9+b54jsodFIzyaj6p8LI9w==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/input@1.1.2': + resolution: {integrity: sha512-Q61IMR47piUBudgixJ30CciKIy9b1H95qe7GgEKOmSJVJXvFRWJllJfQry9tif+MX2cWFXWJf/RXz4kaCeq/Fg==} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + '@rc-component/mentions@1.6.0': + resolution: {integrity: sha512-KIkQNP6habNuTsLhUv0UGEOwG67tlmE7KNIJoQZZNggEZl5lQJTytFDb69sl5CK3TDdISCTjKP3nGEBKgT61CQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/menu@1.2.0': + resolution: {integrity: sha512-VWwDuhvYHSnTGj4n6bV3ISrLACcPAzdPOq3d0BzkeiM5cve8BEYfvkEhNoM0PLzv51jpcejeyrLXeMVIJ+QJlg==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/mini-decimal@1.1.0': + resolution: {integrity: sha512-jS4E7T9Li2GuYwI6PyiVXmxTiM6b07rlD9Ge8uGZSCz3WlzcG5ZK7g5bbuKNeZ9pgUuPK/5guV781ujdVpm4HQ==} + engines: {node: '>=8.x'} + + '@rc-component/motion@1.1.6': + resolution: {integrity: sha512-aEQobs/YA0kqRvHIPjQvOytdtdRVyhf/uXAal4chBjxDu6odHckExJzjn2D+Ju1aKK6hx3pAs6BXdV9+86xkgQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/mutate-observer@2.0.1': + resolution: {integrity: sha512-AyarjoLU5YlxuValRi+w8JRH2Z84TBbFO2RoGWz9d8bSu0FqT8DtugH3xC3BV7mUwlmROFauyWuXFuq4IFbH+w==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/notification@1.2.0': + resolution: {integrity: sha512-OX3J+zVU7rvoJCikjrfW7qOUp7zlDeFBK2eA3SFbGSkDqo63Sl4Ss8A04kFP+fxHSxMDIS9jYVEZtU1FNCFuBA==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/overflow@1.0.0': + resolution: {integrity: sha512-GSlBeoE0XTBi5cf3zl8Qh7Uqhn7v8RrlJ8ajeVpEkNe94HWy5l5BQ0Mwn2TVUq9gdgbfEMUmTX7tJFAg7mz0Rw==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/pagination@1.2.0': + resolution: {integrity: sha512-YcpUFE8dMLfSo6OARJlK6DbHHvrxz7pMGPGmC/caZSJJz6HRKHC1RPP001PRHCvG9Z/veD039uOQmazVuLJzlw==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/picker@1.9.0': + resolution: {integrity: sha512-OLisdk8AWVCG9goBU1dWzuH5QlBQk8jktmQ6p0/IyBFwdKGwyIZOSjnBYo8hooHiTdl0lU+wGf/OfMtVBw02KQ==} + engines: {node: '>=12.x'} + peerDependencies: + date-fns: '>= 2.x' + dayjs: '>= 1.x' + luxon: '>= 3.x' + moment: '>= 2.x' + react: '>=16.9.0' + react-dom: '>=16.9.0' + peerDependenciesMeta: + date-fns: + optional: true + dayjs: + optional: true + luxon: + optional: true + moment: + optional: true + + '@rc-component/portal@1.1.2': + resolution: {integrity: sha512-6f813C0IsasTZms08kfA8kPAGxbbkYToa8ALaiDIGGECU4i9hj8Plgbx0sNJDrey3EtHO30hmdaxtT0138xZcg==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/portal@2.2.0': + resolution: {integrity: sha512-oc6FlA+uXCMiwArHsJyHcIkX4q6uKyndrPol2eWX8YPkAnztHOPsFIRtmWG4BMlGE5h7YIRE3NiaJ5VS8Lb1QQ==} + engines: {node: 
'>=12.x'} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/progress@1.0.2': + resolution: {integrity: sha512-WZUnH9eGxH1+xodZKqdrHke59uyGZSWgj5HBM5Kwk5BrTMuAORO7VJ2IP5Qbm9aH3n9x3IcesqHHR0NWPBC7fQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/qrcode@1.1.1': + resolution: {integrity: sha512-LfLGNymzKdUPjXUbRP+xOhIWY4jQ+YMj5MmWAcgcAq1Ij8XP7tRmAXqyuv96XvLUBE/5cA8hLFl9eO1JQMujrA==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/rate@1.0.1': + resolution: {integrity: sha512-bkXxeBqDpl5IOC7yL7GcSYjQx9G8H+6kLYQnNZWeBYq2OYIv1MONd6mqKTjnnJYpV0cQIU2z3atdW0j1kttpTw==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/resize-observer@1.0.1': + resolution: {integrity: sha512-r+w+Mz1EiueGk1IgjB3ptNXLYSLZ5vnEfKHH+gfgj7JMupftyzvUUl3fRcMZe5uMM04x0n8+G2o/c6nlO2+Wag==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/segmented@1.3.0': + resolution: {integrity: sha512-5J/bJ01mbDnoA6P/FW8SxUvKn+OgUSTZJPzCNnTBntG50tzoP7DydGhqxp7ggZXZls7me3mc2EQDXakU3iTVFg==} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + '@rc-component/select@1.4.0': + resolution: {integrity: sha512-DDCsUkx3lHAO42fyPiBADzZgbqOp3gepjBCusuy6DDN51Vx73cwX0aqsid1asxpIwHPMYGgYg+wXbLi4YctzLQ==} + engines: {node: '>=8.x'} + peerDependencies: + react: '*' + react-dom: '*' + + '@rc-component/slider@1.0.1': + resolution: {integrity: sha512-uDhEPU1z3WDfCJhaL9jfd2ha/Eqpdfxsn0Zb0Xcq1NGQAman0TWaR37OWp2vVXEOdV2y0njSILTMpTfPV1454g==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/steps@1.2.2': + resolution: {integrity: sha512-/yVIZ00gDYYPHSY0JP+M+s3ZvuXLu2f9rEjQqiUDs7EcYsUYrpJ/1bLj9aI9R7MBR3fu/NGh6RM9u2qGfqp+Nw==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/switch@1.0.3': + resolution: {integrity: sha512-Jgi+EbOBquje/XNdofr7xbJQZPYJP+BlPfR0h+WN4zFkdtB2EWqEfvkXJWeipflwjWip0/17rNbxEAqs8hVHfw==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/table@1.9.1': + resolution: {integrity: sha512-FVI5ZS/GdB3BcgexfCYKi3iHhZS3Fr59EtsxORszYGrfpH1eWr33eDNSYkVfLI6tfJ7vftJDd9D5apfFWqkdJg==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/tabs@1.7.0': + resolution: {integrity: sha512-J48cs2iBi7Ho3nptBxxIqizEliUC+ExE23faspUQKGQ550vaBlv3aGF8Epv/UB1vFWeoJDTW/dNzgIU0Qj5i/w==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/textarea@1.1.2': + resolution: {integrity: sha512-9rMUEODWZDMovfScIEHXWlVZuPljZ2pd1LKNjslJVitn4SldEzq5vO1CL3yy3Dnib6zZal2r2DPtjy84VVpF6A==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/tooltip@1.4.0': + resolution: {integrity: sha512-8Rx5DCctIlLI4raR0I0xHjVTf1aF48+gKCNeAAo5bmF5VoR5YED+A/XEqzXv9KKqrJDRcd3Wndpxh2hyzrTtSg==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/tour@2.2.1': + resolution: {integrity: sha512-BUCrVikGJsXli38qlJ+h2WyDD6dYxzDA9dV3o0ij6gYhAq6ooT08SUMWOikva9v4KZ2BEuluGl5bPcsjrSoBgQ==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/tree-select@1.5.0': + resolution: {integrity: sha512-1nBAMreFJXkCIeZlWG0l+6i0jLWzlmmRv/TrtZjLkoq8WmpzSuDhP32YroC7rAhGFR34thpHkvCedPzBXIL/XQ==} + 
peerDependencies: + react: '*' + react-dom: '*' + + '@rc-component/tree@1.1.0': + resolution: {integrity: sha512-HZs3aOlvFgQdgrmURRc/f4IujiNBf4DdEeXUlkS0lPoLlx9RoqsZcF0caXIAMVb+NaWqKtGQDnrH8hqLCN5zlA==} + engines: {node: '>=10.x'} + peerDependencies: + react: '*' + react-dom: '*' + + '@rc-component/trigger@2.3.0': + resolution: {integrity: sha512-iwaxZyzOuK0D7lS+0AQEtW52zUWxoGqTGkke3dRyb8pYiShmRpCjB/8TzPI4R6YySCH7Vm9BZj/31VPiiQTLBg==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/trigger@3.8.1': + resolution: {integrity: sha512-walnDJnKq+OcPQFHBMN+YZmdHV8+6z75+Rgpc0dW1c+Dmy6O7tRueDs4LdbwjlryQfTdsw84PIkNPzcx5yQ7qQ==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/upload@1.1.0': + resolution: {integrity: sha512-LIBV90mAnUE6VK5N4QvForoxZc4XqEYZimcp7fk+lkE4XwHHyJWxpIXQQwMU8hJM+YwBbsoZkGksL1sISWHQxw==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rc-component/util@1.7.0': + resolution: {integrity: sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + '@rc-component/virtual-list@1.0.2': + resolution: {integrity: sha512-uvTol/mH74FYsn5loDGJxo+7kjkO4i+y4j87Re1pxJBs0FaeuMuLRzQRGaXwnMcV1CxpZLi2Z56Rerj2M00fjQ==} + engines: {node: '>=8.x'} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@rollup/rollup-android-arm-eabi@4.54.0': + resolution: {integrity: sha512-OywsdRHrFvCdvsewAInDKCNyR3laPA2mc9bRYJ6LBp5IyvF3fvXbbNR0bSzHlZVFtn6E0xw2oZlyjg4rKCVcng==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.54.0': + resolution: {integrity: sha512-Skx39Uv+u7H224Af+bDgNinitlmHyQX1K/atIA32JP3JQw6hVODX5tkbi2zof/E69M1qH2UoN3Xdxgs90mmNYw==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.54.0': + resolution: {integrity: sha512-k43D4qta/+6Fq+nCDhhv9yP2HdeKeP56QrUUTW7E6PhZP1US6NDqpJj4MY0jBHlJivVJD5P8NxrjuobZBJTCRw==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.54.0': + resolution: {integrity: sha512-cOo7biqwkpawslEfox5Vs8/qj83M/aZCSSNIWpVzfU2CYHa2G3P1UN5WF01RdTHSgCkri7XOlTdtk17BezlV3A==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.54.0': + resolution: {integrity: sha512-miSvuFkmvFbgJ1BevMa4CPCFt5MPGw094knM64W9I0giUIMMmRYcGW/JWZDriaw/k1kOBtsWh1z6nIFV1vPNtA==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.54.0': + resolution: {integrity: sha512-KGXIs55+b/ZfZsq9aR026tmr/+7tq6VG6MsnrvF4H8VhwflTIuYh+LFUlIsRdQSgrgmtM3fVATzEAj4hBQlaqQ==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.54.0': + resolution: {integrity: sha512-EHMUcDwhtdRGlXZsGSIuXSYwD5kOT9NVnx9sqzYiwAc91wfYOE1g1djOEDseZJKKqtHAHGwnGPQu3kytmfaXLQ==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.54.0': + resolution: {integrity: sha512-+pBrqEjaakN2ySv5RVrj/qLytYhPKEUwk+e3SFU5jTLHIcAtqh2rLrd/OkbNuHJpsBgxsD8ccJt5ga/SeG0JmA==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.54.0': + resolution: {integrity: sha512-NSqc7rE9wuUaRBsBp5ckQ5CVz5aIRKCwsoa6WMF7G01sX3/qHUw/z4pv+D+ahL1EIKy6Enpcnz1RY8pf7bjwng==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.54.0': + resolution: {integrity: sha512-gr5vDbg3Bakga5kbdpqx81m2n9IX8M6gIMlQQIXiLTNeQW6CucvuInJ91EuCJ/JYvc+rcLLsDFcfAD1K7fMofg==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loong64-gnu@4.54.0': + resolution: {integrity: 
sha512-gsrtB1NA3ZYj2vq0Rzkylo9ylCtW/PhpLEivlgWe0bpgtX5+9j9EZa0wtZiCjgu6zmSeZWyI/e2YRX1URozpIw==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-ppc64-gnu@4.54.0': + resolution: {integrity: sha512-y3qNOfTBStmFNq+t4s7Tmc9hW2ENtPg8FeUD/VShI7rKxNW7O4fFeaYbMsd3tpFlIg1Q8IapFgy7Q9i2BqeBvA==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.54.0': + resolution: {integrity: sha512-89sepv7h2lIVPsFma8iwmccN7Yjjtgz0Rj/Ou6fEqg3HDhpCa+Et+YSufy27i6b0Wav69Qv4WBNl3Rs6pwhebQ==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.54.0': + resolution: {integrity: sha512-ZcU77ieh0M2Q8Ur7D5X7KvK+UxbXeDHwiOt/CPSBTI1fBmeDMivW0dPkdqkT4rOgDjrDDBUed9x4EgraIKoR2A==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.54.0': + resolution: {integrity: sha512-2AdWy5RdDF5+4YfG/YesGDDtbyJlC9LHmL6rZw6FurBJ5n4vFGupsOBGfwMRjBYH7qRQowT8D/U4LoSvVwOhSQ==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.54.0': + resolution: {integrity: sha512-WGt5J8Ij/rvyqpFexxk3ffKqqbLf9AqrTBbWDk7ApGUzaIs6V+s2s84kAxklFwmMF/vBNGrVdYgbblCOFFezMQ==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.54.0': + resolution: {integrity: sha512-JzQmb38ATzHjxlPHuTH6tE7ojnMKM2kYNzt44LO/jJi8BpceEC8QuXYA908n8r3CNuG/B3BV8VR3Hi1rYtmPiw==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-openharmony-arm64@4.54.0': + resolution: {integrity: sha512-huT3fd0iC7jigGh7n3q/+lfPcXxBi+om/Rs3yiFxjvSxbSB6aohDFXbWvlspaqjeOh+hx7DDHS+5Es5qRkWkZg==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.54.0': + resolution: {integrity: sha512-c2V0W1bsKIKfbLMBu/WGBz6Yci8nJ/ZJdheE0EwB73N3MvHYKiKGs3mVilX4Gs70eGeDaMqEob25Tw2Gb9Nqyw==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.54.0': + resolution: {integrity: sha512-woEHgqQqDCkAzrDhvDipnSirm5vxUXtSKDYTVpZG3nUdW/VVB5VdCYA2iReSj/u3yCZzXID4kuKG7OynPnB3WQ==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.54.0': + resolution: {integrity: sha512-dzAc53LOuFvHwbCEOS0rPbXp6SIhAf2txMP5p6mGyOXXw5mWY8NGGbPMPrs4P1WItkfApDathBj/NzMLUZ9rtQ==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.54.0': + resolution: {integrity: sha512-hYT5d3YNdSh3mbCU1gwQyPgQd3T2ne0A3KG8KSBdav5TiBg6eInVmV+TeR5uHufiIgSFg0XsOWGW5/RhNcSvPg==} + cpu: [x64] + os: [win32] + + '@shikijs/core@3.20.0': + resolution: {integrity: sha512-f2ED7HYV4JEk827mtMDwe/yQ25pRiXZmtHjWF8uzZKuKiEsJR7Ce1nuQ+HhV9FzDcbIo4ObBCD9GPTzNuy9S1g==} + + '@shikijs/engine-javascript@3.20.0': + resolution: {integrity: sha512-OFx8fHAZuk7I42Z9YAdZ95To6jDePQ9Rnfbw9uSRTSbBhYBp1kEOKv/3jOimcj3VRUKusDYM6DswLauwfhboLg==} + + '@shikijs/engine-oniguruma@3.20.0': + resolution: {integrity: sha512-Yx3gy7xLzM0ZOjqoxciHjA7dAt5tyzJE3L4uQoM83agahy+PlW244XJSrmJRSBvGYELDhYXPacD4R/cauV5bzQ==} + + '@shikijs/langs@3.20.0': + resolution: {integrity: sha512-le+bssCxcSHrygCWuOrYJHvjus6zhQ2K7q/0mgjiffRbkhM4o1EWu2m+29l0yEsHDbWaWPNnDUTRVVBvBBeKaA==} + + '@shikijs/themes@3.20.0': + resolution: {integrity: sha512-U1NSU7Sl26Q7ErRvJUouArxfM2euWqq1xaSrbqMu2iqa+tSp0D1Yah8216sDYbdDHw4C8b75UpE65eWorm2erQ==} + + '@shikijs/transformers@3.20.0': + resolution: {integrity: sha512-PrHHMRr3Q5W1qB/42kJW6laqFyWdhrPF2hNR9qjOm1xcSiAO3hAHo7HaVyHE6pMyevmy3i51O8kuGGXC78uK3g==} + + '@shikijs/types@3.20.0': + resolution: {integrity: sha512-lhYAATn10nkZcBQ0BlzSbJA3wcmL5MXUUF8d2Zzon6saZDlToKaiRX60n2+ZaHJCmXEcZRWNzn+k9vplr8Jhsw==} + + '@shikijs/vscode-textmate@10.0.2': + resolution: {integrity: sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==} 
+ + '@splinetool/runtime@0.9.526': + resolution: {integrity: sha512-qznHbXA5aKwDbCgESAothCNm1IeEZcmNWG145p5aXj4w5uoqR1TZ9qkTHTKLTsUbHeitCwdhzmRqan1kxboLgQ==} + + '@stitches/react@1.2.8': + resolution: {integrity: sha512-9g9dWI4gsSVe8bNLlb+lMkBYsnIKCZTmvqvDG+Avnn69XfmHZKiaMrx7cgTaddq7aTPPmXiTsbFcUy0xgI4+wA==} + peerDependencies: + react: '>= 16.3.0' + + '@types/d3-array@3.2.2': + resolution: {integrity: sha512-hOLWVbm7uRza0BYXpIIW5pxfrKe0W+D5lrFiAEYR+pb6w3N2SwSMaJbXdUfSEv+dT4MfHBLtn5js0LAWaO6otw==} + + '@types/d3-axis@3.0.6': + resolution: {integrity: sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==} + + '@types/d3-brush@3.0.6': + resolution: {integrity: sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==} + + '@types/d3-chord@3.0.6': + resolution: {integrity: sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==} + + '@types/d3-color@3.1.3': + resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} + + '@types/d3-contour@3.0.6': + resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} + + '@types/d3-delaunay@6.0.4': + resolution: {integrity: sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==} + + '@types/d3-dispatch@3.0.7': + resolution: {integrity: sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==} + + '@types/d3-drag@3.0.7': + resolution: {integrity: sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==} + + '@types/d3-dsv@3.0.7': + resolution: {integrity: sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==} + + '@types/d3-ease@3.0.2': + resolution: {integrity: sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} + + '@types/d3-fetch@3.0.7': + resolution: {integrity: sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==} + + '@types/d3-force@3.0.10': + resolution: {integrity: sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==} + + '@types/d3-format@3.0.4': + resolution: {integrity: sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==} + + '@types/d3-geo@3.1.0': + resolution: {integrity: sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} + + '@types/d3-hierarchy@3.1.7': + resolution: {integrity: sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==} + + '@types/d3-interpolate@3.0.4': + resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} + + '@types/d3-path@3.1.1': + resolution: {integrity: sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} + + '@types/d3-polygon@3.0.2': + resolution: {integrity: sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==} + + '@types/d3-quadtree@3.0.6': + resolution: {integrity: sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==} + + '@types/d3-random@3.0.3': + resolution: {integrity: 
sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==} + + '@types/d3-scale-chromatic@3.1.0': + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + + '@types/d3-scale@4.0.9': + resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} + + '@types/d3-selection@3.0.11': + resolution: {integrity: sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==} + + '@types/d3-shape@3.1.7': + resolution: {integrity: sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==} + + '@types/d3-time-format@4.0.3': + resolution: {integrity: sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==} + + '@types/d3-time@3.0.4': + resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} + + '@types/d3-timer@3.0.2': + resolution: {integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==} + + '@types/d3-transition@3.0.9': + resolution: {integrity: sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==} + + '@types/d3-zoom@3.0.8': + resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + + '@types/d3@7.4.3': + resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} + + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + + '@types/estree-jsx@1.0.5': + resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/file-saver@2.0.7': + resolution: {integrity: sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==} + + '@types/geojson@7946.0.16': + resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} + + '@types/hast@3.0.4': + resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} + + '@types/js-cookie@3.0.6': + resolution: {integrity: sha512-wkw9yd1kEXOPnvEeEV1Go1MmxtBJL0RR79aOTAApecWFVu7w0NNXNqhcWgvw2YgZDYadliXkl14pa3WXw5jlCQ==} + + '@types/katex@0.16.7': + resolution: {integrity: sha512-HMwFiRujE5PjrgwHQ25+bsLJgowjGjm5Z8FVSf0N6PwgJrwxH0QxzHYDcKsTfV3wva0vzrpqMTJS2jXPr5BMEQ==} + + '@types/mdast@4.0.4': + resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} + + '@types/mdx@2.0.13': + resolution: {integrity: sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/node@20.19.27': + resolution: {integrity: sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==} + + '@types/parse-json@4.0.2': + resolution: {integrity: 
sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==} + + '@types/react@19.2.7': + resolution: {integrity: sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==} + + '@types/trusted-types@2.0.7': + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + + '@types/unist@2.0.11': + resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} + + '@types/unist@3.0.3': + resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + + '@types/web-bluetooth@0.0.20': + resolution: {integrity: sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==} + + '@typescript-eslint/eslint-plugin@7.18.0': + resolution: {integrity: sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + '@typescript-eslint/parser': ^7.0.0 + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@7.18.0': + resolution: {integrity: sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@7.18.0': + resolution: {integrity: sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@typescript-eslint/type-utils@7.18.0': + resolution: {integrity: sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@7.18.0': + resolution: {integrity: sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@typescript-eslint/typescript-estree@7.18.0': + resolution: {integrity: sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@7.18.0': + resolution: {integrity: sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + + '@typescript-eslint/visitor-keys@7.18.0': + resolution: {integrity: sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + '@use-gesture/core@10.3.1': + resolution: {integrity: sha512-WcINiDt8WjqBdUXye25anHiNxPc0VOrlT8F6LLkU6cycrOGUDyY/yyFmsg3k8i5OLvv25llc0QC45GhR/C8llw==} + + '@use-gesture/react@10.3.1': + resolution: {integrity: sha512-Yy19y6O2GJq8f7CHf7L0nxL8bf4PZCPaVOCgJrusOeFHY1LvHgYXnmnXg6N5iwAnbgbZCDjo60SiM6IPJi9C5g==} + peerDependencies: + react: '>= 16.8.0' + + 
'@vitejs/plugin-vue@5.2.4': + resolution: {integrity: sha512-7Yx/SXSOcQq5HiiV3orevHUFn+pmMB4cgbEkDYgnkUWb0WfeQ/wa2yFv6D5ICiCQOVpjA7vYDXrC7AGO8yjDHA==} + engines: {node: ^18.0.0 || >=20.0.0} + peerDependencies: + vite: ^5.0.0 || ^6.0.0 + vue: ^3.2.25 + + '@volar/language-core@2.4.15': + resolution: {integrity: sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==} + + '@volar/source-map@2.4.15': + resolution: {integrity: sha512-CPbMWlUN6hVZJYGcU/GSoHu4EnCHiLaXI9n8c9la6RaI9W5JHX+NqG+GSQcB0JdC2FIBLdZJwGsfKyBB71VlTg==} + + '@volar/typescript@2.4.15': + resolution: {integrity: sha512-2aZ8i0cqPGjXb4BhkMsPYDkkuc2ZQ6yOpqwAuNwUoncELqoy5fRgOQtLR9gB0g902iS0NAkvpIzs27geVyVdPg==} + + '@vue/compiler-core@3.5.26': + resolution: {integrity: sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==} + + '@vue/compiler-dom@3.5.26': + resolution: {integrity: sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==} + + '@vue/compiler-sfc@3.5.26': + resolution: {integrity: sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==} + + '@vue/compiler-ssr@3.5.26': + resolution: {integrity: sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==} + + '@vue/compiler-vue2@2.7.16': + resolution: {integrity: sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==} + + '@vue/devtools-api@6.6.4': + resolution: {integrity: sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==} + + '@vue/language-core@2.2.12': + resolution: {integrity: sha512-IsGljWbKGU1MZpBPN+BvPAdr55YPkj2nB/TBNGNC32Vy2qLG25DYu/NBN2vNtZqdRbTRjaoYrahLrToim2NanA==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@vue/reactivity@3.5.26': + resolution: {integrity: sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==} + + '@vue/runtime-core@3.5.26': + resolution: {integrity: sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==} + + '@vue/runtime-dom@3.5.26': + resolution: {integrity: sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==} + + '@vue/server-renderer@3.5.26': + resolution: {integrity: sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==} + peerDependencies: + vue: 3.5.26 + + '@vue/shared@3.5.26': + resolution: {integrity: sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==} + + '@vueuse/core@10.11.1': + resolution: {integrity: sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==} + + '@vueuse/metadata@10.11.1': + resolution: {integrity: sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==} + + '@vueuse/shared@10.11.1': + resolution: {integrity: sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==} + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.15.0: + resolution: {integrity: sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==} + engines: {node: '>=0.4.0'} + hasBin: true + + 
adler-32@1.3.1: + resolution: {integrity: sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==} + engines: {node: '>=0.8'} + + ahooks@3.9.6: + resolution: {integrity: sha512-Mr7f05swd5SmKlR9SZo5U6M0LsL4ErweLzpdgXjA1JPmnZ78Vr6wzx0jUtvoxrcqGKYnX0Yjc02iEASVxHFPjQ==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + alien-signals@1.0.13: + resolution: {integrity: sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.2.2: + resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==} + engines: {node: '>=12'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + antd-style@4.1.0: + resolution: {integrity: sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==} + peerDependencies: + antd: '>=6.0.0' + react: '>=18' + + antd@6.1.3: + resolution: {integrity: sha512-kvaLtOm0UwCIdtR424/Mo6pyJxN34/6003e1io3GIKWQOdlddplFylv767iGxXLMrxfNoQmxuNJcF1miFbxCZQ==} + peerDependencies: + react: '>=18.0.0' + react-dom: '>=18.0.0' + + any-promise@1.3.0: + resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@5.0.2: + resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + assign-symbols@1.0.0: + resolution: {integrity: sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==} + engines: {node: '>=0.10.0'} + + astring@1.9.0: + resolution: {integrity: sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==} + hasBin: true + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + attr-accept@2.2.5: + resolution: {integrity: sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==} + engines: {node: '>=4'} + + autoprefixer@10.4.23: + resolution: {integrity: sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + axios@1.13.2: + resolution: {integrity: sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==} + + babel-plugin-macros@3.1.0: + resolution: {integrity: 
sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==} + engines: {node: '>=10', npm: '>=6'} + + bail@2.0.2: + resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + baseline-browser-mapping@2.9.11: + resolution: {integrity: sha512-Sg0xJUNDU1sJNGdfGWhVHX0kkZ+HWcvmVymJbj6NSgZZmW/8S9Y2HQ5euytnIgakgxN6papOAWiwDo1ctFDcoQ==} + hasBin: true + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + brace-expansion@1.1.12: + resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} + + brace-expansion@2.0.2: + resolution: {integrity: sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==} + + braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.28.1: + resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + call-bind-apply-helpers@1.0.2: + resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase-css@2.0.1: + resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} + engines: {node: '>= 6'} + + caniuse-lite@1.0.30001761: + resolution: {integrity: sha512-JF9ptu1vP2coz98+5051jZ4PwQgd2ni8A+gYSN7EA7dPKIMf0pDlSUxhdmVOaV3/fYK5uWBkgSXJaRLr4+3A6g==} + + ccount@2.0.1: + resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} + + cfb@1.2.2: + resolution: {integrity: sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==} + engines: {node: '>=0.8'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + character-entities-html4@2.1.0: + resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==} + + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} + + character-entities@2.0.2: + resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} + + character-reference-invalid@2.0.1: + resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==} + + chart.js@4.5.1: + resolution: {integrity: sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==} + 
engines: {pnpm: '>=8'} + + chevrotain-allstar@0.3.1: + resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} + peerDependencies: + chevrotain: ^11.0.0 + + chevrotain@11.0.3: + resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} + + chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} + engines: {node: '>= 8.10.0'} + + chokidar@4.0.3: + resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} + engines: {node: '>= 14.16.0'} + + chroma-js@3.2.0: + resolution: {integrity: sha512-os/OippSlX1RlWWr+QDPcGUZs0uoqr32urfxESG9U93lhUfbnlyckte84Q8P1UQY/qth983AS1JONKmLS4T0nw==} + + class-variance-authority@0.7.1: + resolution: {integrity: sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==} + + classnames@2.5.1: + resolution: {integrity: sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow==} + + clsx@1.2.1: + resolution: {integrity: sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==} + engines: {node: '>=6'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + codepage@1.15.0: + resolution: {integrity: sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==} + engines: {node: '>=0.8'} + + collapse-white-space@2.1.0: + resolution: {integrity: sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + colord@2.9.3: + resolution: {integrity: sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw==} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + comma-separated-tokens@2.0.3: + resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + + commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + + commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + + commander@8.3.0: + resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} + engines: {node: '>= 12'} + + compute-scroll-into-view@3.1.1: + resolution: {integrity: sha512-VRhuHOLoKYOy4UbilLbUzbYg93XLjv2PncJC50EuTWPA3gaja1UjBsUP/D/9/juV3vQFr6XBEzn9KCAHdUvOHw==} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + confbox@0.1.8: + resolution: {integrity: 
sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + + convert-source-map@1.9.0: + resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} + + cose-base@1.0.3: + resolution: {integrity: sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + + cose-base@2.2.0: + resolution: {integrity: sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==} + + cosmiconfig@7.1.0: + resolution: {integrity: sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==} + engines: {node: '>=10'} + + crc-32@1.2.2: + resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==} + engines: {node: '>=0.8'} + hasBin: true + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@3.2.3: + resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} + + cytoscape-cose-bilkent@4.1.0: + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape-fcose@2.2.0: + resolution: {integrity: sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==} + peerDependencies: + cytoscape: ^3.2.0 + + cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + + d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + + d3-array@3.2.4: + resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} + engines: {node: '>=12'} + + d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + + d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + + d3-chord@3.0.1: + resolution: {integrity: sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + + d3-color@3.1.0: + resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} + engines: {node: '>=12'} + + d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + + d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + + d3-dispatch@3.0.1: + resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + + d3-drag@3.0.0: + resolution: {integrity: 
sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} + engines: {node: '>=12'} + + d3-dsv@3.0.1: + resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + + d3-ease@3.0.1: + resolution: {integrity: sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==} + engines: {node: '>=12'} + + d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + + d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + + d3-format@3.1.0: + resolution: {integrity: sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==} + engines: {node: '>=12'} + + d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + + d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + + d3-interpolate@3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} + + d3-path@1.0.9: + resolution: {integrity: sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + + d3-path@3.1.0: + resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} + engines: {node: '>=12'} + + d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + + d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + + d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + + d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + + d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + + d3-scale@4.0.2: + resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} + engines: {node: '>=12'} + + d3-selection@3.0.0: + resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} + engines: {node: '>=12'} + + d3-shape@1.3.7: + resolution: {integrity: sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + + d3-shape@3.2.0: + resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} + engines: {node: '>=12'} + + d3-time-format@4.1.0: + resolution: {integrity: sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==} + engines: {node: '>=12'} + + d3-time@3.1.0: + resolution: {integrity: 
sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==} + engines: {node: '>=12'} + + d3-timer@3.0.1: + resolution: {integrity: sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==} + engines: {node: '>=12'} + + d3-transition@3.0.1: + resolution: {integrity: sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==} + engines: {node: '>=12'} + peerDependencies: + d3-selection: 2 - 3 + + d3-zoom@3.0.0: + resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} + engines: {node: '>=12'} + + d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + + dagre-d3-es@7.0.13: + resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + + dayjs@1.11.19: + resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} + + de-indent@1.0.2: + resolution: {integrity: sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decode-named-character-reference@1.2.0: + resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==} + + decode-uri-component@0.4.1: + resolution: {integrity: sha512-+8VxcR21HhTy8nOt6jf20w0c9CADrw1O8d+VZ/YzzCt4bJ3uBjw+D1q2osAB8RnpwwaeYBxy0HyKQxD5JBMuuQ==} + engines: {node: '>=14.16'} + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + delaunator@5.0.1: + resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} + + didyoumean@1.2.2: + resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + + dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + + dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + dompurify@3.3.1: + resolution: {integrity: sha512-qkdCKzLNtrgPFP1Vo+98FRzJnBRGe4ffyCea9IwHB1fyxPOeNTHpLKYGd4Uk9xvNoH0ZoOjwZxNptyMwqrId1Q==} + + driver.js@1.4.0: + resolution: {integrity: 
sha512-Gm64jm6PmcU+si21sQhBrTAM1JvUrR0QhNmjkprNLxohOBzul9+pNHXgQaT9lW84gwg9GMLB3NZGuGolsz5uew==} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + electron-to-chromium@1.5.267: + resolution: {integrity: sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==} + + emoji-mart@5.6.0: + resolution: {integrity: sha512-eJp3QRe79pjwa+duv+n7+5YsNhRcMl812EcFVwrnRvYKoNPoQb5qxU8DG6Bgwji0akHdp6D4Ln6tYLG58MFSow==} + + emoji-regex@10.6.0: + resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==} + + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} + engines: {node: '>=0.12'} + + entities@7.0.0: + resolution: {integrity: sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==} + engines: {node: '>=0.12'} + + error-ex@1.3.4: + resolution: {integrity: sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==} + + es-define-property@1.0.1: + resolution: {integrity: sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + es-toolkit@1.43.0: + resolution: {integrity: sha512-SKCT8AsWvYzBBuUqMk4NPwFlSdqLpJwmy6AP322ERn8W2YLIB6JBXnwMI2Qsh2gfphT3q7EKAxKb23cvFHFwKA==} + + esast-util-from-estree@2.0.0: + resolution: {integrity: sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==} + + esast-util-from-js@2.0.1: + resolution: {integrity: sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==} + + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} + engines: {node: '>=12'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + engines: {node: '>=12'} + + eslint-plugin-vue@9.33.0: + resolution: {integrity: sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0 + + eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + 
eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.57.1: + resolution: {integrity: sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + deprecated: This version is no longer supported. Please see https://eslint.org/version-support for other options. + hasBin: true + + espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esquery@1.7.0: + resolution: {integrity: sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + estree-util-attach-comments@3.0.0: + resolution: {integrity: sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==} + + estree-util-build-jsx@3.0.1: + resolution: {integrity: sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==} + + estree-util-is-identifier-name@3.0.0: + resolution: {integrity: sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==} + + estree-util-scope@1.0.0: + resolution: {integrity: sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==} + + estree-util-to-js@2.0.0: + resolution: {integrity: sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==} + + estree-util-visit@2.0.0: + resolution: {integrity: sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==} + + estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + extend-shallow@2.0.1: + resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} + engines: {node: '>=0.10.0'} + + extend-shallow@3.0.2: + resolution: {integrity: sha512-BwY5b5Ql4+qZoefgMj2NUmx+tehVTH/Kf4k1ZEtOHNFcm2wSxMRo992l6X3TIgni2eZVTZ85xMOjF31fwZAj6Q==} + engines: {node: '>=0.10.0'} + + extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.3: + resolution: {integrity: sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: 
sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.20.1: + resolution: {integrity: sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==} + + fdir@6.5.0: + resolution: {integrity: sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==} + engines: {node: '>=12.0.0'} + peerDependencies: + picomatch: ^3 || ^4 + peerDependenciesMeta: + picomatch: + optional: true + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + file-saver@2.0.5: + resolution: {integrity: sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==} + + file-selector@0.5.0: + resolution: {integrity: sha512-s8KNnmIDTBoD0p9uJ9uD0XY38SCeBOtj0UMXyQSLg1Ypfrfj8+dAvwsLjYQkQ2GjhVtp2HrnF5cJzMhBjfD8HA==} + engines: {node: '>= 10'} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + filter-obj@5.1.0: + resolution: {integrity: sha512-qWeTREPoT7I0bifpPUXtxkZJ1XJzxWtfoWWkdVGqa+eCr3SHW/Ocp89o8vLvbUuQnadybJpjOKu4V+RwO6sGng==} + engines: {node: '>=14.16'} + + find-root@1.1.0: + resolution: {integrity: sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.3.3: + resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} + + follow-redirects@1.15.11: + resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + for-in@1.0.2: + resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} + engines: {node: '>=0.10.0'} + + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + + frac@1.1.2: + resolution: {integrity: sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==} + engines: {node: '>=0.8'} + + fraction.js@5.3.4: + resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==} + + framer-motion@12.23.26: + resolution: {integrity: sha512-cPcIhgR42xBn1Uj+PzOyheMtZ73H927+uWPDVhUMqxy8UHt6Okavb6xIz9J/phFUHUj0OncR6UvMfJTXoc/LKA==} + peerDependencies: + '@emotion/is-prop-valid': '*' + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + peerDependenciesMeta: + '@emotion/is-prop-valid': + optional: true + react: + optional: true + react-dom: + optional: true + + fs.realpath@1.0.0: + resolution: {integrity: 
sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + get-east-asian-width@1.4.0: + resolution: {integrity: sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==} + engines: {node: '>=18'} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-value@2.0.6: + resolution: {integrity: sha512-Ln0UQDlxH1BapMu3GPtf7CuYNwRZf2gwCuPqbyG6pB8WfmFpzqcy4xtAaAMUhnNqjMKTiCPZG2oMT3YSx8U2NA==} + engines: {node: '>=0.10.0'} + + giscus@1.6.0: + resolution: {integrity: sha512-Zrsi8r4t1LVW950keaWcsURuZUQwUaMKjvJgTCY125vkW6OiEBkatE7ScJDbpqKHdZwb///7FVC21SE3iFK3PQ==} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no longer supported + + globals@13.24.0: + resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} + engines: {node: '>=8'} + + globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + hachure-fill@0.5.2: + resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + hast-util-from-dom@5.0.1: + resolution: {integrity: sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q==} + + hast-util-from-html-isomorphic@2.0.0: + resolution: {integrity: 
sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw==} + + hast-util-from-html@2.0.3: + resolution: {integrity: sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==} + + hast-util-from-parse5@8.0.3: + resolution: {integrity: sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==} + + hast-util-is-element@3.0.0: + resolution: {integrity: sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==} + + hast-util-parse-selector@4.0.0: + resolution: {integrity: sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==} + + hast-util-raw@9.1.0: + resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + + hast-util-to-estree@3.1.3: + resolution: {integrity: sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==} + + hast-util-to-html@9.0.5: + resolution: {integrity: sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==} + + hast-util-to-jsx-runtime@2.3.6: + resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==} + + hast-util-to-parse5@8.0.1: + resolution: {integrity: sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==} + + hast-util-to-text@4.0.2: + resolution: {integrity: sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==} + + hast-util-whitespace@3.0.0: + resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==} + + hastscript@9.0.1: + resolution: {integrity: sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==} + + he@1.2.0: + resolution: {integrity: sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==} + hasBin: true + + hoist-non-react-statics@3.3.2: + resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + + html-url-attributes@3.0.1: + resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} + + html-void-elements@3.0.0: + resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + ignore@5.3.2: + resolution: {integrity: sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==} + engines: {node: '>= 4'} + + immer@11.1.3: + resolution: {integrity: sha512-6jQTc5z0KJFtr1UgFpIL3N9XSC3saRaI9PwWtzM2pSqkNGtiNkYY2OSwkOGDK2XcTRcLb1pi/aNkKZz0nxVH4Q==} + + import-fresh@3.3.1: + resolution: {integrity: sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This 
module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + inline-style-parser@0.2.7: + resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} + + internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + + internmap@2.0.3: + resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} + engines: {node: '>=12'} + + intersection-observer@0.12.2: + resolution: {integrity: sha512-7m1vEcPCxXYI8HqnL8CKI6siDyD+eIWSwgB3DZA+ZTogxk9I4CDnj4wilt9x/+/QbHI4YG5YZNmC6458/e9Ktg==} + deprecated: The Intersection Observer polyfill is no longer needed and can safely be removed. Intersection Observer has been Baseline since 2019. + + is-alphabetical@2.0.1: + resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} + + is-alphanumerical@2.0.1: + resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-core-module@2.16.1: + resolution: {integrity: sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==} + engines: {node: '>= 0.4'} + + is-decimal@2.0.1: + resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} + + is-extendable@0.1.1: + resolution: {integrity: sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==} + engines: {node: '>=0.10.0'} + + is-extendable@1.0.1: + resolution: {integrity: sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==} + engines: {node: '>=0.10.0'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-hexadecimal@2.0.1: + resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} + + is-mobile@5.0.0: + resolution: {integrity: sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} 
+ engines: {node: '>=12'} + + is-plain-object@2.0.4: + resolution: {integrity: sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==} + engines: {node: '>=0.10.0'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + isobject@3.0.1: + resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} + engines: {node: '>=0.10.0'} + + jiti@1.21.7: + resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==} + hasBin: true + + js-cookie@3.0.5: + resolution: {integrity: sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==} + engines: {node: '>=14'} + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + jsesc@3.1.0: + resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} + engines: {node: '>=6'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json2mq@0.2.0: + resolution: {integrity: sha512-SzoRg7ux5DWTII9J2qkrZrqV1gt+rTaoufMxEzXbS26Uid0NwaJd123HcoB80TgubEppxxIGdNxCx50fEoEWQA==} + + katex@0.16.27: + resolution: {integrity: sha512-aeQoDkuRWSqQN6nSvVCEFvfXdqo1OQiCmmW1kc9xSdjutPv7BGO7pqY9sQRJpMOGrEdfDgF2TfRXe5eUAD2Waw==} + hasBin: true + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + khroma@2.1.0: + resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + + langium@3.3.1: + resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} + engines: {node: '>=16.0.0'} + + layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + + layout-base@2.0.1: + resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} + + leva@0.10.1: + resolution: {integrity: sha512-BcjnfUX8jpmwZUz2L7AfBtF9vn4ggTH33hmeufDULbP3YgNZ/C+ss/oO3stbrqRQyaOmRwy70y7BGTGO81S3rA==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + + lilconfig@3.1.3: + resolution: {integrity: sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==} + engines: 
{node: '>=14'} + + lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + lit-element@4.2.2: + resolution: {integrity: sha512-aFKhNToWxoyhkNDmWZwEva2SlQia+jfG0fjIWV//YeTaWrVnOxD89dPKfigCUspXFmjzOEUQpOkejH5Ly6sG0w==} + + lit-html@3.3.2: + resolution: {integrity: sha512-Qy9hU88zcmaxBXcc10ZpdK7cOLXvXpRoBxERdtqV9QOrfpMZZ6pSYP91LhpPtap3sFMUiL7Tw2RImbe0Al2/kw==} + + lit@3.3.2: + resolution: {integrity: sha512-NF9zbsP79l4ao2SNrH3NkfmFgN/hBYSQo90saIVI1o5GpjAdCPVstVzO1MrLOakHoEhYkrtRjPK6Ob521aoYWQ==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash-es@4.17.21: + resolution: {integrity: sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==} + + lodash-es@4.17.22: + resolution: {integrity: sha512-XEawp1t0gxSi9x01glktRZ5HDy0HXqrM0x5pXQM98EaI0NxO6jVM7omDOxsuEo5UIASAnm2bRp1Jt/e0a2XU8Q==} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + longest-streak@3.1.0: + resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} + + loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + + lucide-react@0.469.0: + resolution: {integrity: sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + lucide-react@0.562.0: + resolution: {integrity: sha512-82hOAu7y0dbVuFfmO4bYF1XEwYk/mEbM5E+b1jgci/udUBEE/R7LF5Ip0CCEmXe8AybRM8L+04eP+LGZeDvkiw==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + markdown-extensions@2.0.0: + resolution: {integrity: sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==} + engines: {node: '>=16'} + + markdown-table@3.0.4: + resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + + marked@16.4.2: + resolution: {integrity: sha512-TI3V8YYWvkVf3KJe1dRkpnjs68JUPyEa5vjKrp1XEEJUAOaQc+Qj+L1qWbPd0SJuAdQkFU0h73sXXqwDYxsiDA==} + engines: {node: '>= 20'} + hasBin: true + + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + mdast-util-find-and-replace@3.0.2: + resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==} + + mdast-util-from-markdown@2.0.2: + resolution: {integrity: sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==} + + mdast-util-gfm-autolink-literal@2.0.1: + resolution: {integrity: 
sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==} + + mdast-util-gfm-footnote@2.1.0: + resolution: {integrity: sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==} + + mdast-util-gfm-strikethrough@2.0.0: + resolution: {integrity: sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==} + + mdast-util-gfm-table@2.0.0: + resolution: {integrity: sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==} + + mdast-util-gfm-task-list-item@2.0.0: + resolution: {integrity: sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==} + + mdast-util-gfm@3.1.0: + resolution: {integrity: sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==} + + mdast-util-math@3.0.0: + resolution: {integrity: sha512-Tl9GBNeG/AhJnQM221bJR2HPvLOSnLE/T9cJI9tlc6zwQk2nPk/4f0cHkOdEixQPC/j8UtKDdITswvLAy1OZ1w==} + + mdast-util-mdx-expression@2.0.1: + resolution: {integrity: sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==} + + mdast-util-mdx-jsx@3.2.0: + resolution: {integrity: sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==} + + mdast-util-mdx@3.0.0: + resolution: {integrity: sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==} + + mdast-util-mdxjs-esm@2.0.1: + resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==} + + mdast-util-newline-to-break@2.0.0: + resolution: {integrity: sha512-MbgeFca0hLYIEx/2zGsszCSEJJ1JSCdiY5xQxRcLDDGa8EPvlLPupJ4DSajbMPAnC0je8jfb9TiUATnxxrHUog==} + + mdast-util-phrasing@4.1.0: + resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==} + + mdast-util-to-hast@13.2.1: + resolution: {integrity: sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==} + + mdast-util-to-markdown@2.1.2: + resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} + + mdast-util-to-string@4.0.0: + resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} + + merge-value@1.0.0: + resolution: {integrity: sha512-fJMmvat4NeKz63Uv9iHWcPDjCWcCkoiRoajRTEO8hlhUC6rwaHg0QCF9hBOTjZmm4JuglPckPSTtcuJL5kp0TQ==} + engines: {node: '>=0.10.0'} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + mermaid@11.12.2: + resolution: {integrity: sha512-n34QPDPEKmaeCG4WDMGy0OT6PSyxKCfy2pJgShP+Qow2KLrvWjclwbc3yXfSIf4BanqWEhQEpngWwNp/XhZt6w==} + + micromark-core-commonmark@2.0.3: + resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} + + micromark-extension-cjk-friendly-util@2.1.1: + resolution: {integrity: sha512-egs6+12JU2yutskHY55FyR48ZiEcFOJFyk9rsiyIhcJ6IvWB6ABBqVrBw8IobqJTDZ/wdSr9eoXDPb5S2nW1bg==} + engines: {node: '>=16'} + peerDependencies: + micromark-util-types: '*' + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-cjk-friendly@1.2.3: + resolution: {integrity: 
sha512-gRzVLUdjXBLX6zNPSnHGDoo+ZTp5zy+MZm0g3sv+3chPXY7l9gW+DnrcHcZh/jiPR6MjPKO4AEJNp4Aw6V9z5Q==} + engines: {node: '>=16'} + peerDependencies: + micromark: ^4.0.0 + micromark-util-types: ^2.0.0 + peerDependenciesMeta: + micromark-util-types: + optional: true + + micromark-extension-gfm-autolink-literal@2.1.0: + resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} + + micromark-extension-gfm-footnote@2.1.0: + resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==} + + micromark-extension-gfm-strikethrough@2.1.0: + resolution: {integrity: sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==} + + micromark-extension-gfm-table@2.1.1: + resolution: {integrity: sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==} + + micromark-extension-gfm-tagfilter@2.0.0: + resolution: {integrity: sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==} + + micromark-extension-gfm-task-list-item@2.1.0: + resolution: {integrity: sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==} + + micromark-extension-gfm@3.0.0: + resolution: {integrity: sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==} + + micromark-extension-math@3.1.0: + resolution: {integrity: sha512-lvEqd+fHjATVs+2v/8kg9i5Q0AP2k85H0WUOwpIVvUML8BapsMvh1XAogmQjOCsLpoKRCVQqEkQBB3NhVBcsOg==} + + micromark-extension-mdx-expression@3.0.1: + resolution: {integrity: sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==} + + micromark-extension-mdx-jsx@3.0.2: + resolution: {integrity: sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==} + + micromark-extension-mdx-md@2.0.0: + resolution: {integrity: sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==} + + micromark-extension-mdxjs-esm@3.0.0: + resolution: {integrity: sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==} + + micromark-extension-mdxjs@3.0.0: + resolution: {integrity: sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==} + + micromark-factory-destination@2.0.1: + resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} + + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} + + micromark-factory-mdx-expression@2.0.3: + resolution: {integrity: sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==} + + micromark-factory-space@2.0.1: + resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} + + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} + + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} + + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + + 
micromark-util-chunked@2.0.1: + resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} + + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} + + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} + + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} + + micromark-util-decode-string@2.0.1: + resolution: {integrity: sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==} + + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} + + micromark-util-events-to-acorn@2.0.3: + resolution: {integrity: sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==} + + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} + + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} + + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} + + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + + micromark-util-subtokenize@2.1.0: + resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} + + micromark-util-symbol@2.0.1: + resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + + micromark-util-types@2.0.2: + resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==} + + micromark@4.0.2: + resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==} + + micromatch@4.0.8: + resolution: {integrity: sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==} + engines: {node: '>=8.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@9.0.5: + resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + mixin-deep@1.3.2: + resolution: {integrity: sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==} + engines: {node: '>=0.10.0'} + + mlly@1.8.0: + resolution: {integrity: 
sha512-l8D9ODSRWLe2KHJSifWGwBqpTZXIXTeo8mlKjY+E2HAakaTeNpqAyBZ8GSqLzHgw4XmHmC8whvpjJNMbFZN7/g==} + + motion-dom@12.23.23: + resolution: {integrity: sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA==} + + motion-utils@12.23.6: + resolution: {integrity: sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ==} + + motion@12.23.26: + resolution: {integrity: sha512-Ll8XhVxY8LXMVYTCfme27WH2GjBrCIzY4+ndr5QKxsK+YwCtOi2B/oBi5jcIbik5doXuWT/4KKDOVAZJkeY5VQ==} + peerDependencies: + '@emotion/is-prop-valid': '*' + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + peerDependenciesMeta: + '@emotion/is-prop-valid': + optional: true + react: + optional: true + react-dom: + optional: true + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + muggle-string@0.4.1: + resolution: {integrity: sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==} + + mz@2.7.0: + resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + node-releases@2.0.27: + resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + npm-run-path@6.0.0: + resolution: {integrity: sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==} + engines: {node: '>=18'} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + numeral@2.0.6: + resolution: {integrity: sha512-qaKRmtYPZ5qdw4jWJD6bxEf1FJEqllJrwxCLIm0sQU/A7v2/czigzOb+C2uSiFsa9lBUzeH7M1oK+Q+OLxL3kA==} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-hash@3.0.0: + resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + + on-change@4.0.2: + resolution: {integrity: sha512-cMtCyuJmTx/bg2HCpHo3ZLeF7FZnBOapLqZHr2AlLeJ5Ul0Zu2mUJJz051Fdwu/Et2YW04ZD+TtU+gVy0ACNCA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + oniguruma-parser@0.12.1: + resolution: {integrity: sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==} + + oniguruma-to-es@4.3.4: + resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: 
{integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + package-manager-detector@1.6.0: + resolution: {integrity: sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + parse5@7.3.0: + resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} + + path-browserify@1.0.1: + resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} + + path-data-parser@0.1.0: + resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-key@4.0.0: + resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} + engines: {node: '>=12'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + pathe@2.0.3: + resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + picomatch@4.0.3: + resolution: {integrity: sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==} + engines: {node: '>=12'} + + pify@2.3.0: + resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + + pinia@2.3.1: + resolution: {integrity: sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==} + peerDependencies: + typescript: '>=4.4.4' + vue: ^2.7.0 || ^3.5.11 + peerDependenciesMeta: + typescript: + optional: true + + pirates@4.0.7: + resolution: {integrity: 
sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==} + engines: {node: '>= 6'} + + pkg-types@1.3.1: + resolution: {integrity: sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ==} + + points-on-curve@0.2.0: + resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} + + points-on-path@0.2.1: + resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + + polished@4.3.1: + resolution: {integrity: sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==} + engines: {node: '>=10'} + + postcss-import@15.1.0: + resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} + engines: {node: '>=14.0.0'} + peerDependencies: + postcss: ^8.0.0 + + postcss-js@4.1.0: + resolution: {integrity: sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.4.21 + + postcss-load-config@6.0.1: + resolution: {integrity: sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==} + engines: {node: '>= 18'} + peerDependencies: + jiti: '>=1.21.0' + postcss: '>=8.0.9' + tsx: ^4.8.1 + yaml: ^2.4.2 + peerDependenciesMeta: + jiti: + optional: true + postcss: + optional: true + tsx: + optional: true + yaml: + optional: true + + postcss-nested@6.2.0: + resolution: {integrity: sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + + postcss-selector-parser@6.1.2: + resolution: {integrity: sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prop-types@15.8.1: + resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==} + + property-information@7.1.0: + resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + query-string@9.3.1: + resolution: {integrity: sha512-5fBfMOcDi5SA9qj5jZhWAcTtDfKF5WFdd2uD9nVNlbxVv1baq65aALy6qofpNEGELHvisjjasxQp7BlM9gvMzw==} + engines: {node: '>=18'} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + rc-collapse@4.0.0: + resolution: {integrity: 
sha512-SwoOByE39/3oIokDs/BnkqI+ltwirZbP8HZdq1/3SkPSBi7xDdvWHTp7cpNI9ullozkR6mwTWQi6/E/9huQVrA==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-dialog@9.6.0: + resolution: {integrity: sha512-ApoVi9Z8PaCQg6FsUzS8yvBEQy0ZL2PkuvAgrmohPkN3okps5WZ5WQWPc1RNuiOKaAYv8B97ACdsFU5LizzCqg==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-footer@0.6.8: + resolution: {integrity: sha512-JBZ+xcb6kkex8XnBd4VHw1ZxjV6kmcwUumSHaIFdka2qzMCo7Klcy4sI6G0XtUpG/vtpislQCc+S9Bc+NLHYMg==} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + rc-image@7.12.0: + resolution: {integrity: sha512-cZ3HTyyckPnNnUb9/DRqduqzLfrQRyi+CdHjdqgsyDpI3Ln5UX1kXnAhPBSJj9pVRzwRFgqkN7p9b6HBDjmu/Q==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-input-number@9.5.0: + resolution: {integrity: sha512-bKaEvB5tHebUURAEXw35LDcnRZLq3x1k7GxfAqBMzmpHkDGzjAtnUL8y4y5N15rIFIg5IJgwr211jInl3cipag==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-input@1.8.0: + resolution: {integrity: sha512-KXvaTbX+7ha8a/k+eg6SYRVERK0NddX8QX7a7AnRvUa/rEH0CNMlpcBzBkhI0wp2C8C4HlMoYl8TImSN+fuHKA==} + peerDependencies: + react: '>=16.0.0' + react-dom: '>=16.0.0' + + rc-menu@9.16.1: + resolution: {integrity: sha512-ghHx6/6Dvp+fw8CJhDUHFHDJ84hJE3BXNCzSgLdmNiFErWSOaZNsihDAsKq9ByTALo/xkNIwtDFGIl6r+RPXBg==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-motion@2.9.5: + resolution: {integrity: sha512-w+XTUrfh7ArbYEd2582uDrEhmBHwK1ZENJiSJVb7uRxdE7qJSYjbO2eksRXmndqyKqKoYPc9ClpPh5242mV1vA==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-overflow@1.5.0: + resolution: {integrity: sha512-Lm/v9h0LymeUYJf0x39OveU52InkdRXqnn2aYXfWmo8WdOonIKB2kfau+GF0fWq6jPgtdO9yMqveGcK6aIhJmg==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-resize-observer@1.4.3: + resolution: {integrity: sha512-YZLjUbyIWox8E9i9C3Tm7ia+W7euPItNWSPX5sCcQTYbnwDb5uNpnLHQCG1f22oZWUhLw4Mv2tFmeWe68CDQRQ==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + rc-util@5.44.4: + resolution: {integrity: sha512-resueRJzmHG9Q6rI/DfK6Kdv9/Lfls05vzMs1Sk3M2P+3cJa+MakaZyWY8IPfehVuhPJFKrIY1IK4GqbiaiY5w==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + re-resizable@6.11.2: + resolution: {integrity: sha512-2xI2P3OHs5qw7K0Ud1aLILK6MQxW50TcO+DetD9eIV58j84TqYeHoZcL9H4GXFXXIh7afhH8mv5iUCXII7OW7A==} + peerDependencies: + react: ^16.13.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.13.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + react-avatar-editor@14.0.0: + resolution: {integrity: sha512-NaQM3oo4u0a1/Njjutc2FjwKX35vQV+t6S8hovsbAlMpBN1ntIwP/g+Yr9eDIIfaNtRXL0AqboTnPmRxhD/i8A==} + peerDependencies: + react: ^0.14.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^0.14.0 || ^16.0.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + react-colorful@5.6.1: + resolution: {integrity: sha512-1exovf0uGTGyq5mXQT0zgQ80uvj2PCwvF8zY1RN9/vbJVSjSo3fsB/4L3ObbF7u70NduSiK4xu4Y6q1MHoUGEw==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + react-dom@19.2.3: + resolution: {integrity: sha512-yELu4WmLPw5Mr/lmeEpox5rw3RETacE++JgHqQzd2dg+YbJuat3jH4ingc+WPZhxaoFzdv9y33G+F7Nl5O0GBg==} + peerDependencies: + react: ^19.2.3 + + react-draggable@4.4.6: + resolution: {integrity: sha512-LtY5Xw1zTPqHkVmtM3X8MUOxNDOUhv/khTgBgrUvwaS064bwVvxT+q5El0uUFNx5IEPKXuRejr7UqLwBIg5pdw==} + peerDependencies: + react: '>= 16.3.0' + react-dom: '>= 16.3.0' + + react-dropzone@12.1.0: + resolution: {integrity: 
sha512-iBYHA1rbopIvtzokEX4QubO6qk5IF/x3BtKGu74rF2JkQDXnwC4uO/lHKpaw4PJIV6iIAYOlwLv2FpiGyqHNog==} + engines: {node: '>= 10.13'} + peerDependencies: + react: '>= 16.8' + + react-error-boundary@6.0.1: + resolution: {integrity: sha512-zArgQpjJUN1ZLMEKWtifxQweW3yfvwL5j2nh3Pesze1qG6r5oCDMy/TA97bUF01wy4xCeeL4/pd8GHmvEsP3Bg==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + + react-fast-compare@3.2.2: + resolution: {integrity: sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ==} + + react-hotkeys-hook@5.2.1: + resolution: {integrity: sha512-xbKh6zJxd/vJHT4Bw4+0pBD662Fk20V+VFhLqciCg+manTVO4qlqRqiwFOYelfHN9dBvWj9vxaPkSS26ZSIJGg==} + peerDependencies: + react: '>=16.8.0' + react-dom: '>=16.8.0' + + react-is@16.13.1: + resolution: {integrity: sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + + react-is@18.3.1: + resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} + + react-markdown@10.1.0: + resolution: {integrity: sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==} + peerDependencies: + '@types/react': '>=18' + react: '>=18' + + react-merge-refs@3.0.2: + resolution: {integrity: sha512-MSZAfwFfdbEvwkKWP5EI5chuLYnNUxNS7vyS0i1Jp+wtd8J4Ga2ddzhaE68aMol2Z4vCnRM/oGOo1a3V75UPlw==} + peerDependencies: + react: '>=16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0' + peerDependenciesMeta: + react: + optional: true + + react-rnd@10.5.2: + resolution: {integrity: sha512-0Tm4x7k7pfHf2snewJA8x7Nwgt3LV+58MVEWOVsFjk51eYruFEa6Wy7BNdxt4/lH0wIRsu7Gm3KjSXY2w7YaNw==} + peerDependencies: + react: '>=16.3.0' + react-dom: '>=16.3.0' + + react-zoom-pan-pinch@3.7.0: + resolution: {integrity: sha512-UmReVZ0TxlKzxSbYiAj+LeGRW8s8LraAFTXRAxzMYnNRgGPsxCudwZKVkjvGmjtx7SW/hZamt69NUmGf4xrkXA==} + engines: {node: '>=8', npm: '>=5'} + peerDependencies: + react: '*' + react-dom: '*' + + react@19.2.3: + resolution: {integrity: sha512-Ku/hhYbVjOQnXDZFv2+RibmLFGwFdeeKHFcOTlrt7xplBnya5OGn/hIRDsqDiSUcfORsDC7MPxwork8jBwsIWA==} + engines: {node: '>=0.10.0'} + + read-cache@1.0.0: + resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + readdirp@4.1.2: + resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} + engines: {node: '>= 14.18.0'} + + recma-build-jsx@1.0.0: + resolution: {integrity: sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==} + + recma-jsx@1.0.1: + resolution: {integrity: sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + recma-parse@1.0.0: + resolution: {integrity: sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==} + + recma-stringify@1.0.0: + resolution: {integrity: sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==} + + regex-recursion@6.0.2: + resolution: {integrity: sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==} + + regex-utilities@2.3.0: + resolution: {integrity: 
sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==} + + regex@6.1.0: + resolution: {integrity: sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==} + + rehype-github-alerts@4.2.0: + resolution: {integrity: sha512-6di6kEu9WUHKLKrkKG2xX6AOuaCMGghg0Wq7MEuM/jBYUPVIq6PJpMe00dxMfU+/YSBtDXhffpDimgDi+BObIQ==} + + rehype-katex@7.0.1: + resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} + + rehype-raw@7.0.0: + resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + + rehype-recma@1.0.0: + resolution: {integrity: sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==} + + remark-breaks@4.0.0: + resolution: {integrity: sha512-IjEjJOkH4FuJvHZVIW0QCDWxcG96kCq7An/KVH2NfJe6rKZU2AsHeB3OEjPNRxi4QC34Xdx7I2KGYn6IpT7gxQ==} + + remark-cjk-friendly@1.2.3: + resolution: {integrity: sha512-UvAgxwlNk+l9Oqgl/9MWK2eWRS7zgBW/nXX9AthV7nd/3lNejF138E7Xbmk9Zs4WjTJGs721r7fAEc7tNFoH7g==} + engines: {node: '>=16'} + peerDependencies: + '@types/mdast': ^4.0.0 + unified: ^11.0.0 + peerDependenciesMeta: + '@types/mdast': + optional: true + + remark-gfm@4.0.1: + resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==} + + remark-github@12.0.0: + resolution: {integrity: sha512-ByefQKFN184LeiGRCabfl7zUJsdlMYWEhiLX1gpmQ11yFg6xSuOTW7LVCv0oc1x+YvUMJW23NU36sJX2RWGgvg==} + + remark-math@6.0.0: + resolution: {integrity: sha512-MMqgnP74Igy+S3WwnhQ7kqGlEerTETXMvJhrUzDikVZ2/uogJCb+WHUg97hK9/jcfc0dkD73s3LN8zU49cTEtA==} + + remark-mdx@3.1.1: + resolution: {integrity: sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==} + + remark-parse@11.0.0: + resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==} + + remark-rehype@11.1.2: + resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==} + + remark-stringify@11.0.0: + resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + + reselect@5.1.1: + resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==} + + resize-observer-polyfill@1.5.1: + resolution: {integrity: sha512-LwZrotdHOo12nQuZlHEmtuXdqGoOD0OhaxopaNFxWzInpEgaLWoVuAMbTzixuosCx2nEG58ngzW3vxdWoxIgdg==} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve@1.22.11: + resolution: {integrity: sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==} + engines: {node: '>= 0.4'} + hasBin: true + + reusify@1.1.0: + resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + robust-predicates@3.0.2: + resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + + 
rollup@4.54.0: + resolution: {integrity: sha512-3nk8Y3a9Ea8szgKhinMlGMhGMw89mqule3KWczxhIzqudyHdCIOHw8WJlj/r329fACjKLEh13ZSk7oE22kyeIw==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + roughjs@4.6.6: + resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + rw@1.3.3: + resolution: {integrity: sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + scheduler@0.27.0: + resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==} + + screenfull@5.2.0: + resolution: {integrity: sha512-9BakfsO2aUQN2K9Fdbj87RJIEZ82Q9IGim7FqM5OsebfoFC6ZHXgDq/KvniuLTPdeM8wY2o6Dj3WQ7KeQCj3cA==} + engines: {node: '>=0.10.0'} + + scroll-into-view-if-needed@3.1.0: + resolution: {integrity: sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ==} + + semver-compare@1.0.0: + resolution: {integrity: sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==} + + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + + set-value@2.0.1: + resolution: {integrity: sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==} + engines: {node: '>=0.10.0'} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + shiki-stream@0.1.3: + resolution: {integrity: sha512-pDIqmaP/zJWHNV8bJKp0tD0CZ6OkF+lWTIvmNRLktlTjBjN3+durr19JarS657U1oSEf/WrSYmdzwr9CeD6m2Q==} + peerDependencies: + react: ^19.0.0 + vue: ^3.2.0 + peerDependenciesMeta: + react: + optional: true + vue: + optional: true + + shiki@3.20.0: + resolution: {integrity: sha512-kgCOlsnyWb+p0WU+01RjkCH+eBVsjL1jOwUYWv0YDWkM2/A46+LDKVs5yZCUXjJG6bj4ndFoAg5iLIIue6dulg==} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map@0.5.7: + resolution: {integrity: sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==} + engines: {node: '>=0.10.0'} + + source-map@0.7.6: + resolution: {integrity: sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==} + engines: {node: '>= 12'} + + space-separated-tokens@2.0.2: + resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} + + split-on-first@3.0.0: + resolution: {integrity: sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==} + engines: {node: '>=12'} + + 
split-string@3.1.0: + resolution: {integrity: sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} + engines: {node: '>=0.10.0'} + + ssf@0.11.2: + resolution: {integrity: sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==} + engines: {node: '>=0.8'} + + string-convert@0.2.1: + resolution: {integrity: sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A==} + + stringify-entities@4.0.4: + resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.1.2: + resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==} + engines: {node: '>=12'} + + strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + style-to-js@1.1.21: + resolution: {integrity: sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==} + + style-to-object@1.0.14: + resolution: {integrity: sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==} + + stylis@4.2.0: + resolution: {integrity: sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==} + + stylis@4.3.6: + resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + + sucrase@3.35.1: + resolution: {integrity: sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==} + engines: {node: '>=16 || 14 >=14.17'} + hasBin: true + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + swr@2.3.8: + resolution: {integrity: sha512-gaCPRVoMq8WGDcWj9p4YWzCMPHzE0WNl6W8ADIx9c3JBEIdMkJGMzW+uzXvxHMltwcYACr9jP+32H8/hgwMR7w==} + peerDependencies: + react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + tabbable@6.4.0: + resolution: {integrity: sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg==} + + tailwindcss@3.4.19: + resolution: {integrity: sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==} + engines: {node: '>=14.0.0'} + hasBin: true + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + thenify-all@1.6.0: + resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} + engines: {node: '>=0.8'} + + thenify@3.3.1: + resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + + throttle-debounce@5.0.2: + resolution: {integrity: sha512-B71/4oyj61iNH0KeCamLuE2rmKuTO5byTOSVwECM5FA7TiAiAW+UqTKZ9ERueC4qvgSttUhdmq1mXC3kJqGX7A==} + engines: {node: '>=12.22'} + + tiny-invariant@1.3.3: + resolution: {integrity: 
sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + + tinyexec@1.0.2: + resolution: {integrity: sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} + engines: {node: '>=18'} + + tinyglobby@0.2.15: + resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} + engines: {node: '>=12.0.0'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + to-vfile@8.0.0: + resolution: {integrity: sha512-IcmH1xB5576MJc9qcfEC/m/nQCFt3fzMHz45sSlgJyTWjRbKW1HAkJpuf3DgE57YzIlZcwcBZA5ENQbBo4aLkg==} + + trim-lines@3.0.1: + resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} + + trough@2.2.0: + resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} + + ts-api-utils@1.4.3: + resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==} + engines: {node: '>=16'} + peerDependencies: + typescript: '>=4.2.0' + + ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + + ts-interface-checker@0.1.13: + resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + + ts-md5@2.0.1: + resolution: {integrity: sha512-yF35FCoEOFBzOclSkMNEUbFQZuv89KEQ+5Xz03HrMSGUGB1+r+El+JiGOFwsP4p9RFNzwlrydYoTLvPOuICl9w==} + engines: {node: '>=18'} + + tslib@2.6.2: + resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + typescript@5.6.3: + resolution: {integrity: sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==} + engines: {node: '>=14.17'} + hasBin: true + + ufo@1.6.1: + resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==} + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + unicorn-magic@0.3.0: + resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==} + engines: {node: '>=18'} + + unified@11.0.5: + resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==} + + unist-util-find-after@5.0.0: + resolution: {integrity: sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==} + + unist-util-is@6.0.1: + resolution: {integrity: sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==} + + unist-util-position-from-estree@2.0.0: + resolution: {integrity: 
sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==} + + unist-util-position@5.0.0: + resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==} + + unist-util-remove-position@5.0.0: + resolution: {integrity: sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==} + + unist-util-stringify-position@4.0.0: + resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} + + unist-util-visit-parents@6.0.2: + resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==} + + unist-util-visit@5.0.0: + resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==} + + update-browserslist-db@1.2.3: + resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + url-join@5.0.0: + resolution: {integrity: sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + + use-merge-value@1.2.0: + resolution: {integrity: sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==} + peerDependencies: + react: '>= 16.x' + + use-sync-external-store@1.6.0: + resolution: {integrity: sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + uuid@13.0.0: + resolution: {integrity: sha512-XQegIaBTVUjSHliKqcnFqYypAd4S+WCYt5NIeRs6w/UAry7z8Y9j5ZwRRL4kzq9U3sD6v+85er9FvkEaBpji2w==} + hasBin: true + + v8n@1.5.1: + resolution: {integrity: sha512-LdabyT4OffkyXFCe9UT+uMkxNBs5rcTVuZClvxQr08D5TUgo1OFKkoT65qYRCsiKBl/usHjpXvP4hHMzzDRj3A==} + + vfile-location@5.0.3: + resolution: {integrity: sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==} + + vfile-message@4.0.3: + resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==} + + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} + + vite-plugin-checker@0.9.3: + resolution: {integrity: sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==} + engines: {node: '>=14.16'} + peerDependencies: + '@biomejs/biome': '>=1.7' + eslint: '>=7' + meow: ^13.2.0 + optionator: ^0.9.4 + stylelint: '>=16' + typescript: '*' + vite: '>=2.0.0' + vls: '*' + vti: '*' + vue-tsc: ~2.2.10 + peerDependenciesMeta: + '@biomejs/biome': + optional: true + eslint: + optional: true + meow: + optional: true + optionator: + optional: true + stylelint: + optional: true + typescript: + optional: true + vls: + optional: true + vti: + optional: true + vue-tsc: + optional: true 
+ + vite@5.4.21: + resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || >=20.0.0 + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + + vscode-jsonrpc@8.2.0: + resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} + engines: {node: '>=14.0.0'} + + vscode-languageserver-protocol@3.17.5: + resolution: {integrity: sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==} + + vscode-languageserver-textdocument@1.0.12: + resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} + + vscode-languageserver-types@3.17.5: + resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==} + + vscode-languageserver@9.0.1: + resolution: {integrity: sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==} + hasBin: true + + vscode-uri@3.0.8: + resolution: {integrity: sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==} + + vscode-uri@3.1.0: + resolution: {integrity: sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==} + + vue-chartjs@5.3.3: + resolution: {integrity: sha512-jqxtL8KZ6YJ5NTv6XzrzLS7osyegOi28UGNZW0h9OkDL7Sh1396ht4Dorh04aKrl2LiSalQ84WtqiG0RIJb0tA==} + peerDependencies: + chart.js: ^4.1.1 + vue: ^3.0.0-0 || ^2.7.0 + + vue-demi@0.14.10: + resolution: {integrity: sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==} + engines: {node: '>=12'} + hasBin: true + peerDependencies: + '@vue/composition-api': ^1.0.0-rc.1 + vue: ^3.0.0-0 || ^2.6.0 + peerDependenciesMeta: + '@vue/composition-api': + optional: true + + vue-eslint-parser@9.4.3: + resolution: {integrity: sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: '>=6.0.0' + + vue-i18n@9.14.5: + resolution: {integrity: sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==} + engines: {node: '>= 16'} + peerDependencies: + vue: ^3.0.0 + + vue-router@4.6.4: + resolution: {integrity: sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==} + peerDependencies: + vue: ^3.5.0 + + vue-tsc@2.2.12: + resolution: {integrity: sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==} + hasBin: true + peerDependencies: + typescript: '>=5.0.0' + + vue@3.5.26: + resolution: {integrity: sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + web-namespaces@2.0.1: + resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + + which@2.0.2: + resolution: {integrity: 
sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + wmf@1.0.2: + resolution: {integrity: sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==} + engines: {node: '>=0.8'} + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + word@0.3.0: + resolution: {integrity: sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==} + engines: {node: '>=0.8'} + + wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + xlsx@0.18.5: + resolution: {integrity: sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==} + engines: {node: '>=0.8'} + hasBin: true + + xml-name-validator@4.0.0: + resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} + engines: {node: '>=12'} + + yaml@1.10.2: + resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} + engines: {node: '>= 6'} + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zustand@3.7.2: + resolution: {integrity: sha512-PIJDIZKtokhof+9+60cpockVOq05sJzHCriyvaLBmEJixseQ1a5Kdov6fWZfWOu5SK9c+FhH1jU0tntLxRJYMA==} + engines: {node: '>=12.7.0'} + peerDependencies: + react: '>=16.8' + peerDependenciesMeta: + react: + optional: true + + zwitch@2.0.4: + resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@ant-design/colors@8.0.0': + dependencies: + '@ant-design/fast-color': 3.0.0 + + '@ant-design/cssinjs-utils@2.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@ant-design/cssinjs': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@babel/runtime': 7.28.4 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@ant-design/cssinjs@2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@emotion/hash': 0.8.0 + '@emotion/unitless': 0.7.5 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + csstype: 3.2.3 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + stylis: 4.3.6 + + '@ant-design/fast-color@3.0.0': {} + + '@ant-design/icons-svg@4.4.2': {} + + '@ant-design/icons@6.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@ant-design/colors': 8.0.0 + '@ant-design/icons-svg': 4.4.2 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@ant-design/react-slick@2.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + clsx: 2.1.1 + json2mq: 0.2.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + throttle-debounce: 5.0.2 + + '@antfu/install-pkg@1.1.0': + dependencies: + package-manager-detector: 1.6.0 + tinyexec: 1.0.2 + + '@babel/code-frame@7.27.1': + dependencies: + '@babel/helper-validator-identifier': 7.28.5 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + 
'@babel/generator@7.28.5': + dependencies: + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + jsesc: 3.1.0 + + '@babel/helper-globals@7.28.0': {} + + '@babel/helper-module-imports@7.27.1': + dependencies: + '@babel/traverse': 7.28.5 + '@babel/types': 7.28.5 + transitivePeerDependencies: + - supports-color + + '@babel/helper-string-parser@7.27.1': {} + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/parser@7.28.5': + dependencies: + '@babel/types': 7.28.5 + + '@babel/runtime@7.28.4': {} + + '@babel/template@7.27.2': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + + '@babel/traverse@7.28.5': + dependencies: + '@babel/code-frame': 7.27.1 + '@babel/generator': 7.28.5 + '@babel/helper-globals': 7.28.0 + '@babel/parser': 7.28.5 + '@babel/template': 7.27.2 + '@babel/types': 7.28.5 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.28.5': + dependencies: + '@babel/helper-string-parser': 7.27.1 + '@babel/helper-validator-identifier': 7.28.5 + + '@base-ui/react@1.0.0(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@base-ui/utils': 0.2.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@floating-ui/react-dom': 2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@floating-ui/utils': 0.2.10 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + reselect: 5.1.1 + tabbable: 6.4.0 + use-sync-external-store: 1.6.0(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@base-ui/utils@0.2.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@floating-ui/utils': 0.2.10 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + reselect: 5.1.1 + use-sync-external-store: 1.6.0(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@braintree/sanitize-url@7.1.1': {} + + '@chevrotain/cst-dts-gen@11.0.3': + dependencies: + '@chevrotain/gast': 11.0.3 + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.21 + + '@chevrotain/gast@11.0.3': + dependencies: + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.21 + + '@chevrotain/regexp-to-ast@11.0.3': {} + + '@chevrotain/types@11.0.3': {} + + '@chevrotain/utils@11.0.3': {} + + '@dnd-kit/accessibility@3.1.1(react@19.2.3)': + dependencies: + react: 19.2.3 + tslib: 2.8.1 + + '@dnd-kit/core@6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@dnd-kit/accessibility': 3.1.1(react@19.2.3) + '@dnd-kit/utilities': 3.2.2(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + tslib: 2.8.1 + + '@dnd-kit/modifiers@9.0.0(@dnd-kit/core@6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3)': + dependencies: + '@dnd-kit/core': 6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@dnd-kit/utilities': 3.2.2(react@19.2.3) + react: 19.2.3 + tslib: 2.8.1 + + '@dnd-kit/sortable@10.0.0(@dnd-kit/core@6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3)': + dependencies: + '@dnd-kit/core': 6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@dnd-kit/utilities': 3.2.2(react@19.2.3) + react: 19.2.3 + tslib: 2.8.1 + + '@dnd-kit/utilities@3.2.2(react@19.2.3)': + dependencies: + react: 19.2.3 + tslib: 2.8.1 + + '@emoji-mart/data@1.2.1': {} + + '@emoji-mart/react@1.1.1(emoji-mart@5.6.0)(react@19.2.3)': + dependencies: + emoji-mart: 5.6.0 + react: 19.2.3 + + '@emotion/babel-plugin@11.13.5': + dependencies: + 
'@babel/helper-module-imports': 7.27.1 + '@babel/runtime': 7.28.4 + '@emotion/hash': 0.9.2 + '@emotion/memoize': 0.9.0 + '@emotion/serialize': 1.3.3 + babel-plugin-macros: 3.1.0 + convert-source-map: 1.9.0 + escape-string-regexp: 4.0.0 + find-root: 1.1.0 + source-map: 0.5.7 + stylis: 4.2.0 + transitivePeerDependencies: + - supports-color + + '@emotion/cache@11.14.0': + dependencies: + '@emotion/memoize': 0.9.0 + '@emotion/sheet': 1.4.0 + '@emotion/utils': 1.4.2 + '@emotion/weak-memoize': 0.4.0 + stylis: 4.2.0 + + '@emotion/css@11.13.5': + dependencies: + '@emotion/babel-plugin': 11.13.5 + '@emotion/cache': 11.14.0 + '@emotion/serialize': 1.3.3 + '@emotion/sheet': 1.4.0 + '@emotion/utils': 1.4.2 + transitivePeerDependencies: + - supports-color + + '@emotion/hash@0.8.0': {} + + '@emotion/hash@0.9.2': {} + + '@emotion/is-prop-valid@1.4.0': + dependencies: + '@emotion/memoize': 0.9.0 + + '@emotion/memoize@0.9.0': {} + + '@emotion/react@11.14.0(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@emotion/babel-plugin': 11.13.5 + '@emotion/cache': 11.14.0 + '@emotion/serialize': 1.3.3 + '@emotion/use-insertion-effect-with-fallbacks': 1.2.0(react@19.2.3) + '@emotion/utils': 1.4.2 + '@emotion/weak-memoize': 0.4.0 + hoist-non-react-statics: 3.3.2 + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + transitivePeerDependencies: + - supports-color + + '@emotion/serialize@1.3.3': + dependencies: + '@emotion/hash': 0.9.2 + '@emotion/memoize': 0.9.0 + '@emotion/unitless': 0.10.0 + '@emotion/utils': 1.4.2 + csstype: 3.2.3 + + '@emotion/sheet@1.4.0': {} + + '@emotion/unitless@0.10.0': {} + + '@emotion/unitless@0.7.5': {} + + '@emotion/use-insertion-effect-with-fallbacks@1.2.0(react@19.2.3)': + dependencies: + react: 19.2.3 + + '@emotion/utils@1.4.2': {} + + '@emotion/weak-memoize@0.4.0': {} + + '@esbuild/aix-ppc64@0.21.5': + optional: true + + '@esbuild/android-arm64@0.21.5': + optional: true + + '@esbuild/android-arm@0.21.5': + optional: true + + '@esbuild/android-x64@0.21.5': + optional: true + + '@esbuild/darwin-arm64@0.21.5': + optional: true + + '@esbuild/darwin-x64@0.21.5': + optional: true + + '@esbuild/freebsd-arm64@0.21.5': + optional: true + + '@esbuild/freebsd-x64@0.21.5': + optional: true + + '@esbuild/linux-arm64@0.21.5': + optional: true + + '@esbuild/linux-arm@0.21.5': + optional: true + + '@esbuild/linux-ia32@0.21.5': + optional: true + + '@esbuild/linux-loong64@0.21.5': + optional: true + + '@esbuild/linux-mips64el@0.21.5': + optional: true + + '@esbuild/linux-ppc64@0.21.5': + optional: true + + '@esbuild/linux-riscv64@0.21.5': + optional: true + + '@esbuild/linux-s390x@0.21.5': + optional: true + + '@esbuild/linux-x64@0.21.5': + optional: true + + '@esbuild/netbsd-x64@0.21.5': + optional: true + + '@esbuild/openbsd-x64@0.21.5': + optional: true + + '@esbuild/sunos-x64@0.21.5': + optional: true + + '@esbuild/win32-arm64@0.21.5': + optional: true + + '@esbuild/win32-ia32@0.21.5': + optional: true + + '@esbuild/win32-x64@0.21.5': + optional: true + + '@eslint-community/eslint-utils@4.9.1(eslint@8.57.1)': + dependencies: + eslint: 8.57.1 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.12.2': {} + + '@eslint/eslintrc@2.1.4': + dependencies: + ajv: 6.12.6 + debug: 4.4.3 + espree: 9.6.1 + globals: 13.24.0 + ignore: 5.3.2 + import-fresh: 3.3.1 + js-yaml: 4.1.1 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@8.57.1': {} + + '@floating-ui/core@1.7.3': + dependencies: + 
'@floating-ui/utils': 0.2.10 + + '@floating-ui/dom@1.7.4': + dependencies: + '@floating-ui/core': 1.7.3 + '@floating-ui/utils': 0.2.10 + + '@floating-ui/react-dom@2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@floating-ui/dom': 1.7.4 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@floating-ui/react@0.27.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@floating-ui/react-dom': 2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@floating-ui/utils': 0.2.10 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + tabbable: 6.4.0 + + '@floating-ui/utils@0.2.10': {} + + '@giscus/react@3.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + giscus: 1.6.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@humanwhocodes/config-array@0.13.0': + dependencies: + '@humanwhocodes/object-schema': 2.0.3 + debug: 4.4.3 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@2.0.3': {} + + '@iconify/types@2.0.0': {} + + '@iconify/utils@3.1.0': + dependencies: + '@antfu/install-pkg': 1.1.0 + '@iconify/types': 2.0.0 + mlly: 1.8.0 + + '@intlify/core-base@9.14.5': + dependencies: + '@intlify/message-compiler': 9.14.5 + '@intlify/shared': 9.14.5 + + '@intlify/message-compiler@9.14.5': + dependencies: + '@intlify/shared': 9.14.5 + source-map-js: 1.2.1 + + '@intlify/shared@9.14.5': {} + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@kurkle/color@0.3.4': {} + + '@lit-labs/ssr-dom-shim@1.5.0': {} + + '@lit/reactive-element@2.1.2': + dependencies: + '@lit-labs/ssr-dom-shim': 1.5.0 + + '@lobehub/emojilib@1.0.0': {} + + '@lobehub/fluent-emoji@4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@lobehub/emojilib': 1.0.0 + antd-style: 4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + emoji-regex: 10.6.0 + es-toolkit: 1.43.0 + lucide-react: 0.562.0(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + url-join: 5.0.0 + transitivePeerDependencies: + - '@types/react' + - antd + - supports-color + + '@lobehub/icons@4.0.2(@lobehub/ui@4.9.2)(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@lobehub/ui': 4.9.2(@lobehub/fluent-emoji@4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@lobehub/icons@4.0.2)(@types/mdast@4.0.4)(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(micromark-util-types@2.0.2)(micromark@4.0.2)(motion@12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vue@3.5.26(typescript@5.6.3)) + antd: 6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + antd-style: 4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + lucide-react: 0.469.0(react@19.2.3) + polished: 4.3.1 + react: 19.2.3 + react-dom: 
19.2.3(react@19.2.3) + transitivePeerDependencies: + - '@types/react' + - supports-color + + '@lobehub/ui@4.9.2(@lobehub/fluent-emoji@4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(@lobehub/icons@4.0.2)(@types/mdast@4.0.4)(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(micromark-util-types@2.0.2)(micromark@4.0.2)(motion@12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3)(vue@3.5.26(typescript@5.6.3))': + dependencies: + '@ant-design/cssinjs': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@base-ui/react': 1.0.0(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@dnd-kit/core': 6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@dnd-kit/modifiers': 9.0.0(@dnd-kit/core@6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3) + '@dnd-kit/sortable': 10.0.0(@dnd-kit/core@6.3.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react@19.2.3) + '@dnd-kit/utilities': 3.2.2(react@19.2.3) + '@emoji-mart/data': 1.2.1 + '@emoji-mart/react': 1.1.1(emoji-mart@5.6.0)(react@19.2.3) + '@emotion/is-prop-valid': 1.4.0 + '@floating-ui/react': 0.27.16(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@giscus/react': 3.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@lobehub/fluent-emoji': 4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@lobehub/icons': 4.0.2(@lobehub/ui@4.9.2)(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@mdx-js/mdx': 3.1.1 + '@mdx-js/react': 3.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-slot': 1.2.4(@types/react@19.2.7)(react@19.2.3) + '@shikijs/core': 3.20.0 + '@shikijs/transformers': 3.20.0 + '@splinetool/runtime': 0.9.526 + ahooks: 3.9.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + antd: 6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + antd-style: 4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + chroma-js: 3.2.0 + class-variance-authority: 0.7.1 + clsx: 2.1.1 + dayjs: 1.11.19 + emoji-mart: 5.6.0 + es-toolkit: 1.43.0 + fast-deep-equal: 3.1.3 + immer: 11.1.3 + katex: 0.16.27 + leva: 0.10.1(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + lucide-react: 0.562.0(react@19.2.3) + marked: 17.0.1 + mermaid: 11.12.2 + motion: 12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + numeral: 2.0.6 + polished: 4.3.1 + query-string: 9.3.1 + rc-collapse: 4.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-footer: 0.6.8(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-image: 7.12.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-input-number: 9.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-menu: 9.16.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + re-resizable: 6.11.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-avatar-editor: 14.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react-dom: 19.2.3(react@19.2.3) + react-error-boundary: 6.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react-hotkeys-hook: 5.2.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react-markdown: 10.1.0(@types/react@19.2.7)(react@19.2.3) + react-merge-refs: 3.0.2(react@19.2.3) + react-rnd: 
10.5.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react-zoom-pan-pinch: 3.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rehype-github-alerts: 4.2.0 + rehype-katex: 7.0.1 + rehype-raw: 7.0.0 + remark-breaks: 4.0.0 + remark-cjk-friendly: 1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5) + remark-gfm: 4.0.1 + remark-github: 12.0.0 + remark-math: 6.0.0 + shiki: 3.20.0 + shiki-stream: 0.1.3(react@19.2.3)(vue@3.5.26(typescript@5.6.3)) + swr: 2.3.8(react@19.2.3) + ts-md5: 2.0.1 + unified: 11.0.5 + url-join: 5.0.0 + use-merge-value: 1.2.0(react@19.2.3) + uuid: 13.0.0 + transitivePeerDependencies: + - '@types/mdast' + - '@types/react' + - '@types/react-dom' + - micromark + - micromark-util-types + - supports-color + - vue + + '@mdx-js/mdx@3.1.1': + dependencies: + '@types/estree': 1.0.8 + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdx': 2.0.13 + acorn: 8.15.0 + collapse-white-space: 2.1.0 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + estree-util-scope: 1.0.0 + estree-walker: 3.0.3 + hast-util-to-jsx-runtime: 2.3.6 + markdown-extensions: 2.0.0 + recma-build-jsx: 1.0.0 + recma-jsx: 1.0.1(acorn@8.15.0) + recma-stringify: 1.0.0 + rehype-recma: 1.0.0 + remark-mdx: 3.1.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + source-map: 0.7.6 + unified: 11.0.5 + unist-util-position-from-estree: 2.0.0 + unist-util-stringify-position: 4.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@mdx-js/react@3.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@types/mdx': 2.0.13 + '@types/react': 19.2.7 + react: 19.2.3 + + '@mermaid-js/parser@0.6.3': + dependencies: + langium: 3.3.1 + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.20.1 + + '@primer/octicons@19.21.1': + dependencies: + object-assign: 4.1.1 + + '@radix-ui/primitive@1.1.3': {} + + '@radix-ui/react-arrow@1.1.7(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-compose-refs@1.1.2(@types/react@19.2.7)(react@19.2.3)': + dependencies: + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-context@1.1.2(@types/react@19.2.7)(react@19.2.3)': + dependencies: + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-dismissable-layer@1.1.11(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-escape-keydown': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-id@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + 
'@radix-ui/react-popper@1.2.8(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@floating-ui/react-dom': 2.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-arrow': 1.1.7(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-context': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-rect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-size': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/rect': 1.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-portal@1.1.10(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-primitive': 2.1.4(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-portal@1.1.9(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-presence@1.1.5(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-primitive@2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-slot': 1.2.3(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-primitive@2.1.4(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-slot': 1.2.4(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-slot@1.2.3(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-slot@1.2.4(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-tooltip@1.2.8(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/primitive': 1.1.3 + '@radix-ui/react-compose-refs': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-context': 1.1.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-dismissable-layer': 
1.1.11(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-id': 1.1.1(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-popper': 1.2.8(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-portal': 1.1.9(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-presence': 1.1.5(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-slot': 1.2.3(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-controllable-state': 1.2.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-visually-hidden': 1.2.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-callback-ref@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-controllable-state@1.2.2(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-use-effect-event': 0.0.2(@types/react@19.2.7)(react@19.2.3) + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-effect-event@0.0.2(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-escape-keydown@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-use-callback-ref': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-layout-effect@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-rect@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/rect': 1.1.1 + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-use-size@1.1.1(@types/react@19.2.7)(react@19.2.3)': + dependencies: + '@radix-ui/react-use-layout-effect': 1.1.1(@types/react@19.2.7)(react@19.2.3) + react: 19.2.3 + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/react-visually-hidden@1.2.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@radix-ui/react-primitive': 2.1.3(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + '@types/react': 19.2.7 + + '@radix-ui/rect@1.1.1': {} + + '@rc-component/async-validator@5.0.4': + dependencies: + '@babel/runtime': 7.28.4 + + '@rc-component/cascader@1.10.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/select': 1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tree': 1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/checkbox@1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + 
'@rc-component/collapse@1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/color-picker@3.0.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@ant-design/fast-color': 3.0.0 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/context@2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/dialog@1.5.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/portal': 2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/drawer@1.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/portal': 2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/dropdown@1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/form@1.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/async-validator': 5.0.4 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/image@1.5.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/portal': 2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/input-number@1.6.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/mini-decimal': 1.1.0 + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/input@1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/mentions@1.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/input': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/menu': 1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/textarea': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 
1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/menu@1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/overflow': 1.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/mini-decimal@1.1.0': + dependencies: + '@babel/runtime': 7.28.4 + + '@rc-component/motion@1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/mutate-observer@2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/notification@1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/overflow@1.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/pagination@1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/picker@1.9.0(dayjs@1.11.19)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/overflow': 1.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + optionalDependencies: + dayjs: 1.11.19 + + '@rc-component/portal@1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/portal@2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/progress@1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/qrcode@1.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + 
'@rc-component/rate@1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/resize-observer@1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/segmented@1.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/select@1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/overflow': 1.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/virtual-list': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/slider@1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/steps@1.2.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/switch@1.0.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/table@1.9.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/context': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/virtual-list': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/tabs@1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/dropdown': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/menu': 1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/textarea@1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/input': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/tooltip@1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + 
'@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/tour@2.2.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/portal': 2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/tree-select@1.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/select': 1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tree': 1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/tree@1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/virtual-list': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/trigger@2.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/portal': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + classnames: 2.5.1 + rc-motion: 2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-resize-observer: 1.4.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/trigger@3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/portal': 2.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/upload@1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rc-component/util@1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + is-mobile: 5.0.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + react-is: 18.3.1 + + '@rc-component/virtual-list@1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3)': + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + '@rollup/rollup-android-arm-eabi@4.54.0': + optional: true + + '@rollup/rollup-android-arm64@4.54.0': + optional: true + + '@rollup/rollup-darwin-arm64@4.54.0': + optional: true + + '@rollup/rollup-darwin-x64@4.54.0': + optional: true + + '@rollup/rollup-freebsd-arm64@4.54.0': + optional: true + + '@rollup/rollup-freebsd-x64@4.54.0': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.54.0': + optional: true + + 
'@rollup/rollup-linux-arm-musleabihf@4.54.0': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.54.0': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.54.0': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.54.0': + optional: true + + '@rollup/rollup-linux-x64-musl@4.54.0': + optional: true + + '@rollup/rollup-openharmony-arm64@4.54.0': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.54.0': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.54.0': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.54.0': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.54.0': + optional: true + + '@shikijs/core@3.20.0': + dependencies: + '@shikijs/types': 3.20.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + hast-util-to-html: 9.0.5 + + '@shikijs/engine-javascript@3.20.0': + dependencies: + '@shikijs/types': 3.20.0 + '@shikijs/vscode-textmate': 10.0.2 + oniguruma-to-es: 4.3.4 + + '@shikijs/engine-oniguruma@3.20.0': + dependencies: + '@shikijs/types': 3.20.0 + '@shikijs/vscode-textmate': 10.0.2 + + '@shikijs/langs@3.20.0': + dependencies: + '@shikijs/types': 3.20.0 + + '@shikijs/themes@3.20.0': + dependencies: + '@shikijs/types': 3.20.0 + + '@shikijs/transformers@3.20.0': + dependencies: + '@shikijs/core': 3.20.0 + '@shikijs/types': 3.20.0 + + '@shikijs/types@3.20.0': + dependencies: + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + + '@shikijs/vscode-textmate@10.0.2': {} + + '@splinetool/runtime@0.9.526': + dependencies: + on-change: 4.0.2 + semver-compare: 1.0.0 + + '@stitches/react@1.2.8(react@19.2.3)': + dependencies: + react: 19.2.3 + + '@types/d3-array@3.2.2': {} + + '@types/d3-axis@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-brush@3.0.6': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-chord@3.0.6': {} + + '@types/d3-color@3.1.3': {} + + '@types/d3-contour@3.0.6': + dependencies: + '@types/d3-array': 3.2.2 + '@types/geojson': 7946.0.16 + + '@types/d3-delaunay@6.0.4': {} + + '@types/d3-dispatch@3.0.7': {} + + '@types/d3-drag@3.0.7': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-dsv@3.0.7': {} + + '@types/d3-ease@3.0.2': {} + + '@types/d3-fetch@3.0.7': + dependencies: + '@types/d3-dsv': 3.0.7 + + '@types/d3-force@3.0.10': {} + + '@types/d3-format@3.0.4': {} + + '@types/d3-geo@3.1.0': + dependencies: + '@types/geojson': 7946.0.16 + + '@types/d3-hierarchy@3.1.7': {} + + '@types/d3-interpolate@3.0.4': + dependencies: + '@types/d3-color': 3.1.3 + + '@types/d3-path@3.1.1': {} + + '@types/d3-polygon@3.0.2': {} + + '@types/d3-quadtree@3.0.6': {} + + '@types/d3-random@3.0.3': {} + + '@types/d3-scale-chromatic@3.1.0': {} + + '@types/d3-scale@4.0.9': + dependencies: + '@types/d3-time': 3.0.4 + + '@types/d3-selection@3.0.11': {} + + '@types/d3-shape@3.1.7': + dependencies: + '@types/d3-path': 3.1.1 + + '@types/d3-time-format@4.0.3': {} + + '@types/d3-time@3.0.4': {} + + '@types/d3-timer@3.0.2': {} + + '@types/d3-transition@3.0.9': + dependencies: + '@types/d3-selection': 3.0.11 + + '@types/d3-zoom@3.0.8': + dependencies: + '@types/d3-interpolate': 3.0.4 + '@types/d3-selection': 3.0.11 + + '@types/d3@7.4.3': + dependencies: + '@types/d3-array': 3.2.2 + '@types/d3-axis': 3.0.6 + 
'@types/d3-brush': 3.0.6 + '@types/d3-chord': 3.0.6 + '@types/d3-color': 3.1.3 + '@types/d3-contour': 3.0.6 + '@types/d3-delaunay': 6.0.4 + '@types/d3-dispatch': 3.0.7 + '@types/d3-drag': 3.0.7 + '@types/d3-dsv': 3.0.7 + '@types/d3-ease': 3.0.2 + '@types/d3-fetch': 3.0.7 + '@types/d3-force': 3.0.10 + '@types/d3-format': 3.0.4 + '@types/d3-geo': 3.1.0 + '@types/d3-hierarchy': 3.1.7 + '@types/d3-interpolate': 3.0.4 + '@types/d3-path': 3.1.1 + '@types/d3-polygon': 3.0.2 + '@types/d3-quadtree': 3.0.6 + '@types/d3-random': 3.0.3 + '@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + '@types/d3-selection': 3.0.11 + '@types/d3-shape': 3.1.7 + '@types/d3-time': 3.0.4 + '@types/d3-time-format': 4.0.3 + '@types/d3-timer': 3.0.2 + '@types/d3-transition': 3.0.9 + '@types/d3-zoom': 3.0.8 + + '@types/debug@4.1.12': + dependencies: + '@types/ms': 2.1.0 + + '@types/estree-jsx@1.0.5': + dependencies: + '@types/estree': 1.0.8 + + '@types/estree@1.0.8': {} + + '@types/file-saver@2.0.7': {} + + '@types/geojson@7946.0.16': {} + + '@types/hast@3.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/js-cookie@3.0.6': {} + + '@types/katex@0.16.7': {} + + '@types/mdast@4.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/mdx@2.0.13': {} + + '@types/ms@2.1.0': {} + + '@types/node@20.19.27': + dependencies: + undici-types: 6.21.0 + + '@types/parse-json@4.0.2': {} + + '@types/react@19.2.7': + dependencies: + csstype: 3.2.3 + + '@types/trusted-types@2.0.7': {} + + '@types/unist@2.0.11': {} + + '@types/unist@3.0.3': {} + + '@types/web-bluetooth@0.0.20': {} + + '@typescript-eslint/eslint-plugin@7.18.0(@typescript-eslint/parser@7.18.0(eslint@8.57.1)(typescript@5.6.3))(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 7.18.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/scope-manager': 7.18.0 + '@typescript-eslint/type-utils': 7.18.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/utils': 7.18.0(eslint@8.57.1)(typescript@5.6.3) + '@typescript-eslint/visitor-keys': 7.18.0 + eslint: 8.57.1 + graphemer: 1.4.0 + ignore: 5.3.2 + natural-compare: 1.4.0 + ts-api-utils: 1.4.3(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@7.18.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@typescript-eslint/scope-manager': 7.18.0 + '@typescript-eslint/types': 7.18.0 + '@typescript-eslint/typescript-estree': 7.18.0(typescript@5.6.3) + '@typescript-eslint/visitor-keys': 7.18.0 + debug: 4.4.3 + eslint: 8.57.1 + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@7.18.0': + dependencies: + '@typescript-eslint/types': 7.18.0 + '@typescript-eslint/visitor-keys': 7.18.0 + + '@typescript-eslint/type-utils@7.18.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@typescript-eslint/typescript-estree': 7.18.0(typescript@5.6.3) + '@typescript-eslint/utils': 7.18.0(eslint@8.57.1)(typescript@5.6.3) + debug: 4.4.3 + eslint: 8.57.1 + ts-api-utils: 1.4.3(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@7.18.0': {} + + '@typescript-eslint/typescript-estree@7.18.0(typescript@5.6.3)': + dependencies: + '@typescript-eslint/types': 7.18.0 + '@typescript-eslint/visitor-keys': 7.18.0 + debug: 4.4.3 + globby: 11.1.0 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.7.3 + ts-api-utils: 
1.4.3(typescript@5.6.3) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@7.18.0(eslint@8.57.1)(typescript@5.6.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + '@typescript-eslint/scope-manager': 7.18.0 + '@typescript-eslint/types': 7.18.0 + '@typescript-eslint/typescript-estree': 7.18.0(typescript@5.6.3) + eslint: 8.57.1 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@7.18.0': + dependencies: + '@typescript-eslint/types': 7.18.0 + eslint-visitor-keys: 3.4.3 + + '@ungap/structured-clone@1.3.0': {} + + '@use-gesture/core@10.3.1': {} + + '@use-gesture/react@10.3.1(react@19.2.3)': + dependencies: + '@use-gesture/core': 10.3.1 + react: 19.2.3 + + '@vitejs/plugin-vue@5.2.4(vite@5.4.21(@types/node@20.19.27))(vue@3.5.26(typescript@5.6.3))': + dependencies: + vite: 5.4.21(@types/node@20.19.27) + vue: 3.5.26(typescript@5.6.3) + + '@volar/language-core@2.4.15': + dependencies: + '@volar/source-map': 2.4.15 + + '@volar/source-map@2.4.15': {} + + '@volar/typescript@2.4.15': + dependencies: + '@volar/language-core': 2.4.15 + path-browserify: 1.0.1 + vscode-uri: 3.1.0 + + '@vue/compiler-core@3.5.26': + dependencies: + '@babel/parser': 7.28.5 + '@vue/shared': 3.5.26 + entities: 7.0.0 + estree-walker: 2.0.2 + source-map-js: 1.2.1 + + '@vue/compiler-dom@3.5.26': + dependencies: + '@vue/compiler-core': 3.5.26 + '@vue/shared': 3.5.26 + + '@vue/compiler-sfc@3.5.26': + dependencies: + '@babel/parser': 7.28.5 + '@vue/compiler-core': 3.5.26 + '@vue/compiler-dom': 3.5.26 + '@vue/compiler-ssr': 3.5.26 + '@vue/shared': 3.5.26 + estree-walker: 2.0.2 + magic-string: 0.30.21 + postcss: 8.5.6 + source-map-js: 1.2.1 + + '@vue/compiler-ssr@3.5.26': + dependencies: + '@vue/compiler-dom': 3.5.26 + '@vue/shared': 3.5.26 + + '@vue/compiler-vue2@2.7.16': + dependencies: + de-indent: 1.0.2 + he: 1.2.0 + + '@vue/devtools-api@6.6.4': {} + + '@vue/language-core@2.2.12(typescript@5.6.3)': + dependencies: + '@volar/language-core': 2.4.15 + '@vue/compiler-dom': 3.5.26 + '@vue/compiler-vue2': 2.7.16 + '@vue/shared': 3.5.26 + alien-signals: 1.0.13 + minimatch: 9.0.5 + muggle-string: 0.4.1 + path-browserify: 1.0.1 + optionalDependencies: + typescript: 5.6.3 + + '@vue/reactivity@3.5.26': + dependencies: + '@vue/shared': 3.5.26 + + '@vue/runtime-core@3.5.26': + dependencies: + '@vue/reactivity': 3.5.26 + '@vue/shared': 3.5.26 + + '@vue/runtime-dom@3.5.26': + dependencies: + '@vue/reactivity': 3.5.26 + '@vue/runtime-core': 3.5.26 + '@vue/shared': 3.5.26 + csstype: 3.2.3 + + '@vue/server-renderer@3.5.26(vue@3.5.26(typescript@5.6.3))': + dependencies: + '@vue/compiler-ssr': 3.5.26 + '@vue/shared': 3.5.26 + vue: 3.5.26(typescript@5.6.3) + + '@vue/shared@3.5.26': {} + + '@vueuse/core@10.11.1(vue@3.5.26(typescript@5.6.3))': + dependencies: + '@types/web-bluetooth': 0.0.20 + '@vueuse/metadata': 10.11.1 + '@vueuse/shared': 10.11.1(vue@3.5.26(typescript@5.6.3)) + vue-demi: 0.14.10(vue@3.5.26(typescript@5.6.3)) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + '@vueuse/metadata@10.11.1': {} + + '@vueuse/shared@10.11.1(vue@3.5.26(typescript@5.6.3))': + dependencies: + vue-demi: 0.14.10(vue@3.5.26(typescript@5.6.3)) + transitivePeerDependencies: + - '@vue/composition-api' + - vue + + acorn-jsx@5.3.2(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + + acorn@8.15.0: {} + + adler-32@1.3.1: {} + + ahooks@3.9.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + 
'@babel/runtime': 7.28.4 + '@types/js-cookie': 3.0.6 + dayjs: 1.11.19 + intersection-observer: 0.12.2 + js-cookie: 3.0.5 + lodash: 4.17.21 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + react-fast-compare: 3.2.2 + resize-observer-polyfill: 1.5.1 + screenfull: 5.2.0 + tslib: 2.8.1 + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + alien-signals@1.0.13: {} + + ansi-regex@5.0.1: {} + + ansi-regex@6.2.2: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + antd-style@4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@ant-design/cssinjs': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@babel/runtime': 7.28.4 + '@emotion/cache': 11.14.0 + '@emotion/css': 11.13.5 + '@emotion/react': 11.14.0(@types/react@19.2.7)(react@19.2.3) + '@emotion/serialize': 1.3.3 + '@emotion/utils': 1.4.2 + antd: 6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + use-merge-value: 1.2.0(react@19.2.3) + transitivePeerDependencies: + - '@types/react' + - react-dom + - supports-color + + antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@ant-design/colors': 8.0.0 + '@ant-design/cssinjs': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@ant-design/cssinjs-utils': 2.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@ant-design/fast-color': 3.0.0 + '@ant-design/icons': 6.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@ant-design/react-slick': 2.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@babel/runtime': 7.28.4 + '@rc-component/cascader': 1.10.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/checkbox': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/collapse': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/color-picker': 3.0.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/dialog': 1.5.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/drawer': 1.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/dropdown': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/form': 1.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/image': 1.5.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/input': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/input-number': 1.6.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/mentions': 1.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/menu': 1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/motion': 1.1.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/mutate-observer': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/notification': 1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/pagination': 1.2.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/picker': 1.9.0(dayjs@1.11.19)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/progress': 1.0.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/qrcode': 1.1.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/rate': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/resize-observer': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/segmented': 1.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + 
'@rc-component/select': 1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/slider': 1.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/steps': 1.2.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/switch': 1.0.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/table': 1.9.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tabs': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/textarea': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tooltip': 1.4.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tour': 2.2.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tree': 1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/tree-select': 1.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/trigger': 3.8.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/upload': 1.1.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@rc-component/util': 1.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + clsx: 2.1.1 + dayjs: 1.11.19 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + scroll-into-view-if-needed: 3.1.0 + throttle-debounce: 5.0.2 + transitivePeerDependencies: + - date-fns + - luxon + - moment + + any-promise@1.3.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + arg@5.0.2: {} + + argparse@2.0.1: {} + + array-union@2.1.0: {} + + assign-symbols@1.0.0: {} + + astring@1.9.0: {} + + asynckit@0.4.0: {} + + attr-accept@2.2.5: {} + + autoprefixer@10.4.23(postcss@8.5.6): + dependencies: + browserslist: 4.28.1 + caniuse-lite: 1.0.30001761 + fraction.js: 5.3.4 + picocolors: 1.1.1 + postcss: 8.5.6 + postcss-value-parser: 4.2.0 + + axios@1.13.2: + dependencies: + follow-redirects: 1.15.11 + form-data: 4.0.5 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + babel-plugin-macros@3.1.0: + dependencies: + '@babel/runtime': 7.28.4 + cosmiconfig: 7.1.0 + resolve: 1.22.11 + + bail@2.0.2: {} + + balanced-match@1.0.2: {} + + baseline-browser-mapping@2.9.11: {} + + binary-extensions@2.3.0: {} + + boolbase@1.0.0: {} + + brace-expansion@1.1.12: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.2: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.28.1: + dependencies: + baseline-browser-mapping: 2.9.11 + caniuse-lite: 1.0.30001761 + electron-to-chromium: 1.5.267 + node-releases: 2.0.27 + update-browserslist-db: 1.2.3(browserslist@4.28.1) + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + callsites@3.1.0: {} + + camelcase-css@2.0.1: {} + + caniuse-lite@1.0.30001761: {} + + ccount@2.0.1: {} + + cfb@1.2.2: + dependencies: + adler-32: 1.3.1 + crc-32: 1.2.2 + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + character-entities-html4@2.1.0: {} + + character-entities-legacy@3.0.0: {} + + character-entities@2.0.2: {} + + character-reference-invalid@2.0.1: {} + + chart.js@4.5.1: + dependencies: + '@kurkle/color': 0.3.4 + + chevrotain-allstar@0.3.1(chevrotain@11.0.3): + dependencies: + chevrotain: 11.0.3 + lodash-es: 4.17.22 + + chevrotain@11.0.3: + dependencies: + '@chevrotain/cst-dts-gen': 11.0.3 + '@chevrotain/gast': 11.0.3 + '@chevrotain/regexp-to-ast': 11.0.3 + '@chevrotain/types': 11.0.3 + '@chevrotain/utils': 11.0.3 + lodash-es: 4.17.21 + + chokidar@3.6.0: + dependencies: + anymatch: 3.1.3 + braces: 3.0.3 + 
glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + + chokidar@4.0.3: + dependencies: + readdirp: 4.1.2 + + chroma-js@3.2.0: {} + + class-variance-authority@0.7.1: + dependencies: + clsx: 2.1.1 + + classnames@2.5.1: {} + + clsx@1.2.1: {} + + clsx@2.1.1: {} + + codepage@1.15.0: {} + + collapse-white-space@2.1.0: {} + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.4: {} + + colord@2.9.3: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + comma-separated-tokens@2.0.3: {} + + commander@4.1.1: {} + + commander@7.2.0: {} + + commander@8.3.0: {} + + compute-scroll-into-view@3.1.1: {} + + concat-map@0.0.1: {} + + confbox@0.1.8: {} + + convert-source-map@1.9.0: {} + + cose-base@1.0.3: + dependencies: + layout-base: 1.0.2 + + cose-base@2.2.0: + dependencies: + layout-base: 2.0.1 + + cosmiconfig@7.1.0: + dependencies: + '@types/parse-json': 4.0.2 + import-fresh: 3.3.1 + parse-json: 5.2.0 + path-type: 4.0.0 + yaml: 1.10.2 + + crc-32@1.2.2: {} + + cross-spawn@7.0.6: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + csstype@3.2.3: {} + + cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + + cytoscape-fcose@2.2.0(cytoscape@3.33.1): + dependencies: + cose-base: 2.2.0 + cytoscape: 3.33.1 + + cytoscape@3.33.1: {} + + d3-array@2.12.1: + dependencies: + internmap: 1.0.1 + + d3-array@3.2.4: + dependencies: + internmap: 2.0.3 + + d3-axis@3.0.0: {} + + d3-brush@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3-chord@3.0.1: + dependencies: + d3-path: 3.1.0 + + d3-color@3.1.0: {} + + d3-contour@4.0.2: + dependencies: + d3-array: 3.2.4 + + d3-delaunay@6.0.4: + dependencies: + delaunator: 5.0.1 + + d3-dispatch@3.0.1: {} + + d3-drag@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-selection: 3.0.0 + + d3-dsv@3.0.1: + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + + d3-ease@3.0.1: {} + + d3-fetch@3.0.1: + dependencies: + d3-dsv: 3.0.1 + + d3-force@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + + d3-format@3.1.0: {} + + d3-geo@3.1.1: + dependencies: + d3-array: 3.2.4 + + d3-hierarchy@3.1.2: {} + + d3-interpolate@3.0.1: + dependencies: + d3-color: 3.1.0 + + d3-path@1.0.9: {} + + d3-path@3.1.0: {} + + d3-polygon@3.0.1: {} + + d3-quadtree@3.0.1: {} + + d3-random@3.0.1: {} + + d3-sankey@0.12.3: + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + + d3-scale-chromatic@3.1.0: + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + + d3-scale@4.0.2: + dependencies: + d3-array: 3.2.4 + d3-format: 3.1.0 + d3-interpolate: 3.0.1 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + + d3-selection@3.0.0: {} + + d3-shape@1.3.7: + dependencies: + d3-path: 1.0.9 + + d3-shape@3.2.0: + dependencies: + d3-path: 3.1.0 + + d3-time-format@4.1.0: + dependencies: + d3-time: 3.1.0 + + d3-time@3.1.0: + dependencies: + d3-array: 3.2.4 + + d3-timer@3.0.1: {} + + d3-transition@3.0.1(d3-selection@3.0.0): + dependencies: + d3-color: 3.1.0 + d3-dispatch: 3.0.1 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-timer: 3.0.1 + + d3-zoom@3.0.0: + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + + d3@7.9.0: + dependencies: + d3-array: 3.2.4 + 
d3-axis: 3.0.0 + d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.0 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + + dagre-d3-es@7.0.13: + dependencies: + d3: 7.9.0 + lodash-es: 4.17.22 + + dayjs@1.11.19: {} + + de-indent@1.0.2: {} + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decode-named-character-reference@1.2.0: + dependencies: + character-entities: 2.0.2 + + decode-uri-component@0.4.1: {} + + deep-is@0.1.4: {} + + delaunator@5.0.1: + dependencies: + robust-predicates: 3.0.2 + + delayed-stream@1.0.0: {} + + dequal@2.0.3: {} + + devlop@1.1.0: + dependencies: + dequal: 2.0.3 + + didyoumean@1.2.2: {} + + dir-glob@3.0.1: + dependencies: + path-type: 4.0.0 + + dlv@1.1.3: {} + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + dompurify@3.3.1: + optionalDependencies: + '@types/trusted-types': 2.0.7 + + driver.js@1.4.0: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + electron-to-chromium@1.5.267: {} + + emoji-mart@5.6.0: {} + + emoji-regex@10.6.0: {} + + entities@6.0.1: {} + + entities@7.0.0: {} + + error-ex@1.3.4: + dependencies: + is-arrayish: 0.2.1 + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + es-toolkit@1.43.0: {} + + esast-util-from-estree@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + devlop: 1.1.0 + estree-util-visit: 2.0.0 + unist-util-position-from-estree: 2.0.0 + + esast-util-from-js@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + acorn: 8.15.0 + esast-util-from-estree: 2.0.0 + vfile-message: 4.0.3 + + esbuild@0.21.5: + optionalDependencies: + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 + + escalade@3.2.0: {} + + escape-string-regexp@4.0.0: {} + + escape-string-regexp@5.0.0: {} + + eslint-plugin-vue@9.33.0(eslint@8.57.1): + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + eslint: 8.57.1 + globals: 13.24.0 + natural-compare: 1.4.0 + nth-check: 2.1.1 + postcss-selector-parser: 6.1.2 + semver: 7.7.3 + vue-eslint-parser: 9.4.3(eslint@8.57.1) + xml-name-validator: 4.0.0 + transitivePeerDependencies: + - supports-color + + eslint-scope@7.2.2: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + 
eslint-visitor-keys@3.4.3: {} + + eslint@8.57.1: + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@8.57.1) + '@eslint-community/regexpp': 4.12.2 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.57.1 + '@humanwhocodes/config-array': 0.13.0 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.3.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.6 + debug: 4.4.3 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.7.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.24.0 + graphemer: 1.4.0 + ignore: 5.3.2 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.1 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.6.1: + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + eslint-visitor-keys: 3.4.3 + + esquery@1.7.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + estree-util-attach-comments@3.0.0: + dependencies: + '@types/estree': 1.0.8 + + estree-util-build-jsx@3.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + estree-walker: 3.0.3 + + estree-util-is-identifier-name@3.0.0: {} + + estree-util-scope@1.0.0: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + + estree-util-to-js@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + astring: 1.9.0 + source-map: 0.7.6 + + estree-util-visit@2.0.0: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/unist': 3.0.3 + + estree-walker@2.0.2: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + esutils@2.0.3: {} + + extend-shallow@2.0.1: + dependencies: + is-extendable: 0.1.1 + + extend-shallow@3.0.2: + dependencies: + assign-symbols: 1.0.0 + is-extendable: 1.0.1 + + extend@3.0.2: {} + + fast-deep-equal@3.1.3: {} + + fast-glob@3.3.3: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.8 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.20.1: + dependencies: + reusify: 1.1.0 + + fdir@6.5.0(picomatch@4.0.3): + optionalDependencies: + picomatch: 4.0.3 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.2.0 + + file-saver@2.0.5: {} + + file-selector@0.5.0: + dependencies: + tslib: 2.8.1 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + filter-obj@5.1.0: {} + + find-root@1.1.0: {} + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.2.0: + dependencies: + flatted: 3.3.3 + keyv: 4.5.4 + rimraf: 3.0.2 + + flatted@3.3.3: {} + + follow-redirects@1.15.11: {} + + for-in@1.0.2: {} + + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + + frac@1.1.2: {} + + fraction.js@5.3.4: {} + + framer-motion@12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + motion-dom: 12.23.23 + motion-utils: 12.23.6 + tslib: 2.8.1 + optionalDependencies: + '@emotion/is-prop-valid': 1.4.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + 
optional: true + + function-bind@1.1.2: {} + + get-east-asian-width@1.4.0: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-value@2.0.6: {} + + giscus@1.6.0: + dependencies: + lit: 3.3.2 + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@13.24.0: + dependencies: + type-fest: 0.20.2 + + globby@11.1.0: + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.3 + ignore: 5.3.2 + merge2: 1.4.1 + slash: 3.0.0 + + gopd@1.2.0: {} + + graphemer@1.4.0: {} + + hachure-fill@0.5.2: {} + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + hast-util-from-dom@5.0.1: + dependencies: + '@types/hast': 3.0.4 + hastscript: 9.0.1 + web-namespaces: 2.0.1 + + hast-util-from-html-isomorphic@2.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-from-dom: 5.0.1 + hast-util-from-html: 2.0.3 + unist-util-remove-position: 5.0.0 + + hast-util-from-html@2.0.3: + dependencies: + '@types/hast': 3.0.4 + devlop: 1.1.0 + hast-util-from-parse5: 8.0.3 + parse5: 7.3.0 + vfile: 6.0.3 + vfile-message: 4.0.3 + + hast-util-from-parse5@8.0.3: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + devlop: 1.1.0 + hastscript: 9.0.1 + property-information: 7.1.0 + vfile: 6.0.3 + vfile-location: 5.0.3 + web-namespaces: 2.0.1 + + hast-util-is-element@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + hast-util-parse-selector@4.0.0: + dependencies: + '@types/hast': 3.0.4 + + hast-util-raw@9.1.0: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + '@ungap/structured-clone': 1.3.0 + hast-util-from-parse5: 8.0.3 + hast-util-to-parse5: 8.0.1 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + parse5: 7.3.0 + unist-util-position: 5.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-to-estree@3.1.3: + dependencies: + '@types/estree': 1.0.8 + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + estree-util-attach-comments: 3.0.0 + estree-util-is-identifier-name: 3.0.0 + hast-util-whitespace: 3.0.0 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + style-to-js: 1.1.21 + unist-util-position: 5.0.0 + zwitch: 2.0.4 + transitivePeerDependencies: + - supports-color + + hast-util-to-html@9.0.5: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + comma-separated-tokens: 2.0.3 + hast-util-whitespace: 3.0.0 + html-void-elements: 3.0.0 + mdast-util-to-hast: 13.2.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + stringify-entities: 4.0.4 + zwitch: 2.0.4 + + hast-util-to-jsx-runtime@2.3.6: + dependencies: + '@types/estree': 1.0.8 + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + hast-util-whitespace: 3.0.0 + mdast-util-mdx-expression: 2.0.1 + 
mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + style-to-js: 1.1.21 + unist-util-position: 5.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + hast-util-to-parse5@8.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + web-namespaces: 2.0.1 + zwitch: 2.0.4 + + hast-util-to-text@4.0.2: + dependencies: + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + hast-util-is-element: 3.0.0 + unist-util-find-after: 5.0.0 + + hast-util-whitespace@3.0.0: + dependencies: + '@types/hast': 3.0.4 + + hastscript@9.0.1: + dependencies: + '@types/hast': 3.0.4 + comma-separated-tokens: 2.0.3 + hast-util-parse-selector: 4.0.0 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + + he@1.2.0: {} + + hoist-non-react-statics@3.3.2: + dependencies: + react-is: 16.13.1 + + html-url-attributes@3.0.1: {} + + html-void-elements@3.0.0: {} + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + ignore@5.3.2: {} + + immer@11.1.3: {} + + import-fresh@3.3.1: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + inline-style-parser@0.2.7: {} + + internmap@1.0.1: {} + + internmap@2.0.3: {} + + intersection-observer@0.12.2: {} + + is-alphabetical@2.0.1: {} + + is-alphanumerical@2.0.1: + dependencies: + is-alphabetical: 2.0.1 + is-decimal: 2.0.1 + + is-arrayish@0.2.1: {} + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.3.0 + + is-core-module@2.16.1: + dependencies: + hasown: 2.0.2 + + is-decimal@2.0.1: {} + + is-extendable@0.1.1: {} + + is-extendable@1.0.1: + dependencies: + is-plain-object: 2.0.4 + + is-extglob@2.1.1: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-hexadecimal@2.0.1: {} + + is-mobile@5.0.0: {} + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + is-plain-obj@4.1.0: {} + + is-plain-object@2.0.4: + dependencies: + isobject: 3.0.1 + + isexe@2.0.0: {} + + isobject@3.0.1: {} + + jiti@1.21.7: {} + + js-cookie@3.0.5: {} + + js-tokens@4.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + jsesc@3.1.0: {} + + json-buffer@3.0.1: {} + + json-parse-even-better-errors@2.3.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json2mq@0.2.0: + dependencies: + string-convert: 0.2.1 + + katex@0.16.27: + dependencies: + commander: 8.3.0 + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + khroma@2.1.0: {} + + langium@3.3.1: + dependencies: + chevrotain: 11.0.3 + chevrotain-allstar: 0.3.1(chevrotain@11.0.3) + vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.12 + vscode-uri: 3.0.8 + + layout-base@1.0.2: {} + + layout-base@2.0.1: {} + + leva@0.10.1(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@radix-ui/react-portal': 1.1.10(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@radix-ui/react-tooltip': 1.2.8(@types/react@19.2.7)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + '@stitches/react': 1.2.8(react@19.2.3) + '@use-gesture/react': 10.3.1(react@19.2.3) + colord: 2.9.3 + dequal: 2.0.3 + merge-value: 1.0.0 + react: 19.2.3 + react-colorful: 5.6.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react-dom: 19.2.3(react@19.2.3) + react-dropzone: 12.1.0(react@19.2.3) + v8n: 1.5.1 + zustand: 3.7.2(react@19.2.3) + 
transitivePeerDependencies: + - '@types/react' + - '@types/react-dom' + + levn@0.4.1: + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + + lilconfig@3.1.3: {} + + lines-and-columns@1.2.4: {} + + lit-element@4.2.2: + dependencies: + '@lit-labs/ssr-dom-shim': 1.5.0 + '@lit/reactive-element': 2.1.2 + lit-html: 3.3.2 + + lit-html@3.3.2: + dependencies: + '@types/trusted-types': 2.0.7 + + lit@3.3.2: + dependencies: + '@lit/reactive-element': 2.1.2 + lit-element: 4.2.2 + lit-html: 3.3.2 + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash-es@4.17.21: {} + + lodash-es@4.17.22: {} + + lodash.merge@4.6.2: {} + + lodash@4.17.21: {} + + longest-streak@3.1.0: {} + + loose-envify@1.4.0: + dependencies: + js-tokens: 4.0.0 + + lucide-react@0.469.0(react@19.2.3): + dependencies: + react: 19.2.3 + + lucide-react@0.562.0(react@19.2.3): + dependencies: + react: 19.2.3 + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + markdown-extensions@2.0.0: {} + + markdown-table@3.0.4: {} + + marked@16.4.2: {} + + marked@17.0.1: {} + + math-intrinsics@1.1.0: {} + + mdast-util-find-and-replace@3.0.2: + dependencies: + '@types/mdast': 4.0.4 + escape-string-regexp: 5.0.0 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + mdast-util-from-markdown@2.0.2: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + mdast-util-to-string: 4.0.0 + micromark: 4.0.2 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-decode-string: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-stringify-position: 4.0.0 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-autolink-literal@2.0.1: + dependencies: + '@types/mdast': 4.0.4 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-find-and-replace: 3.0.2 + micromark-util-character: 2.1.1 + + mdast-util-gfm-footnote@2.1.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + micromark-util-normalize-identifier: 2.0.1 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-strikethrough@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-table@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + markdown-table: 3.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-task-list-item@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm@3.1.0: + dependencies: + mdast-util-from-markdown: 2.0.2 + mdast-util-gfm-autolink-literal: 2.0.1 + mdast-util-gfm-footnote: 2.1.0 + mdast-util-gfm-strikethrough: 2.0.0 + mdast-util-gfm-table: 2.0.0 + mdast-util-gfm-task-list-item: 2.0.0 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-math@3.0.0: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + longest-streak: 3.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + unist-util-remove-position: 5.0.0 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-expression@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 
3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-jsx@3.2.0: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + parse-entities: 4.0.2 + stringify-entities: 4.0.4 + unist-util-stringify-position: 4.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx@3.0.0: + dependencies: + mdast-util-from-markdown: 2.0.2 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdxjs-esm@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-newline-to-break@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-find-and-replace: 3.0.2 + + mdast-util-phrasing@4.1.0: + dependencies: + '@types/mdast': 4.0.4 + unist-util-is: 6.0.1 + + mdast-util-to-hast@13.2.1: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@ungap/structured-clone': 1.3.0 + devlop: 1.1.0 + micromark-util-sanitize-uri: 2.0.1 + trim-lines: 3.0.1 + unist-util-position: 5.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + + mdast-util-to-markdown@2.1.2: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + longest-streak: 3.1.0 + mdast-util-phrasing: 4.1.0 + mdast-util-to-string: 4.0.0 + micromark-util-classify-character: 2.0.1 + micromark-util-decode-string: 2.0.1 + unist-util-visit: 5.0.0 + zwitch: 2.0.4 + + mdast-util-to-string@4.0.0: + dependencies: + '@types/mdast': 4.0.4 + + merge-value@1.0.0: + dependencies: + get-value: 2.0.6 + is-extendable: 1.0.1 + mixin-deep: 1.3.2 + set-value: 2.0.1 + + merge2@1.4.1: {} + + mermaid@11.12.2: + dependencies: + '@braintree/sanitize-url': 7.1.1 + '@iconify/utils': 3.1.0 + '@mermaid-js/parser': 0.6.3 + '@types/d3': 7.4.3 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.13 + dayjs: 1.11.19 + dompurify: 3.3.1 + katex: 0.16.27 + khroma: 2.1.0 + lodash-es: 4.17.22 + marked: 16.4.2 + roughjs: 4.6.6 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 11.1.0 + + micromark-core-commonmark@2.0.3: + dependencies: + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly-util@2.1.1(micromark-util-types@2.0.2): + dependencies: + get-east-asian-width: 1.4.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + + micromark-extension-cjk-friendly@1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2): + dependencies: + devlop: 1.1.0 + micromark: 
4.0.2 + micromark-extension-cjk-friendly-util: 2.1.1(micromark-util-types@2.0.2) + micromark-util-chunked: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + optionalDependencies: + micromark-util-types: 2.0.2 + + micromark-extension-gfm-autolink-literal@2.1.0: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-footnote@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-strikethrough@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-table@2.1.1: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-tagfilter@2.0.0: + dependencies: + micromark-util-types: 2.0.2 + + micromark-extension-gfm-task-list-item@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm@3.0.0: + dependencies: + micromark-extension-gfm-autolink-literal: 2.1.0 + micromark-extension-gfm-footnote: 2.1.0 + micromark-extension-gfm-strikethrough: 2.1.0 + micromark-extension-gfm-table: 2.1.1 + micromark-extension-gfm-tagfilter: 2.0.0 + micromark-extension-gfm-task-list-item: 2.1.0 + micromark-util-combine-extensions: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-math@3.1.0: + dependencies: + '@types/katex': 0.16.7 + devlop: 1.1.0 + katex: 0.16.27 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-mdx-expression@3.0.1: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-factory-mdx-expression: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-mdx-jsx@3.0.2: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + micromark-factory-mdx-expression: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + vfile-message: 4.0.3 + + micromark-extension-mdx-md@2.0.0: + dependencies: + micromark-util-types: 2.0.2 + + micromark-extension-mdxjs-esm@3.0.0: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-position-from-estree: 2.0.0 + vfile-message: 4.0.3 + + micromark-extension-mdxjs@3.0.0: + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + micromark-extension-mdx-expression: 3.0.1 + micromark-extension-mdx-jsx: 3.0.2 + micromark-extension-mdx-md: 2.0.0 + micromark-extension-mdxjs-esm: 3.0.0 + micromark-util-combine-extensions: 2.0.1 + 
micromark-util-types: 2.0.2 + + micromark-factory-destination@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-label@2.0.1: + dependencies: + devlop: 1.1.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-mdx-expression@2.0.3: + dependencies: + '@types/estree': 1.0.8 + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-events-to-acorn: 2.0.3 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-position-from-estree: 2.0.0 + vfile-message: 4.0.3 + + micromark-factory-space@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-types: 2.0.2 + + micromark-factory-title@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-whitespace@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-character@2.1.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-chunked@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-classify-character@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-combine-extensions@2.0.1: + dependencies: + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-decode-numeric-character-reference@2.0.2: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-decode-string@2.0.1: + dependencies: + decode-named-character-reference: 1.2.0 + micromark-util-character: 2.1.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-symbol: 2.0.1 + + micromark-util-encode@2.0.1: {} + + micromark-util-events-to-acorn@2.0.3: + dependencies: + '@types/estree': 1.0.8 + '@types/unist': 3.0.3 + devlop: 1.1.0 + estree-util-visit: 2.0.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + vfile-message: 4.0.3 + + micromark-util-html-tag-name@2.0.1: {} + + micromark-util-normalize-identifier@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-resolve-all@2.0.1: + dependencies: + micromark-util-types: 2.0.2 + + micromark-util-sanitize-uri@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 + + micromark-util-subtokenize@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-symbol@2.0.1: {} + + micromark-util-types@2.0.2: {} + + micromark@4.0.2: + dependencies: + '@types/debug': 4.1.12 + debug: 4.4.3 + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + transitivePeerDependencies: + - supports-color + + micromatch@4.0.8: + dependencies: + braces: 3.0.3 + 
picomatch: 2.3.1 + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.12 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.2 + + mixin-deep@1.3.2: + dependencies: + for-in: 1.0.2 + is-extendable: 1.0.1 + + mlly@1.8.0: + dependencies: + acorn: 8.15.0 + pathe: 2.0.3 + pkg-types: 1.3.1 + ufo: 1.6.1 + + motion-dom@12.23.23: + dependencies: + motion-utils: 12.23.6 + + motion-utils@12.23.6: {} + + motion@12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + framer-motion: 12.23.26(@emotion/is-prop-valid@1.4.0)(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + tslib: 2.8.1 + optionalDependencies: + '@emotion/is-prop-valid': 1.4.0 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + ms@2.1.3: {} + + muggle-string@0.4.1: {} + + mz@2.7.0: + dependencies: + any-promise: 1.3.0 + object-assign: 4.1.1 + thenify-all: 1.6.0 + + nanoid@3.3.11: {} + + natural-compare@1.4.0: {} + + node-releases@2.0.27: {} + + normalize-path@3.0.0: {} + + npm-run-path@6.0.0: + dependencies: + path-key: 4.0.0 + unicorn-magic: 0.3.0 + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + numeral@2.0.6: {} + + object-assign@4.1.1: {} + + object-hash@3.0.0: {} + + on-change@4.0.2: {} + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + oniguruma-parser@0.12.1: {} + + oniguruma-to-es@4.3.4: + dependencies: + oniguruma-parser: 0.12.1 + regex: 6.1.0 + regex-recursion: 6.0.2 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + package-manager-detector@1.6.0: {} + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + parse-entities@4.0.2: + dependencies: + '@types/unist': 2.0.11 + character-entities-legacy: 3.0.0 + character-reference-invalid: 2.0.1 + decode-named-character-reference: 1.2.0 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + is-hexadecimal: 2.0.1 + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.27.1 + error-ex: 1.3.4 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + parse5@7.3.0: + dependencies: + entities: 6.0.1 + + path-browserify@1.0.1: {} + + path-data-parser@0.1.0: {} + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-key@4.0.0: {} + + path-parse@1.0.7: {} + + path-type@4.0.0: {} + + pathe@2.0.3: {} + + picocolors@1.1.1: {} + + picomatch@2.3.1: {} + + picomatch@4.0.3: {} + + pify@2.3.0: {} + + pinia@2.3.1(typescript@5.6.3)(vue@3.5.26(typescript@5.6.3)): + dependencies: + '@vue/devtools-api': 6.6.4 + vue: 3.5.26(typescript@5.6.3) + vue-demi: 0.14.10(vue@3.5.26(typescript@5.6.3)) + optionalDependencies: + typescript: 5.6.3 + transitivePeerDependencies: + - '@vue/composition-api' + + pirates@4.0.7: {} + + pkg-types@1.3.1: + dependencies: + confbox: 0.1.8 + mlly: 1.8.0 + pathe: 2.0.3 + + points-on-curve@0.2.0: {} + + points-on-path@0.2.1: + dependencies: + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + + polished@4.3.1: + dependencies: + '@babel/runtime': 7.28.4 + + postcss-import@15.1.0(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.11 + + postcss-js@4.1.0(postcss@8.5.6): + dependencies: + camelcase-css: 2.0.1 + postcss: 8.5.6 + + postcss-load-config@6.0.1(jiti@1.21.7)(postcss@8.5.6): + dependencies: + lilconfig: 
3.1.3 + optionalDependencies: + jiti: 1.21.7 + postcss: 8.5.6 + + postcss-nested@6.2.0(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + postcss-selector-parser: 6.1.2 + + postcss-selector-parser@6.1.2: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss-value-parser@4.2.0: {} + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + prelude-ls@1.2.1: {} + + prop-types@15.8.1: + dependencies: + loose-envify: 1.4.0 + object-assign: 4.1.1 + react-is: 16.13.1 + + property-information@7.1.0: {} + + proxy-from-env@1.1.0: {} + + punycode@2.3.1: {} + + query-string@9.3.1: + dependencies: + decode-uri-component: 0.4.1 + filter-obj: 5.1.0 + split-on-first: 3.0.0 + + queue-microtask@1.2.3: {} + + rc-collapse@4.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-motion: 2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-dialog@9.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/portal': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + classnames: 2.5.1 + rc-motion: 2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-footer@0.6.8(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-image@7.12.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/portal': 1.1.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + classnames: 2.5.1 + rc-dialog: 9.6.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-motion: 2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-input-number@9.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/mini-decimal': 1.1.0 + classnames: 2.5.1 + rc-input: 1.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-input@1.8.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-menu@9.16.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + '@rc-component/trigger': 2.3.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + classnames: 2.5.1 + rc-motion: 2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-overflow: 1.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-motion@2.9.5(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-overflow@1.5.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-resize-observer: 
1.4.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + rc-resize-observer@1.4.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + classnames: 2.5.1 + rc-util: 5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + resize-observer-polyfill: 1.5.1 + + rc-util@5.44.4(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + '@babel/runtime': 7.28.4 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + react-is: 18.3.1 + + re-resizable@6.11.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-avatar-editor@14.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-colorful@5.6.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-dom@19.2.3(react@19.2.3): + dependencies: + react: 19.2.3 + scheduler: 0.27.0 + + react-draggable@4.4.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + clsx: 1.2.1 + prop-types: 15.8.1 + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-dropzone@12.1.0(react@19.2.3): + dependencies: + attr-accept: 2.2.5 + file-selector: 0.5.0 + prop-types: 15.8.1 + react: 19.2.3 + + react-error-boundary@6.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-fast-compare@3.2.2: {} + + react-hotkeys-hook@5.2.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react-is@16.13.1: {} + + react-is@18.3.1: {} + + react-markdown@10.1.0(@types/react@19.2.7)(react@19.2.3): + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@types/react': 19.2.7 + devlop: 1.1.0 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + mdast-util-to-hast: 13.2.1 + react: 19.2.3 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + unified: 11.0.5 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + react-merge-refs@3.0.2(react@19.2.3): + optionalDependencies: + react: 19.2.3 + + react-rnd@10.5.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + re-resizable: 6.11.2(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + react-draggable: 4.4.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3) + tslib: 2.6.2 + + react-zoom-pan-pinch@3.7.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): + dependencies: + react: 19.2.3 + react-dom: 19.2.3(react@19.2.3) + + react@19.2.3: {} + + read-cache@1.0.0: + dependencies: + pify: 2.3.0 + + readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + readdirp@4.1.2: {} + + recma-build-jsx@1.0.0: + dependencies: + '@types/estree': 1.0.8 + estree-util-build-jsx: 3.0.1 + vfile: 6.0.3 + + recma-jsx@1.0.1(acorn@8.15.0): + dependencies: + acorn: 8.15.0 + acorn-jsx: 5.3.2(acorn@8.15.0) + estree-util-to-js: 2.0.0 + recma-parse: 1.0.0 + recma-stringify: 1.0.0 + unified: 11.0.5 + + recma-parse@1.0.0: + dependencies: + '@types/estree': 1.0.8 + esast-util-from-js: 2.0.1 + unified: 11.0.5 + vfile: 6.0.3 + + recma-stringify@1.0.0: + dependencies: + '@types/estree': 1.0.8 + estree-util-to-js: 2.0.0 + unified: 11.0.5 + vfile: 6.0.3 + + regex-recursion@6.0.2: + dependencies: + regex-utilities: 2.3.0 + + 
regex-utilities@2.3.0: {} + + regex@6.1.0: + dependencies: + regex-utilities: 2.3.0 + + rehype-github-alerts@4.2.0: + dependencies: + '@primer/octicons': 19.21.1 + hast-util-from-html: 2.0.3 + hast-util-is-element: 3.0.0 + unist-util-visit: 5.0.0 + + rehype-katex@7.0.1: + dependencies: + '@types/hast': 3.0.4 + '@types/katex': 0.16.7 + hast-util-from-html-isomorphic: 2.0.0 + hast-util-to-text: 4.0.2 + katex: 0.16.27 + unist-util-visit-parents: 6.0.2 + vfile: 6.0.3 + + rehype-raw@7.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-raw: 9.1.0 + vfile: 6.0.3 + + rehype-recma@1.0.0: + dependencies: + '@types/estree': 1.0.8 + '@types/hast': 3.0.4 + hast-util-to-estree: 3.1.3 + transitivePeerDependencies: + - supports-color + + remark-breaks@4.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-newline-to-break: 2.0.0 + unified: 11.0.5 + + remark-cjk-friendly@1.2.3(@types/mdast@4.0.4)(micromark-util-types@2.0.2)(micromark@4.0.2)(unified@11.0.5): + dependencies: + micromark-extension-cjk-friendly: 1.2.3(micromark-util-types@2.0.2)(micromark@4.0.2) + unified: 11.0.5 + optionalDependencies: + '@types/mdast': 4.0.4 + transitivePeerDependencies: + - micromark + - micromark-util-types + + remark-gfm@4.0.1: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-gfm: 3.1.0 + micromark-extension-gfm: 3.0.0 + remark-parse: 11.0.0 + remark-stringify: 11.0.0 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-github@12.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-find-and-replace: 3.0.2 + mdast-util-to-string: 4.0.0 + to-vfile: 8.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + + remark-math@6.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-math: 3.0.0 + micromark-extension-math: 3.1.0 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-mdx@3.1.1: + dependencies: + mdast-util-mdx: 3.0.0 + micromark-extension-mdxjs: 3.0.0 + transitivePeerDependencies: + - supports-color + + remark-parse@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + micromark-util-types: 2.0.2 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-rehype@11.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + mdast-util-to-hast: 13.2.1 + unified: 11.0.5 + vfile: 6.0.3 + + remark-stringify@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-to-markdown: 2.1.2 + unified: 11.0.5 + + reselect@5.1.1: {} + + resize-observer-polyfill@1.5.1: {} + + resolve-from@4.0.0: {} + + resolve@1.22.11: + dependencies: + is-core-module: 2.16.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + reusify@1.1.0: {} + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + robust-predicates@3.0.2: {} + + rollup@4.54.0: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.54.0 + '@rollup/rollup-android-arm64': 4.54.0 + '@rollup/rollup-darwin-arm64': 4.54.0 + '@rollup/rollup-darwin-x64': 4.54.0 + '@rollup/rollup-freebsd-arm64': 4.54.0 + '@rollup/rollup-freebsd-x64': 4.54.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.54.0 + '@rollup/rollup-linux-arm-musleabihf': 4.54.0 + '@rollup/rollup-linux-arm64-gnu': 4.54.0 + '@rollup/rollup-linux-arm64-musl': 4.54.0 + '@rollup/rollup-linux-loong64-gnu': 4.54.0 + '@rollup/rollup-linux-ppc64-gnu': 4.54.0 + '@rollup/rollup-linux-riscv64-gnu': 4.54.0 + '@rollup/rollup-linux-riscv64-musl': 4.54.0 + '@rollup/rollup-linux-s390x-gnu': 4.54.0 + '@rollup/rollup-linux-x64-gnu': 4.54.0 + '@rollup/rollup-linux-x64-musl': 
4.54.0 + '@rollup/rollup-openharmony-arm64': 4.54.0 + '@rollup/rollup-win32-arm64-msvc': 4.54.0 + '@rollup/rollup-win32-ia32-msvc': 4.54.0 + '@rollup/rollup-win32-x64-gnu': 4.54.0 + '@rollup/rollup-win32-x64-msvc': 4.54.0 + fsevents: 2.3.3 + + roughjs@4.6.6: + dependencies: + hachure-fill: 0.5.2 + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + points-on-path: 0.2.1 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + rw@1.3.3: {} + + safer-buffer@2.1.2: {} + + scheduler@0.27.0: {} + + screenfull@5.2.0: {} + + scroll-into-view-if-needed@3.1.0: + dependencies: + compute-scroll-into-view: 3.1.1 + + semver-compare@1.0.0: {} + + semver@7.7.3: {} + + set-value@2.0.1: + dependencies: + extend-shallow: 2.0.1 + is-extendable: 0.1.1 + is-plain-object: 2.0.4 + split-string: 3.1.0 + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + shiki-stream@0.1.3(react@19.2.3)(vue@3.5.26(typescript@5.6.3)): + dependencies: + '@shikijs/core': 3.20.0 + optionalDependencies: + react: 19.2.3 + vue: 3.5.26(typescript@5.6.3) + + shiki@3.20.0: + dependencies: + '@shikijs/core': 3.20.0 + '@shikijs/engine-javascript': 3.20.0 + '@shikijs/engine-oniguruma': 3.20.0 + '@shikijs/langs': 3.20.0 + '@shikijs/themes': 3.20.0 + '@shikijs/types': 3.20.0 + '@shikijs/vscode-textmate': 10.0.2 + '@types/hast': 3.0.4 + + slash@3.0.0: {} + + source-map-js@1.2.1: {} + + source-map@0.5.7: {} + + source-map@0.7.6: {} + + space-separated-tokens@2.0.2: {} + + split-on-first@3.0.0: {} + + split-string@3.1.0: + dependencies: + extend-shallow: 3.0.2 + + ssf@0.11.2: + dependencies: + frac: 1.1.2 + + string-convert@0.2.1: {} + + stringify-entities@4.0.4: + dependencies: + character-entities-html4: 2.1.0 + character-entities-legacy: 3.0.0 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.1.2: + dependencies: + ansi-regex: 6.2.2 + + strip-json-comments@3.1.1: {} + + style-to-js@1.1.21: + dependencies: + style-to-object: 1.0.14 + + style-to-object@1.0.14: + dependencies: + inline-style-parser: 0.2.7 + + stylis@4.2.0: {} + + stylis@4.3.6: {} + + sucrase@3.35.1: + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + commander: 4.1.1 + lines-and-columns: 1.2.4 + mz: 2.7.0 + pirates: 4.0.7 + tinyglobby: 0.2.15 + ts-interface-checker: 0.1.13 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + swr@2.3.8(react@19.2.3): + dependencies: + dequal: 2.0.3 + react: 19.2.3 + use-sync-external-store: 1.6.0(react@19.2.3) + + tabbable@6.4.0: {} + + tailwindcss@3.4.19: + dependencies: + '@alloc/quick-lru': 5.2.0 + arg: 5.0.2 + chokidar: 3.6.0 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.3.3 + glob-parent: 6.0.2 + is-glob: 4.0.3 + jiti: 1.21.7 + lilconfig: 3.1.3 + micromatch: 4.0.8 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.1.1 + postcss: 8.5.6 + postcss-import: 15.1.0(postcss@8.5.6) + postcss-js: 4.1.0(postcss@8.5.6) + postcss-load-config: 6.0.1(jiti@1.21.7)(postcss@8.5.6) + postcss-nested: 6.2.0(postcss@8.5.6) + postcss-selector-parser: 6.1.2 + resolve: 1.22.11 + sucrase: 3.35.1 + transitivePeerDependencies: + - tsx + - yaml + + text-table@0.2.0: {} + + thenify-all@1.6.0: + dependencies: + thenify: 3.3.1 + + thenify@3.3.1: + dependencies: + any-promise: 1.3.0 + + throttle-debounce@5.0.2: {} + + tiny-invariant@1.3.3: {} + + tinyexec@1.0.2: {} + + tinyglobby@0.2.15: + dependencies: + fdir: 6.5.0(picomatch@4.0.3) + picomatch: 4.0.3 + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + 
to-vfile@8.0.0: + dependencies: + vfile: 6.0.3 + + trim-lines@3.0.1: {} + + trough@2.2.0: {} + + ts-api-utils@1.4.3(typescript@5.6.3): + dependencies: + typescript: 5.6.3 + + ts-dedent@2.2.0: {} + + ts-interface-checker@0.1.13: {} + + ts-md5@2.0.1: {} + + tslib@2.6.2: {} + + tslib@2.8.1: {} + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-fest@0.20.2: {} + + typescript@5.6.3: {} + + ufo@1.6.1: {} + + undici-types@6.21.0: {} + + unicorn-magic@0.3.0: {} + + unified@11.0.5: + dependencies: + '@types/unist': 3.0.3 + bail: 2.0.2 + devlop: 1.1.0 + extend: 3.0.2 + is-plain-obj: 4.1.0 + trough: 2.2.0 + vfile: 6.0.3 + + unist-util-find-after@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-is@6.0.1: + dependencies: + '@types/unist': 3.0.3 + + unist-util-position-from-estree@2.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-remove-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-visit: 5.0.0 + + unist-util-stringify-position@4.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-visit-parents@6.0.2: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-visit@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + update-browserslist-db@1.2.3(browserslist@4.28.1): + dependencies: + browserslist: 4.28.1 + escalade: 3.2.0 + picocolors: 1.1.1 + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + url-join@5.0.0: {} + + use-merge-value@1.2.0(react@19.2.3): + dependencies: + react: 19.2.3 + + use-sync-external-store@1.6.0(react@19.2.3): + dependencies: + react: 19.2.3 + + util-deprecate@1.0.2: {} + + uuid@11.1.0: {} + + uuid@13.0.0: {} + + v8n@1.5.1: {} + + vfile-location@5.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile: 6.0.3 + + vfile-message@4.0.3: + dependencies: + '@types/unist': 3.0.3 + unist-util-stringify-position: 4.0.0 + + vfile@6.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile-message: 4.0.3 + + vite-plugin-checker@0.9.3(eslint@8.57.1)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.21(@types/node@20.19.27))(vue-tsc@2.2.12(typescript@5.6.3)): + dependencies: + '@babel/code-frame': 7.27.1 + chokidar: 4.0.3 + npm-run-path: 6.0.0 + picocolors: 1.1.1 + picomatch: 4.0.3 + strip-ansi: 7.1.2 + tiny-invariant: 1.3.3 + tinyglobby: 0.2.15 + vite: 5.4.21(@types/node@20.19.27) + vscode-uri: 3.1.0 + optionalDependencies: + eslint: 8.57.1 + optionator: 0.9.4 + typescript: 5.6.3 + vue-tsc: 2.2.12(typescript@5.6.3) + + vite@5.4.21(@types/node@20.19.27): + dependencies: + esbuild: 0.21.5 + postcss: 8.5.6 + rollup: 4.54.0 + optionalDependencies: + '@types/node': 20.19.27 + fsevents: 2.3.3 + + vscode-jsonrpc@8.2.0: {} + + vscode-languageserver-protocol@3.17.5: + dependencies: + vscode-jsonrpc: 8.2.0 + vscode-languageserver-types: 3.17.5 + + vscode-languageserver-textdocument@1.0.12: {} + + vscode-languageserver-types@3.17.5: {} + + vscode-languageserver@9.0.1: + dependencies: + vscode-languageserver-protocol: 3.17.5 + + vscode-uri@3.0.8: {} + + vscode-uri@3.1.0: {} + + vue-chartjs@5.3.3(chart.js@4.5.1)(vue@3.5.26(typescript@5.6.3)): + dependencies: + chart.js: 4.5.1 + vue: 3.5.26(typescript@5.6.3) + + vue-demi@0.14.10(vue@3.5.26(typescript@5.6.3)): + dependencies: + vue: 3.5.26(typescript@5.6.3) + + vue-eslint-parser@9.4.3(eslint@8.57.1): + dependencies: + debug: 4.4.3 + eslint: 8.57.1 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.7.0 
+      lodash: 4.17.21
+      semver: 7.7.3
+    transitivePeerDependencies:
+      - supports-color
+
+  vue-i18n@9.14.5(vue@3.5.26(typescript@5.6.3)):
+    dependencies:
+      '@intlify/core-base': 9.14.5
+      '@intlify/shared': 9.14.5
+      '@vue/devtools-api': 6.6.4
+      vue: 3.5.26(typescript@5.6.3)
+
+  vue-router@4.6.4(vue@3.5.26(typescript@5.6.3)):
+    dependencies:
+      '@vue/devtools-api': 6.6.4
+      vue: 3.5.26(typescript@5.6.3)
+
+  vue-tsc@2.2.12(typescript@5.6.3):
+    dependencies:
+      '@volar/typescript': 2.4.15
+      '@vue/language-core': 2.2.12(typescript@5.6.3)
+      typescript: 5.6.3
+
+  vue@3.5.26(typescript@5.6.3):
+    dependencies:
+      '@vue/compiler-dom': 3.5.26
+      '@vue/compiler-sfc': 3.5.26
+      '@vue/runtime-dom': 3.5.26
+      '@vue/server-renderer': 3.5.26(vue@3.5.26(typescript@5.6.3))
+      '@vue/shared': 3.5.26
+    optionalDependencies:
+      typescript: 5.6.3
+
+  web-namespaces@2.0.1: {}
+
+  which@2.0.2:
+    dependencies:
+      isexe: 2.0.0
+
+  wmf@1.0.2: {}
+
+  word-wrap@1.2.5: {}
+
+  word@0.3.0: {}
+
+  wrappy@1.0.2: {}
+
+  xlsx@0.18.5:
+    dependencies:
+      adler-32: 1.3.1
+      cfb: 1.2.2
+      codepage: 1.15.0
+      crc-32: 1.2.2
+      ssf: 0.11.2
+      wmf: 1.0.2
+      word: 0.3.0
+
+  xml-name-validator@4.0.0: {}
+
+  yaml@1.10.2: {}
+
+  yocto-queue@0.1.0: {}
+
+  zustand@3.7.2(react@19.2.3):
+    optionalDependencies:
+      react: 19.2.3
+
+  zwitch@2.0.4: {}
diff --git a/frontend/postcss.config.js b/frontend/postcss.config.js
new file mode 100644
index 00000000..2b75bd8a
--- /dev/null
+++ b/frontend/postcss.config.js
@@ -0,0 +1,6 @@
+export default {
+  plugins: {
+    tailwindcss: {},
+    autoprefixer: {}
+  }
+}
diff --git a/frontend/public/logo.png b/frontend/public/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d3fa420386bcf0e080278bc3c4ba25cb4c620fc
Binary files /dev/null and b/frontend/public/logo.png differ

M* zubaS(5kI*TJdiiqvDQ@dfVD)*kzk5<@M{YSuZiO&`&+c@KmR%p`(X3quPbkA}1QU_x{bU3MM>A&Y)+ zeMcbL*?Ul{8P0nBtSl*k$3<=H9?O3XCZs(rJ*E~bb8>6s)^4Wb?kx=Bm%Q(t=qaPXP5dH32tDcikH{R6$p?_^2eJ;U`Z?;z zb*$0RO>#7>^U-qg>W%#Fg$p0Ef2gU;7)@PP^XgLwqrgy$SvTYN>}w~{vTfE3y#Dt@ zB9DKYpx-v|fe>4Oh#hR`yd@FZ?Bw-pI|9+pz3**5?P^CD$}q?5V|2$fpn-$2Bx^s? z8)4bed5COT=#MG2P}M03rDlUib_V3yn>RZQD=B0uL^!pBld@yHp}6GP)=2x&B~5}S zMFR>?axwMEa@3!h0-~_aRugcbrEDW2Hd-=jQzVVsQ z;2&DjW&1JwW)=verO$v+14fPDE2C|jS(_~hBGg&a&)AONvmF>U3lKFk^jpE#D-uHY zp9@{a?)2%CJGXat9^Cuc_ITPaS4%uaQ`72ls(m&Uzb=O^jMWK7KAd_0O(vPh?TK`- z*NAY&4ES_Fh$f45WOf>*P#-P?6*HcRLu8o{q_j1Gsgvq{D&n09Ih>{gV3iJMlNv;i zke?_iRP>w^)^f(7PMAQ#WC2U>n7^U|~B+o*UVAoL^%gUz(23nM8 z(<`KK{WMAaf^NKLE~LjC(Wg0tNT{J>gYIG4%P#x!h0Wi68vtEg&}IH0f$SyX80Oi& zn~2?#h2OBw>d}-#jDrP{W;=Wjh!$cVZ7)$%4l$Ojdp=TUkZJ?zePnRV6|MElz54-(#o*bdOfbuT7>%rhpHw~M+kI^faz10Zvu@z4B28zZG4D-Gr2!; z$i!;wSk_OW&kll2bk<8#{>VhS4W-!6X5vJ(pQtDQ(1^gjer^iJY(!a0VWLO588Sgf zJ$K&*k|A9nZ>vquA>CN^E?zT(j;!mVYSu@&8Ao9)ntdeTOvh+Z6+IuhlcN}8k!KF9 zoH++sb7~YXgP~Ukrw;f$D-Kok)%XcPbjsk-QTWkiOPR5?4gAJvh@k^(JleIdOMIxU z&Ir8%phZe#!>p#&X97lHoI!|;A%O^T{oHH!_8xKh@VKP3SqK0_5<;Z6DpIDq)X(ED zqD727kR>^d(^*!o=|KrHbiLZq0}zmP3?J8P5MJ|Q3%yKgOYijTT%7dDs5WoP@CWJr zzQxtRN>P8QCwTSc3}}F8tf-mYdAOpT4m5Upp`ABoL3p4c(+CCp04Oz{TbCuS?p)sf z+XKx!M#3H7hg}dNBoQZ-o5=3QP-yl3e-K388Abo0+HBFfUres9$HA<+ozZCgG<}VL z$S6_(jVYJb(+NEss=HBSpYd+z#ATb)RHLtL@SBg46FUF6YFhYvHU#Pd<8R}6Ptm&%fMS{XQK*V-Tq$wp*!!bF?8|beJxIH zQRJvu4I}(Rd;pRqP-C=%9!u`(CaYe`0BPrdEXQ42r20Tc)YN9}B#?86q2Sf}2tfwX z>2o{J-PyhQi_JGqVIgqlj9f!G6-jXgLE6KOyoM{+C3l1e>R&;Oz%x_u)aL!rSQto8 zaTx+icu{CW&#kmNM9uh~0w5VwI#L~xNxHA_hn+?#8IC8K>TFn`k4D`j@nTi6bTk%` z9n!Ie9a1LN9gALfE{FvnNR8+g9LqJrkMLg;K0^m0uz2XyYG(v^zQs3ZpBL#!)TGaz`X zcJlOZ9gFz6GOx)|5npSZnZ*p@iQV zFj&j5J{WFjw2qmyut4BUnBYZN5f5=dvp6PVhihxSNF-uXD<+5nGJ!KfY|hK3AF*2> zL8r0H_;<+VFJR#2*4oki3C5q(to`{dVE&!IJ3X z3w1)xs9Bp5nGpqs^oeY=gBj6+Bszayc|>8Du|t~yBBSUBr%wt+*T3||t;>hYS=N`2 zC_{afP6Ekv6o{hxM{k-Dn$?5Ik#$&OR+h-2RM!Fwib$uCD(w)z9@_hycJv&rHq@R( zM~fgo>8E`OCBA?j3~x0FAp$I6j{wnxl>^BJ>FVdBm`ACy>ye}pt*8?q<%Y4mkU(t; zK&Am$KuUrRD%@62UMK8yO@l*YGGm(Et zxgQVP{>!&%KeXhh=CeQQ0B}H$zlcYz(rlV72hP~FO#DXhwTYri6tyYQvNl^X?!!UE z$78W}x{V-)by@AJ)&xH_D|+^`PwvQb{gnr|-uj(Omo7;u5fM8k36lyvGrFBKheSo3 zHR1%>)gVSFA-=E*942PdfTqJHki<1KIyyIS?ARxi*{8@;UP-~y08VZvqtJB%#$7hl zStl>@aS|#jAQD;l>j_$^&Va=vixsU#`i;E~nVj0@#_l5wnTq~26hPxM7|F-%A~eA0 zb??30s{1V#mSPxrQIj^xiEiXzj{)Xf*fAD{ZV5xbQBO~E>L!Ms9+MsD3GWbxN)QE* z;?siBavsPchL$7vfuW{l)>39|2r-s1q}hnw8$*F3@)QOUCRIzz1iADi_$b1rLCv;r&R37G^D&unY0q?rrt6-rfH17rk- zP+&+Yy$5?H_}bB1@-gZufsZ4!CrVLf_A7Pz(*`GEk+{Uc`K0Qn8DTSa>%r2^KA!Q5C|<2o~x{0BJ~h;)5K zYOym3k9CAShDBG9cyjkpLLRIGp&C&(SQ%hc#ydk6lZzrp;%s=hX*r``ANiH59ueZ4 zkOktZ`GSNbMxm`r$}UC_FlaegfkwHFY~fa(w$Rq2HchKct>{{P@`a%Xez&a4tjJ;u zK?Eye)}_;dAkN#}9F~(B**74hPaVMz3{m&G2%=_7tL4Be86Zn?sNL48$^kh;F4{Sb zfKW@7F#<)qJJ)aDeE8GP8s!MTyio#I>%Z&34SK9S74Rq!fl)%5~oYJ|8-jUX(0fe+?QK&@9vOqe^WZt;eU(C4Hx3c|}|A^r$WL$Q@^N z;q`{C6?1R|Az~+6@z&jkY*rYO7_uCy$AVeb=0!lKji6;^#yl`^wj4G)pu%GzHNTCb zWy=gXv`{zA7TSyqeWR!*M5p-}L7GC**`1xUpMC%33pb7hI*>b)zB3e6zGE+QA7ns~ zIzXmF5*bFD1c^L4LqwQt`7*_6*?|=?Q>2Za>Q>JP%8uEYLCCz>OIz02QBk72n-l~;>^sbjqv%6TSgUeRJ1W9aM46kerM4%@>&FWz29RD7LZ~G} zo{6-(hQDfIT^vd%iM3->f6iDW?JdDBWAM+9$RDC&dgJcH#i@U1U4|TLnPnk~YO^fN z`~3}CU6(DgU+T2C+?J^3ME@N`%NSbdi`HU9Ek|RFCWM^NSYs$a1cKNBB4p9CbHY-Uaf+me42Sl_cvq|3*1n?q)LY=v~(P@g#qee5L z(T?;f)z{5b7()=5d`piVNi*Hz<^HhmhH0z&9_xuh_=QV>aQ$F_kD$U}M0eeC96ux$ zzvK=gNP?)U0Rd?QZ|mAHV!75zNw+xa7zs`R&_a$I63RSWPmEF!S-8wC%rrjKmnl|u zlwxUN2wfJ1uJ9p16yk?{Qwr@HX0ZoA?Avr`-!fY)^mre$?X7MzqITqU$y0r>GFwWD 
ztsvBhnYm1;6*OycqGvN+_)Mi2T@$(9eBAVyqp9>Q5~U%MFOt-oMVW{DHh>}k<=83O zjs-diyH9#BD&ylAuEWTFeM$Cv@ik2oYkH(&55O!Y<6cg614}8 z4&+CPE!82By6@P&*QaqQ8dsv!_^BUFA{bg$W=2nIdvR-kqSmPS&~o2lP;{^| zTh2stc?YxIzhNQlx*U4l&#K-RMdz6EQ?~>W0BT@J3hnyNwX?f7f4V(e?E@0)v~t!d z*ObKXXgyg(8fYfu5zobk`r7MOI2|*+*Rzq2G)afzNPr=w*lgCptd|xPpi(KhKCU6b zG*TOJD2RFUH}QE3jH2RA>ulI9SX(@&;5|Wmbusq6VX>L}$T^+cT?j#fH^IEQBDm&+ zSXfelD$_A45OrOq+*Te++QK>qJNM$(lskf@ZY7M!*3T7i#kLoQtjpXc|5QT| zG3&@Z(+H}hP^)P1T^?V5S0i3#05#hZg(_>9_*ypz7&XH8KmPG2e;nfzAO3OMS&bC> zep3)V2#DI)X-c7&zVu9O)@u)=K41HY7!6k41kp2|aq`@`(=WLB>CGEgXP0J|#GPmI zEtXzFvBX%@6@5QeaR8(?_fc*%)&o9}8269CwogF?xR?kR+}7(5C+9UrlfAyoH(6?6 zCbhRI$`yZgB9 z@7g~U2}Cu7?gKteR>ZJ2^Hw7$#=BkxhQ9u;hA{fQ20O+ZwEc(ik>CHMIXz}>%)~&BjXS3M=8%*}R0%5&@@B@PQ$-VG6f(KYdbZl5g{!oYOJ}g-f zAs(jhO19{PC_jkNA4*M?Qn=C)slh%dLkHTc7*Ze;=P> z{>O#x>akbt_v*pv;`j=)KG%5D`(N_XotK_D^U{h@M`pFg*(;i$2oME~8aOhDgrc+8 zu6^{IFW-Fc4SB95E>>r$ia*d|L_f-KRGVU}wQMKS?j0h-MqdE|bb^IU6Nm3R)x(zi z5m98~ATt*RF)-u#o=rGK(kS@o@TPV|PILkM08w4-A^nv@aK}1$F=;dqrhSy}SW~UT zi%p13qn!n4xjiN|tPbz27=$#BeRE9>Qj=MsaG zLk~v|xk0r9W=k2>O$dc7YU2k4X(EYU^{sFH0VhH)c|0ID?Qz7-u?A?BRwn z?j}=G_S|!zV&<$M)-Nk5_rO*QbA6z)A_P&WJ1{cfeqU0^qc`*($z)L^_<$#h`Sroz zn?AbXY#fgj|I8(iy2Q31&O{-sf-2&iJI90mW?fwg1c;0-sT@v>)s1NH8K|*45jq6F zqIo`fY-D->J-riX41D>k5y01p^I~(!w_A4}ha7?rG1{hm-O*~nPu-9|(MDfNOsWhF zeeS~c))ou*HU?MA!%G0OuP?pT=REYG{f3Wz#E!2oEh#~Z_)y6#$ zkJ`GgWKct$RVcdlg}uLSKem|ljG<`nnMJ}i$E;6UwTh{A7c3}?R!3T;MKs*vCY>v@eMUiI}u)cg?(VAr=H- z>9z-!j3n1d3rHwE2@t4_=`p{#W33Qms_%InTtz|%#+N%3MWbV^Gm>SUkB`QTrD2b| z=&>Gi+4i?K|NQOWz54BhT^n}UeYJrf;wT99a#JhB4{``0^!2a5>QfJFUik8t@813L z-7hbX#hYK%cIT@cwXuuA9Sxso5p+&@Peb>)ZVoQv$a?8au5Nt(tDBGAd+^-(`_G(F zg&B3zb`kCKADxRs}5-@711|wQ8vJf58mrekJ+0a5X>Vo_n5IGN0gM1+*mlc}o zfjHKdnT;|%m#ms^=ud8Z{;`{XwryYcj%mirG(CK1N+Vd2pd;)chyaoVk>t<=58b*u zzoqedeHdQ5`WBmFa~|AaH=ncPIOnjzMLySR``T!~j^lxm^CviJ=uX1D;l^N~!$5Rp z@~A;?JkD3gy3}<_(av;uI-vX3NkpGNYDAl6iRlpV<=iqSCTDTj5{P|t1hq0^Ed{nUuSi|CLC@`eHPG3Du~ zOj3PVAN0z*w4 zEx_kMIm8f%-tet&Jbvre90b+)2(a)GIAVaB4IV=3Ip(~o_M#ic(m>8neG;|#(;Im{ z$nzDWXTFh-1e}S^K^Gtz(-}w$Cl-map>It#dc%#$QwGDSdc7XI@%ZiceB}ocM6T?p zg;3yV!O2LtqXv<@jYiQc<#fD${hQysEYJ0cvh2@jH)9_ViPeG=HrgzUoR1lBLV%#W zG&-}YZ9~Vki-gWlSAx|bPCyMAMTi_sH3+oM@i6iXuQhck^Mpm`&t%9~Ye45JI&k(I#4P z98Xd`5qbQmin-3W{w_Z?5Lzm9WwvvOY8^v@69mCn{V z0+{$*{Nq9eceEuVmN42#5aS47ja0;`^MX`;Q7jK>LMEjg+!S(zkj{tEh#=#BD9XHbiv<{NFWk=W9!1bFa2O=2iFkj3Ha0zGfhSFa3d1q+5W+)=D6B8g}7Xpdj75Fk4 z@50NmbP%a$S;{GKSR~jG$%MaAxBC9Gmuh1DkQ;z#rQ7Wm@?4XGD2_cM@h1_aOp+A9 zD3U;n^_r368d*ymy^1|%kwoO3poe~wAm9*j-oiLTl_jV>4P+NID~FKRd??qFFLT*t zv2YMFh=7@uSwN_PrIvxeXxxWoFWdgj!q7vvo;<$=c-C|2A_)*sAw49lc-=H^gATIb zl8wBJ%voDYbZ4!rZz(Bd+}!|A=q_PtLrXn(lNF(81wd)pA1z_Ori(6+(vniFk?5tR z62XYYe(AJ0k=?CkRQch}Pw(BnzAF$3J;ISb&SQ)b@Ci+36ak`23>inydFIJ;LeVd^ zxZbf@Hj^-dn`Bm3I-=qneAUW(c1_RveG!|P=#Duw?-?C^x(#8|L!M8doqCGo^A^zR zi+#JXOy52C@Cd-Lr1K(tE3naFU)EW}**LaUCI?`_Z{I~ujJot~MRGjZvC{A+TTHRT zc{Iv(&xvHwV;GH;SRLl}LdN=Jg!kQ41k($}NXduG9mUh(-Sk=mhx0<41kv56ZvI`3 zKcvfyA=j7fbLv2kk3M->OPjS(Bp>6?KlX^NJM&voPlup009BHRu6

0Tf99VT&e` zPf5-PED2o599`;um$!=?nbaMxb;nj@D}}JM(iKT8ESc82E7QrCMSt_P6@7HQrlCdd z^&Ks7CqIstg*yJ!?Vs+w?YiAV6^2f>$&d#i(g+AOK=hjDeDs>?^!I+T{nW$9E|r&@ z;YdzMiSwFV#~yeZ;MXCagMJ<*%E$IrpJZ38dN;8mf5?^8sjV^l7Q`3(gZ5rx9hw&{s zd!qmkLO^6j=d>`luNpF?$IecQb?uL%>&SrRUC|+20wPj#{W+Mmeq&+i)+V|v{M7d$ zhnncAB@riWTEwSzFlsv3A=`dy>rM{Sfdgwwwsb@EYzP2I#<4Dv#PVn*KRV7WMgi2( zHFEW=<@mpoTl7c{9bMTFP7=XHTe6|USm;_y#uI6^t^`tVtP8CI#L}%R2q>yBCX;SC zQCaH7=&{?sxc7sz=(7lxv0u<-;K!7x!Vj|o(JP;G{rdHf?%q@V$E8_HArX)vsMV3j zk)w53>B@RJBZZ1geX{hC9_kYOIk(i-h`B+~=jMQ6rDvd0ihP=>tk50Z%8>+imP>mLt%!?_=t0?@uLYmn-f zo$}thniU0x3?erB7(ceTbvKs@CiozF=J|?H4q$`>$sAl83G!AQAtjjfpEM@T6NLt_ zkWQ$uB1c}0nUX_NOB(sgiu9V0B#i}f1e%qxXpq#~WDI!XdPRE7SF)C=uh8uX)aMP76g}{N}?h+?XB1%%5SouL?~>&QMms zD8+j3BRBd`o*7ixEvj)7Nt>adFtW7Q1^DT^o{U>5UxH<4M~+C|aPtIGN|H_J()+0< z5!g%nsUSlqIOi5X>q+G?EU@sy?^BG3#A4gi=^?R0NSdrp#}{;>;cKq@l0pe-uuh{k z8(O#iitM3iCQEN?ZtEo>l$|VlGOEc%yE{h?0U>f>Lk@Wh(Q{v;#b-dL-l`Fny>(gm zhyKadna0L-S5aK{o0LL9LWocE>e@47&+-y`P&6}QMwGo}-(8}CQfLt>Nr6%fB_%-3 z62Jl(1X3(VP$WPpIK>|vlz<&bYnsHMhE|j%5@Hcz7n|5u!TFs#kBn3P^R{_2aTlMQ zd+z$yy7sUV5P;(c%XF*g9|79`*(85qBf*USD1poUN{LXEFrY206^5-WAR?9OFpoa% z$`i4;PNj-cAxTn63!|p-z{nwVHnU%%mN_+|Iy z@u4R_^O-?KhEABXheU^lHBoSoHphq7S9=dzBOJuc&?EskShQ5Gm&}#+aKDgtDNT|> zi9Xk*KFrbt8(-I{s*Ad;-xT_&S{0~t%2LapEE5%F)n*yVu?CMs6i& zMVIM&a1oR=HC#`WCb(r{ZaynyXz?ki5j_h=T{s5k ztD!IuLI~oKv4WKJvVX7mkWj?+R3fX!@acLB|BT1gcRDbUFw{|uE?e&X>E0KqwgWRZ z6s5LjmM2|7QSO-W3S;(`=f35+OIKbkD*Es#qu}!~+(G0*n~5x4&I_X#S*v$`;9~ zg*yQTT*oRRjCo!<<4~ZQ5kUd0Btcv!qMwvc5!YN}$Mm7Ds#74VtPjLR64CW!O1KjD z2trstA*i0|lc{<`P?9+l?j)p|Q{Ab$%f)72FQ9k($@*97QG%eb>80lAh zvKa>#@%Jroxl~+kkx)tBSd3gPG=$p2qG8ab9a&xHA}Y_ZYiLjAMR7)3O=cZ8G($$jQRh`nhU}aHM*He9Q z@O8)%i|#^2g1bp1M`OlkCNJs#`QA?-`84*-JoHdg#=#KMGJ=dE>1t`~E1wG&eec}w z^>Dr7WmL!F=NCJQD7VEX9e(elm6=krU4)Z>5n2XqByOSjOq5a*c4peLRj4Hf=+W3J zF%+3t87*|rSPhvZV8e0;lZz=OO8nS2)*1^*N<@*6O^J?1S#U-1s6oK)SenG>zA7Pi)DUqXrJw^@ zNuuwM5J;+myM%owOos2@(pO}tq^`0|Wiu0u7(|7c~clgK=FtklmYqM`3Jha`K?PZyXB(7JEtbF-P*DuaLI8|%6 zWvAK`)SglK(F7!Dq7H(alvtZ7)Y>4bVq317bFS!T5fv#jjMa?L=SmQmDT;=+e`XQ^l?(Qg)BkP&hwf1qp39#9cCF=>;|$tK+KNNdR+ z{H!Qpl(BL!nqDat7}SHeV>6~peVrfG81dsck7}srBUX7BCxbtp{~_}b2m(FK zlF0$&b1sN@bh$wEs#lqaEQZ*msb|GQq`=DEq#XZXF>1qOM7cH_+M;&FEda&9$q?LP z8W}j4D;#}MQj!Wd&gaNO!#N{NRIO(EE@nn77Pv#}>irDcuv&7&NUs zEDywl7P9&IGp`qlhN?d{NTrEKL!5oJl$@Lg1G-$G473Q$H5JEc(mBcs)}*WcV0D!qTv$=I9`i zFl1wv=Q`dXO-+30HYMv7op$^*#W|DA&#+6RIAA5+w-}lP9(4?w*aS5#J`!p$aCP0{ z4_A|TlXc5Nm5fu8W?~$`(~wQG1tSbl=ULEYfhVa0p%zaamy6Fc?UngJY&T0yKcud- zJbUo2ubc-&7i<}a+$|djq6<+*!RRd}B3ZL5uQ3;WcK>htk9n|mS$krvR4XD~$n6(F zhWG-*gLfvI#m%BSf4If`27?J*G&4r_%&pZP3rmoDW@7ia03<3JBPF6Um@5+5Mw+9J z^wr^RkI_tqjPznsHG%knXRL&953*XidNBMWy2Kn|jLDzaI7=RuCHNCz7-Fyv(nY17)f@|=|;=YG{2UB2wHc4xx*Q)IjR{mTkX;YNt^Q(NUt zabYntM)pI>8(Jw6Lc|Gcx(=VT2PSQl)*-9Y|?h%;JAzYULt!F7x7Ly=a-uMDk(m~ho& z`0@DG`oRJe7-W11`4a%u?TUvOP(7%HwQCOxX$d%-Ohma3iX#g}yzqrg^VzL^I_a9MRNx zTG|=Uz)O%R_y(~gM;zUD?UWh?As}*$+D2E4w1$Os^>XZf9ok7nA=oQgR|!WWf+m3v zlmvumE6d;`5511$nEp)kw7UHR1D6R_`h`&M&)joApsHa8OPQIKIe|tPa*#;BXMRx_ z`kHdC#*jf2I08mam9ga@vY^(kqm0?GZ%+-{`om`L8398|nAuAQ%vIuuu#O-DM}iXw zI3@g$=$NJEAiz`CpMb#1M5T4Bo{>B&3m1YNQW{01v+F@xIAl$jNH>kCx^8B;{#pVK znOL#UI1$H!6wy>0TBx;a%a`m1e5uG!9F?%15EpeYY1k|KW&#h!t*E7&ie(dt?!a6~r%Fx4qx(u}w23`p=~%Lm zLW-MRQA}-DVCy#26{8z#+u}rxa+Uv`{I50+7Jjw{(JbfQ_H1DcL0W=*U?a zEuz)UiY}XOq?ff(5B!dU{ZsEg|CA$2hB?X@sycV>4I%?5NXRe(K7pZYuGx2`WYMuN zy?nm^;6kk?5VhO25)5N=Q}PTNO8Q52kUvGXqg_E8(Euwj#N#>yv>8wOz|QG=R@r)u z0Tz5Sa%)A{hfw1fE*pVJZMF7g{ah1O>P~x*^3eY`_$J9Rs#}E%{HV-bjX+Cdtm4r> zT#Nn%6X4=bBTOKkobyqV55g8WNgF{>X{2gVx6px~=ZdcchCHBp^O+(cKqLs^+FMxL 
zl7S@<1S1WbG8V=RJ!AxlhMaSy_Jf=~B!{8~-6_B!cN}O+?a=EasQ9YeRX)}6MBzw( zHC*_bHEg{SV6-zS6O<(b*S#Vk0jq;n6<#9g3JtZeWTai0P+oQ-UrGJ4uA0VIt02`< zXF^_y)molCbmnSXQhGa-${1Q}GO>jN`pE~bMgR8iLo#YFow;KE=z=?Yq8 zt0I3Q)d6TojvIo|kU|6=jYje5>Bav2n_qk{^+Wh&fg)`dL z*R3m;?20KNP6OhDX?3uTK%pI>!_WpF& zhgVdrh&9XQ(6DcB8j9nm6Rwi;TBw^W9dihxO=+BLqbSllJb zY7Z6a*BkOPCL5VTZq>`FECM33P#85q1}jya1R+kXr8I`XP*K*1u%`T)rVsw4euR5t zRR!WFUcpa>65Z712B$ed6hB0@@~02=?_ag1&M|~3gMZ>^GLonk6#*RJV-5-wnTY@p z7{Zp#pQe6@JXvTQva7ggN@wtIj#A^T9DNdGG(HJsb(kno@GogTEosgG-P824;evwVWJ@}5{i~qB(4wtqE~6}t_em?zzRlsO`vy*q+KiAs%WoO zut^nqfG|iM!A0nxlc46BG}JcL9$Nmx_UfsR)JdPKLO>tSPzq9mnI_uX;zW37S9qjD zCLRjPX*d#;LOk5UsztmBO9m3l6r_{&(AR^9X4Thmn>zVzp;8=s3mZV) zO{XzNsWZ$FkPI%L6AG$^@`1aOveZ;6pkTVH*gD;!oI1fLBR*ZmW{H*(fs1@S$+4s= zC~?0^x`TunPm@9+=~NM46+&fun0)~zvW*#QJx2MD6+|@%x?s<86bV0Up{ciHh;{Qd zdskLAK5_2%v&-!oWK=886lK|J6Cx$&^zp6QMOXw2W~S0c0u~&R`p}oh^n}GRf&vtT zkY0tNX86OzlX!|JHV(bS7x-KkwecZyR>C6_R76BsX4&gH4nf0(8lWoEO{hnBBAV?7 z66s!ayk^r_HxlCvgKda_ATva5M)oXqel~S9fE1^`fv(mYo^o#tZ7OST3<*L=Y6Hm; z>p+kW903nIBos@rMV5(3f@t+V#fRqH1_XAf`=B(sN)R=R3L!zqei^=g=D0?~d=$mi ziMoR>A2~UFIXEKzqmP9YGYE5H%7|9DUa)NV8!S+Ys`822N-grD;Tn<#)=WCjtv`T8VV(J z0YY-@wX;GFX|3NMHE4iHhO8>`Jhy)T*wC8fr-+)qRl4FISO1soFiV=2SHSIgS$%a_kgZ z4ivRiBC9(*)W;O@j^drY#T#C};t5HmKzlr+3}4K5m=AuJ@U zjkd|!yZ5z7_J2%w2{9FhKvb`iJ|dH^B&*8Qp&>FMGnU52k&bL4C|Na1U@>Okf@Fta2S7?C&eqRkPU%xy0&z{$Jg^a1 z?ve`EJK{>i=1c}iM5GKXe!ed7;8T`yG|dBCBua@uN0tm!AZc7!)?mar3W;sfks{FM zf`3csD#j64jbBwDjQK+hAUb(z?{^PK4@JHV^Z+9H_MpeY8XXi?jEyt+@yfljFlKMP zPds#Tq!G)R3q%pZEY7fvkHibWx$4av2o zfARD~7}>1Rj9AReq}ee`0*Ve=MJp!M4=Lh7tom{Kyd>ddZ1J(^N|AzHKyL^{<6`Tw z3=L&`9)q7uR;x^bvFFAAV3*y|^fiQ;vDEPvswuq*& z)fK52821LmOV@?S^UMUGMt*xDAq1hFtANP$he7t zv0^8#k;g)pfkSng)@~Uw1sdTW_n?({hpEz8=ppwphHBbnzaPxi#u3{XwBgFeX4aVW z+;bA$lzt>S*k&=W(`Uv}F^BjHFAJtD;~hmHiS~Fbnn)_iLNU1rzk!gVtWofZLa;f$ zgV#;t^65(hLtos6A+wNcKR6+VhRqn4a8V8-FHQcq|Nh=0{bh|VAcYg*9H+-9DvRPq zGRmHmORU}fiu46ZxiM5rbp*4s-`%|bJFL4ux4HV)e(%xu5cuYgy8jHht*B8Z_((X* zQl*V%Jt+`%OwikPC1@+K;@iH{WBY@R=qro&C}N z^D9?CQC=qp}wrFOg3{MOz1I=*@QXn+3X*(=7!g*PhD%b8`iWOb7^#iZ$oHHuma zn=9c;PFX_rP({v}ly*noDq!ELW${oQhH7?1T(mT+y9fVlrdFF+UOe=rm!E%**eE1* zPDDTmAX!su3mOV0vN79d6z$owbpGm%r@eZ1@xdB4iJ}l-p!@`N)Mz2{rgrpoy-O5> zJmyeCHH}9NwZ1_&>?p_?$FE2C=vsK1Aj1z`jaf0lXpgI0$emrHXTZA8jAN9nW*P3i zgN}{NWAGEHA0=4_{FG+a)grH$NhEMF6f26XQ5uX=Q{2ue*EC{AAj+euwd-~HQDbQH zYs0q8%SA+HqaY%{vwel4-WI1`|M$Zjr&hX5GFZTfBx}SQQECl&_-_BlR!E9G$zBH#e~hVYTgfXK6!mxXg(&yfO!f2-JZp<@a3+8y)r203seL zji}aviV+?cqZkn_tbN9^lj<5dm?xA(l zt#_|({&cZ_Xl<=gmQ4eo3DP{$`1NkpHPxtTSg1)3mbcImPFX=$#ENuuT1Xjq^|6ICk&fdXE;0 zgdYkV6>oH=Od7aTBn~8G*u}zm8=XNv^BEN(V>5ml9uZ0K6r=TZpH>pE27*eCUXKB` z@tB4^(-{Jh{zns@MPgA&TAV5_tC4d2w5o*kQ0#$%S ze~O=>yI~SBZ%~m&v!wKx!9&K-AV9<-K9ozMI1p(xgod;BPMGDInw+a_+5G7)5VEeu zbSXUeNYRb(gU2b$jP!SJSrvw*Uqvs0KC)(dpjvNseHB|4Jap4dp0Rk-x7OGHKHtCN z?3xn%64W#?@)BMIsX`afIB~BVYIYTxn!K&j_#w9p@U#F@K^&ySM|3oHrgU3R>h;+2 z4#TP@cGVUh-v8U-A3c2q3iI(dj6VZ@4+sJW*ux4%TJGOV+f))qc=Orh%c*)E65w&=c^{PPjS%#BsGwbU=p6y+-Hr*^MD{Mz)PaBAy)`vO!6hI{h(%k%X6W5Y3W1%j%S~ zRbz!i%wfTn~-~1{NdfVGx_qx~JbkjR;dgnXgBG&QE&A-m}9&^uh zx67nJNX$TLbb*ri2huSIm38h2D`kL0DH`&qsjmn<%_g`K8CCRB-lNa~T$Jr=QdPa# zZtplWf9`2-S&4$!tAP;9&V7y`3!;!jfgj7E>p1MW>Z+TUHnu+h()mLVwzY(w-ECYn z;*Ppt#)Ga1IlEjj)KZBZr%Vfht>_~iMY<`FVF1BR1;{bhaZr2YS|VA%iQ#8^H0zU? 
zGa`fu@f4b-qk%D{V~Au$Mk0imV}_1Saq+3K_BdGEKEwwd)S=CZ$#8PkK1R|4B5*V( z2r*pv9}j#DOLmc_21YE96naRD15W+zA%=!9MBZM${UJpKF_xai&`mPBHV{6@IxDfP zDgqf1s4{M1)JWqg3Cq(%=$V|KJ^sK~wcekDNHEd@LKwC6)%CyjXQ$7qPJ~YVOcA3> zlTc$?R+p<#1DJ5Zgcb`QI)qT$!c5hXqKa%8_N+cd_|YRC5+0$bWNg9bj$E|f?+?Fp zuVRe?kp+=8wYCc>SN35$HV{NCqiAXAY2WT$a_7Q{wn#^0QUe~Ygk=USf_j%L);ctl zO+?%X*$k3Kk>Fw(<__;dI{_MkjA9|;Mjh2&cZw0s*3Y3(WLOhr2!}KgcqmI8;eoHX zWYI8ct<9)3Poq|SwcA{+k6b@1fq`nuz?Gaf+c+jNHY}?=j0|bTHTxXCG#KK69m5zx zS4$J=5DV;Vlhn8SfE04leh5SNh)X=d%%N(D>Ji$4u+TUQzzM6GICKj8NaiI%f+GNu z<0ntcmTmqL3<*T93nuc)VRVz&=(`W^KVsybwJybI+OsL@(J)?N&g7RVj8x~AIy!`) zRv~ezV~Zwg>ujC932DuiaWqA=vDFbNb(B0^sPunv?zKzIJ>PpZ_KZN|_RM{5$s$s1 zITS<`DB8EQbpG}KntjKzmgo$dB)=uM_j_BdQnn6kva`MeOI)D=vmdg`{#~c?b zy6>#uXTw;n+ZysUjNSIZpQ#0d;%~r(r$)K0HFN$W5l)!e>G2FM$`4DGs5INT4FwX> z(o|XFx6yZ&xvxwLTBwV-kSa0eChL(k81X4-`yAHtlOim+s%r-%Hq zP5EV~nb;t(j6^iN3~q4(QFc6Cjfx>5h%V_2YGjOQ0=)Sk1IU z>`l2$RxoOIlu5(BF{-n+{OJB~&aCWx`qe9Y(L=mI5c{@3_00ne89y8XLRUQt6s^4e z7rl1tgqTS9!JDsjipUt)h?kOI!VUlv3U=wvE^q{dN#&7nE5=0V~%P z@q`C^^}6K@l>M~Zc-X=}jG^seY9tYwql1s54Z+HB3VK|(fW;hDW_{olgS2va;q zx=7?Qa?Dc~IfED-1ouh0GeHE}IIMixZLb6G;4UHG2qFo_0h8q`^Lh6BqeTZfYRa|yFWY#SnHhdI7 zxg<>ou7rOKC=4n z{vlyVT73@B+qfarIPF4wI0qHPC>Vv|ovc^Gq^k_9{Y6hzW@7_I&7&!t$g@O|IXdxN4 z5{+mD+CNZEJbA-&%>Yrmy}UU4>X#g&AXbLVnrH|^fuaEwK|sMo29XX|!9{yEj$QSl z?;V#014?VJVO8hB0IwV#AundeajPo!A3RHe~QnEKPqs5m&^BMWeLNVkV+Oe(!LjJBErVZhbPbdvr`^2_-zanr_&c!#f-5JEQp1y`RV* z(i$;i$R*XR>Q0=QV4nxh8~Y@ zmN{3J>RQlYCYsI`OIWLN%t*s0Bj{K%fI3t?YHSE@SxIWBqce|qX)E^l&HlktuQ|5& z*hA`JgNZ^@>yQ&szJL%YQjyxm#`FHwyL9>SyCtrJk4o(sfhbB^i+<9*VWbl#ZH$&h z^}d1=yJ-_KWkvvsfuE95Tr&v^Wlc$%H+g3d`yYtt%c8ZIK8XC8Fx0p*2-MZn^3Z&r zbV0_@21A3gk3=VZL6(i(5uzA7OoD@NF~V1u#D}QKKidC~hkBZZhL;P_f{Fr0hEC9s zuyp$^uewFhc@z0oF5ddCZ(z&T<~;bty-8Y`|Jk@;5Tt!efjmnkiTHRhGm=9(s2`et z*ylEZ5KG>@Fa(Hl2#JUUqQkKQNY~0yJAO}ZzI^s%nQ#CG*On*yzc~D(jsFb*8787gmxUhspPuHW zQ$R@Dv##6Nc-h23iZPtW}9d}6&vxpLbCC7#$XNTAkoIs6w&vfPLMMmy%O^g~e z5s;L;=1GlY;lxSiuF?X1XAx|y5Qnwpk_w7a{EVU-Z4@Jt5>smaa`b8@^$bP%q17-C zk3!F4e7dD~^n74e7^%n4z)#X`V`y%s_n!x(sfSD%=`x=IkqbZ~P&DM1=_TOEjv=f; zkudbJcdh=ce^5DBIovUE(L-+(Hq}4#kphbpj~xRQv;d7ja=&V)0NC?N6LPZ*q?U{V zL%rhoFdBBL|wn}4~qKR3rU0v zFG@mEbBoe^k|KKc4;eMBDj(zNll^FRJdLxWMB4H@`hVDe{`{~xLla3*M-3V%8m6Hf zKkS0gv!3xRanaRZ`bDp{JSs;Y{B(*n+o@d^(T0WQQu+h^2H$Vqlm=KZz`u!`Rt~8>yKglNT)NoB**H zDN89yI}b>c%{C_t2B2qwzyS#Z&{hxdab zaJ2uNQX!u?Bpy6lu3*LlpM)xXY3#&3W;kU?A~aCNF1{NXs~z}hP;70Esx$QpbJi+( zmZD5rO(kl-y9;Hp|6DosHlPw zV~AP=qZNnnpufdo-a+rm(sh_>QnXOzX_y1Ukc zW_;s=Y-LO&ijuXRejEq)fsA(jj@%3~LIzFeD4gv&Y#n)fEsTc;!#?xk5;Q|dAj+My zJkrRD08v;my;xj*{TrMA?jJne6^3?pV;WwAv}z-Xu4mrJg*TrjjMMX!b8adwQICHg zX4%ZHJ3f?y2nYcpR*oT-sp!r74Wo01-+boq;al&Oqc-0^cy>+px={5zeG+GK{JTxw zA+{gc66vT0HfdFnPo<<&r>aPf8Y7F;5Kb8+bV{FU1M6I=sf*}ncdMFP?U3H!^3l!zP87ly!Q%*#x zrxp(#S_4CK6yJKzk+B;Xf`UBTCPOOK5!NX2q0(oXqh>#Oh(LB;@4Fj1_4 z5c{=I(Y0bAEgkjd+?ji?z3ce;4|@Ge*LIXah-;|H#tJnC6vDbooT!O^Mq#xg*cHdH z3zkM=BC`J~-Io@U2FKAKt%#G_wH@<^PQ7%WQm=X$Ki9=_ijtw`}lOQ1tMPE?B$x-Tby5Y$) zgNGdGi)!RDZy7_xhqmjXI0Oyl>jHiZAraBXOhP$|#6z-Wic^!zjp^osbb=B~ti$p$ zefO56^jM}bhcuH(OQL*M^+WT=H$VPS^H8{EITHnp?BM4_qz(MAxbV96uJz-;>Gd8i zh25QU(<0M|!4E<@+9X zNS#G6HINfy8*#%T)9+BDzF*Om{v4l=QRi6TGe9()JHxdRT==_NRn9emP81hPaNd&) zEa{2@Srz7#Dc#o_<``kZ!#; zAAJu*~JIIa zNlM6(Z9gccQ$iLAIWQ9O#*jq?x`=oraB=tYLjMm3&Tp&B_p%(aq|SLrTdY7&4x%Sr zwQ=3~7yi8W=!H=*Bm|YhF{_Q*q6tX&W!N&+f3%7GOqiW^)pQA}xMS3ba6D&U$N^Xq zM@0;u(`v&^l;XiBiBM$Olagws+00EFna-1*P{b@@5fvjHxE4%8N24^V0-7D9Mjh(>KAgZPk&Gc1WHdgFpp1Q+0E zegI(R9Bjj30NxFT3|GsI+?E9ok$8>ty?0QQW9U{ZYhmWv{rk0PXF){Q0;A8Z{^ewE 
ze&VF4C^BXgqIsSb1FY?ZHT9XUgd;HH-fGfjExy^R$l333vpsksM>`_dkDmS4DP_%; z+-K(YpqqzWd%bc!ltTz2`t}7BJ!8+to+B^(Q%{13_@WLvkxBD8ri4f^>ynTUFDoP- zWZo7!gb;@wb&v_91Y3WCTOu%+2|aaju#q`wOt#=RK_!{27r4MSOs8(Uv1Ilx=;$^C zg~da6Sg?wY_`W{N%G5R5ug~K{8XnVog!Is`92(}KJn2fLdN{~v5M5A1w+KY<`o?`f zlM>MbU!o#0b3e9#o>h4pSkhn$|HI3qU&|z5MMpNPlktGOHR3}948cV9&BB&#ry_&s zS}=2giEcb_J=TPC}=F;hJhY39#E>B-qq9B)slrT7@*1`UdUbuDT#!_CEA6ZwcA@dMx;E~yofgsSM#qmjB*xJ~7 z@#im@Uv7KVh!#D*Bo?tEf@5ZchZ&ObPWQ(U>!K6J0)x+RQ{i$RNsvb%cp@2p=Ef8DQwf1N#Bd z_16cCuD|~9@zqZ*_73gXQ5@^GQo*Dv5&}IkYBA6OP`Sb=z{Ig7z{srCD-6IAuq3Xv z313WsW&YyH`9m*xwQ?B4xegX63K9Z70_VSOU{$uMw^Vat?o&#J_tR_}3{SaW_}<~{TC4{TnH zAurTWjv}bY2+H@{2{Pu22r(4#p*x5V$#-X51Cm| zQI4U24^R>lJ?RV2xLJ<=TW0Ur(QYeXRJ2taqqvcznBJ)5OdW}o<6lOCUYCgI3Rog7 zTa!zN!Ui&oT`M`hq|ZEL4V0CNEi+M=vsOY#H=(4+p_GIq8AFGX%#U<&SX%TlC;_3u zgD3#igAqp>%(COQ`_%{zBu?a^{L?DqVpGrQo>NWSq2BRzyJfWBBP9{<6t{>{!#Rl0J@DBN#W5;(*=w^}_Hg`H8~j8-Fi{U~ppmAj-xE zbp_se{O7`>hFtuFaaJDGlLDZf*3FX2;0Ie4Z9on8!=@mIY7xe;MvR#EiNHnq5mC!X zyqbu1Mo>=}dT4woAQYY%T^&dwP^9e^gGe0oChgkZY8ZX}1FQcG9ukOXmmX=jbP=gm zMXaI4nA{NKg`Eu6ayNCRGuRS_7JJ`a|K&%&3XH%|P!TJbC_p3#Y0HTSCdx6SEzH@w zwQjin`WsIjxbgbyKZn1s#B8Q8u)O+KOqGFr{MgR28_qb;mzL>vlZQxG>J_PM7hU_2 z=$aV}Df51bm zkksI3cwx_kqCNZeJmp0{?@cY7pn*s=MB3FlCg{{u!srNgQJ^5H>Zb{fj>Y6*y~mIN zBOphU8N%C!4F?}V#hl0CiTWBKRCWE<<=UCID3vjtC<63IU7)Wg(LN)1`lR<&!w+l6B%oQBhaqPBjtHfQcIBAqG?nL>J2;d-d$vav{V9gd)sn9?G5j4{ZLkcj?+u zyH^3Cgu+~)9Ha3ve1fV|9o2?AdSlEd*LK`S8fEFB<6uZA$}t3l45ENgtXspN4Pl6w zb*_oTJqLn{t{05nE&F!r)DLg??(uUc`m^1$)1GQYxufh^p{h4XpbGkjVv%Nx{Jr## zj#|{L@ib}1P&efaHF>VWs6eFoSMwiy=^kNdD^LW0w!39Oh=zq)TtYOc0`auqppK&2q&tKn1sL_dqP-&mf~-oS zriqfi-q!Gih4#Y2!u0&_fB2NE1fakWE3bsLpaw<2=mLTQPEQhyVr}i&dc_+LEt-mo zZ5?XDh)aUnMTUE-oZr-AN5Awqz9p`aF`F^z=$o2*C2g&*XaOaLUz1v+)_Qof4CQFD zUr~}1&vd0<)Z-Ybh~>Gk=p=?0HL5OU^!M<1JP?s7ic#z6T_! 
zFLun_8|2*>3MI|aFV_tD5b>e6Dn9hf)$jKoetOOXofc{ z@B`9F$Ku=Hwzk;+%jPdXDjEtNde1`@QE*WRq6_A1zd?i^G7D(|A%W=KuYdRJS*KVx zJnht7tADwq*IPI{Sx;J8=%NB6&?Trt>^1cZzqc95fump0ZHw2O^A4`9hIcJ1xi2Yv>k zC@=(sZeal;Z6T`_A2Np4giK=K@DK&~M5y#{!DRyf&^S%--ONiGKK@8}NB95mCCx&+ z`5I?kc{xfA5#@S1j|=gN^-!BOAFl)xhvfI2ID)k4vcp91=W=L&ek3K!g67d%M z_IqADVl@OI>K8j^agY(`9u(P<2}S54(4$joNXASU>YVk0*4uj;H9KAy7NHN0Jdm!l z%ZTeJZ*5E1{NfTLmD|Z-q<)CLj+IysSz-s@2ugy;&Y3}E2suzBv4anb?r9?U(2cj> z2!xm6KoE)7icJd_!@^Trs1hnTm)6BPG__IeI?mJQI*~p}U$$5iANw zi5;a$q@`N4>bHglKREOHwI>c-4XH-fJhPY&C7Cn_;`a+L)aCLEw|(-OTVC&;tI@J5 zE2>nW*QsfH`&wBRR#elu0fTn92tbjx2U?Wni&10T*!+=YRi39gR8yu@%{35J=@^R2 zBUmroVDTT%8B!%|$fjZDup0x;FzBE_M2`ma7V3zmpd>+6wI^MaW<7M~%<7A(AL4Zy z426tGphLlw5g8JMEY?K$_QudF*fP>(VT?3n4UmMPkjyZLso~*E;M3Hgc+me;DP%(Q zrvv8HpEIXkr-+hSkGTJJ4MP%RNH*GLF8Cnn6^8A>q)oL0Z5e&rc(o%os$fl@EK%6nUH|oVn}4PoFqI zps}w6?e$)edQB6p3HRtBiGXy;C6`{>IX9(15jYZHSSVVs9~WZNaNpPTk#vpVdYHEoIdA@ctC6r9Qz*xKt>VxF&Wa^9Y1Xd$+!Ra&RGM} zLqr+dKnSQ%jfvatS?A~{djO$&ktkM#HsU*we{Wl%hE*_RJ)~nJhCq>Can_>;p}Vhx zRtE@)7MWB7A{Y@T#!MgrMY3tSJ#^@zw~wtZd`FQ*dk6n z&&;W1#T+WC()^M4YAK~+&mQb-y7r07f+jME)@)hAAq(0E!K?uxVMvE8+qTy)KEHET zvtFOrTCb6gtyfY)#;8Z>y8=zOs3J^Mw(c!wPT|)a&(8E_er78gc1;wrqKlq@#AE`N zy@J$BV6di0&TpZa%nLeAx6ACqr#?-li-1dLXa`&@KqIuIS|c92K%X?jFinb&c2{3C z9Rfrf^bjY4S0)T0IA)+HrbO04)qScXp?nX$pLW?ZFRbxr37p1)nQ2Hrn|i?UE7IcXqD?~K1sO9jy&hH2A`{O-Fu zPQ4!c_n~%ZE!iqMZ9W|X$SO#9y09mFfAOU=XEjBMMyD$i!U9w}G}P1TG2!%m7=@)k zrw2BYIjcz(jgHn?PM!D9nV)8c4ee%3XkR^1_B_VM7ITV1M`8aL{T-+lG|xzx1|dGO zc8bw?41#vL!8QGSfyvXY%aEMVQYE9BgZ}==_y5U8zYOWmm&E{rQ|kyQLJJv0=%KFp zkS3Oe)%Jpp02>NG#K8&_bH}4dKzPJeS~5Ix&|0%>PA}iQD8(m zAVkE7uxDaKl7JzD2o(9FjN1M;R~Em~*>(S5Ia5u00_eEBHLvrORP)8GZWUyNKg;*e z>JEw|P_ga~;~^9cQFGmW|EjNVGao`+_|fms3qpd8UJ{rP_ZMD%A*YI zIQ?=Rk>k`MJ_Led5CNdrmUYi1J|qkgV+5wwL=qOJG4c#dffnYUd(@KWRXbQzvbmew zGV||8-Vh!7oI%u+gnO?e7_-=#1sHKSNVLYQ1tYw(;NXLym{}V}9Q$v-eZOSKjSH)9 zKh@dQ8kDMWn5cy0i`9lB)zUex5NYXNe8?uwR*s>de9OkfiRSEU4|V=?$4{Vzwk68X zVm{pynJ(~S8srNQx?Dh#k~;tO?!JkwL-noFMno5fDr|irVC}FWGmJ)^lhqhBZ!;52 z^hxj`-{IMgE=*JVJqB)~Mg|T|mfYtjT4Y4Bty>FwE@U;m45&dgP| zRupKgL##=|R@@wf37e>(hDh=&b{%@`-CHK3Rv(o`(`WYM0WPNSxhEE_1#Oy5f(Vx@ z3cgNzY9##27-0Dw1Y58vhj5Xe^TW6%p2R&)zw4|g<>E6Q3kbCtg{EnGF@`p(p*2IEvlvCOB0@mW-JAwdEI`Pt+F}S9M6oS1h8g%a4_-OaPLiLFp;<~(?3 zhnRH*i^S03iWE;pMIJSZu1%lojH{MIPm#3?iM-!Kx2!m?QRDB0Xo&=DHJVhLdbvaz zgMvkO+ZSL(HdA3)E4OsKx-yoeW|_sVlt6mVJh)O&n|^83M0Xn)OhG1mBDoVpA3(jn+gVO0*6`5- zR>hi=_!zHr*2*U}&XLX&u@*hDf7b9G;Fiu*YP=y{^o@VL{)#JZx}MGFER*prqEigS z(wxUr7i+xQsY@^Zfm`qTp<7Sgb*m=yUCKa+3+}r3osXXXuj;9usBf)oRh$SmO*+hy zLIE{1qpeY5KpEv?1u2(OE0jdul>%%GnCoJP@Y9&yC?t^z5w=YicyA+!b}V07h5%-{pZ)9m-|*^~iOSZ|Fa^Dy!k{%LJi^flH2G{8DK}8lM^qu1k0BeKh6^$JfqpY` z_!)jD<}@n$lp1Y*Ai3`>$Cy%cUDgZ8W zdiMMW2t^L?y~C1P?^1Tm;qc7AFa4%+u5cn_s4snQxjSHpmk1H(bwW`>4~4{8PppTW z^jK3yKxhp`S6+M7{+>EoTzOM4+I+l_k4jX>5}L*lYqv;)*v)UXs2-$Tjl2lmj4lcB z@J^K(4h~k&+SBQ-Nrxjo*GVUF+u3vO>_n|u-zsIK^cjqKC?#5hC372orQ$%50L7eP z2d-_;reQ}y8h32Sv+ByokU^9j6c}ywfrz=h!#Q{>#ilr7*fFW6f~T9i7KAEnvK?-g zW_FR9tX-kcb4>@85u{_8{g>_Idp`Ke9|}aL>z3BgYWOq&T64>2mQJc6Bw$FTi1=lK zM%6&FPv+WuaW&l{yrVwpf+b#rm!A{dkRF~6Z&JJKR|OtRY|BEBC~3`rko8dPn8gxP zB2e@Il!#ZtzeoC1kzus|DpTx(2Nze4z1!*3=Cc__U=b~H{g9?eRjazvmQQ1Th!Zh( zMi3%iKbf^ zi^lruiDUy@3qp+tT{RRsmi1x@RT(>`cfGN=te`NSSauPMw}mgnp;UcMI6!#wM2E>c z2aZFl|Gy9sI5LVXak?yqkS`%lz1+hP_H|Ms(88H{51U;bS@3^1j#y532~&#Dq|sE7 zMR&rJhHLHFb8_kPKl7UbM@dp0Dr92*_2G0;=7 z>{Olgif3nAQdIlg&!L}8+<*W5Tes>IkKU)O{l4k?{c_64-*bFGK%)B|%;1l)o=T@w zizSt!c0Y6|4r#y8m`+C_M+GY;sGA_MBOf-d4I&8;6}7CZ9C?z4B989VD2|X!qYEfZ zWO06T%IrFF>WrErK7~=~Ed7x6gQIwR$5hwW>@;=xiW!kH`z2$D6LF9cv168q51pYn 
zwFnU)vPyJOw`VxWeziTw)qBUO_#PoPEtXHfE2kf@G||+E?RL>qPi1y>{^rQ)7rzRC zlELa8BU+JY{+fo>3+>Oi5rC}A+2g&P1z>ZnqQ_#*UTRunq`(}WfVLhTQz+MkW5hZDnliJ z=tk}r5ZYP?O8V8lt^4lZI;3BMATc4SBQme(p^5xN9WT8$T>&)4j7|qonc4 z%Yq<*34KH>2;h=A3;R1$eQ}~mRF80E6~tzHb09fu!Lw&f{#3YU*%L7%#?w3MIP6pd zrADRU$Ro;>dR=~0zL_wz4n@C|j1K`HopIhime`j4`g=?N-r4-Zz25R;vy`K#Qn%<9 z1+koO@s?SXpioTikJF8=`ho8CCsIyPB+{o5TU*ZP?)lczswlOlStXT_5o9uC_*hJ- z1A=%VF|9sCm@yzI>71d5EIng}CX#I5zFq3*svS~B+xM@mD!+e5VScumR@g{KI?b5! zvt%PK_lRp!|`28tA6BQ1sOCnQ!-^ z)$0rympI5M1pQKM%wo@;!w~x6_m=*K)&rpyQmA~j@l*2~`WO7fw8 zTV@a$MK^7~^2&pHCKx@ve_;Vf?f85nnl7r=1ZPrA2f+ z??FJSc=n=~;Nzh;!H=~IO+KTtXdlp<laJCzZfhIHsFeXR8Noe^ww-ar&mtfjdhDVzh`$g;{7CgG4})IVMqoM$g=G}c;zuUYP)96 zT^<}CI;Yj})K4j#9C*RJL7395OdcSm#GR@3%d2~lPv zNv@TY51$05It7h&b8GoDIHmaFD^>-wk|^Qge!AhkQI9U}6OkXlA}S^PAcH2gA> zu=KvB?CMvt$<2#+XJL;b@jF)XK!KcB4cimF{L<@$@%de#Mh-+93;8Zb%VMuKKR3d} z{DC{WCrybY>oaBuj7aF=4CZXTF}vgBk^k;$HXqHi8f#{}P+(HDUyr`T1<<>|WKv%+ zk#jBvhd)xYf%{|e_r zysEyRPW22{Z5kIJ4_}X9*LvpJ#nU7~yd5pw=bcSP{{OTQR4#f92+j~S`aNAFYf+}e zi*oAF>23r`J4HsBmmG$Q<~GyLl{ZUjBE1AG0EB{tEqrvN+BX4NU#wx$_PMtSA>w=_m-AsDh~$|cu+uaTk;noLxg@51xjTr zGFdf2M)1L-&j>AbES`{5sf&i!r-y2zl@wXAiuanypy|g$(*g|0OA)wMW<CsUe zGZ{Pdkf^p`LzT7u%ZXWwP&N8CFG81{Gw{4dOqF9VRzj1SK3o*ZmVScZx{46vMY|41 zG%4RdEL1dz;MFtzATo>$Bh5j~Hij}Sljmw0@T7r`@@R&z3m;hVL?@s@$H0qJ5d^ACid0Ed7hLKU5hGybZpDi1 z)&%@WFF^z?<|%2t#97uIL^N4LtW!d1jl%3KoGCleI%lms);*#&73W1j!rc=t@dzn$ zrV3LKLIj(xK%>jv8bW%>gJ#}y{Gl<)!Y2|$;J4ddy1<-HV9J7T$1smNqSg$&g6 zF}0`r!EdcD#C43;MG3KImY_svA(LuLqKarkoH0qPdxXeh5M6P_6;B5gnPLCcL!F1p zg`t}8k#dBwno7wl)4gz|3(v+{fJ^<$`XrRJM3z~Xpo{~aI2Q`1$%P3ysG|w<>M`7Y zK}rZB<0>tsUn$X{?}#|ckWI_XQ4}KEM&SpLnb319I`6SkJj(7H1W37B@G9CZyiJ7P zlSNCIv*{=&RaDY&_@IekI7+peZZiu+W?{sps=$P6uPk@_2fw4UF|3<58Iju1zof%* z&VFePM7m(lobCWXaXqXdB#G%zFIN5Xd!6p#!-kMcs@tJG$Vm@)8Nq1}U_?P}F^KUW zxiVcwbxp+72{=qK?j&MvHOmuZ(1vr&j%=fio;tI8#+yqEF(2YYVqLVsF+L1rQ-O1r%++V*kRT{QXUZLTe(U?8gv!4hGGz4=Aro%T?E6NzKgI zI0R`-fybt*4^v_Os~A!yMQ1A?Ak_@5c2*-$BZRmTe!lK>$c{-JjmmzZa=5K4Iy7o) z1(fj6YIUimnyi%`)8;`GTaTTf)l0aXw$@7V-_#ee_E=TMp`RyPh6+;XE~o5=7v|AI zM5D*BjT=14gNusJau9?RUlNjFD0zh^2Q-|pJC)cRGvtYKk^iwO@K+uiG~ob5K>Y#)r2J}cqo9t zhLO=M{JJJxWfNEjW_f{1dMAb2!T9MJcOuz&N@hJ%pN1jFuR{nMTxDd(bg6rNm44M6 zS}O&Sp&~BrxQm@MviYK~WgX}uoPH*P;H?2A2lnXHaqbKz94=2+URHSWl1nf*s%nNl0nA)w5tKsAFDQkVhHW9 z8VZuIW>!TXGmJ!wwjX@@u|vwJX+HnF((LN0o^3Q+AccPXJO_!is*Zf9E0V$r;UY;` zdq0t<>&N4&g)~|-DD)UZ4JqqBG41{_n0Ez?Y^95gb`2iXNYRgiwYswQ6}^+37udwC zJxHGga8DhWG_K18l8lvN#{jM@H730sCJROhweQQi?%4%Nw+yQLblpoKE4@$e|52! zwRgfbiJ_Pf=?D^Qq5d+Wem=AjLkIvejEtgBeC%n$wo85Mj)CN^5itJ7f8%WTEg+7yr~UNzRg>f1=EC& zyoaIYR@W3^Y{i3wrFz(6Mo&IPMXqurSj`Y(e0>*NrL zId_%eL`QC}W~3g4MeN-LIe;pnD(pNDk)coxp{)@(-J8@z!6`$MrVJR}vCc6AKL{xL z^`HOglSh`mt<4>WnN^Jy6No}fnc_ylH;Xsj%az9s^H5I~9uMb8f#B3jmttj0#+_`N zQiM2p+N0368!5`ZEPsFIZ&n1O>s0vR=CX}?h(lkU%6D zed7AZdN4Y8Y~eRHb@r48TW4!pBM2bPfsl@MeG&IMjV;HC^rN7Xtc_9%8;Mb|b)kvV z)pGU=glNo1h8TdzusF0L(GX*)9g2zo!BAyXU9{#;?s3H?bR+M!e7&0NFA}! 
zQ#evQDfCd2Si#}X_U}7$(?-z=4F_4Erj!KIi%%+n=NweyhvMcb@WUELzGO~1u36O6 zM7yZbkAsb5%=BheWG!?Wzl;}0jG|xv*_T&e`7f1~-Z>?I%J7MF@fKhpp&}I$vKUv; z?hO*IFw#*%ITrETs9=fY;ujUa*49=zX!>KBGqh29vZ|UDQKRKeoteL0Sv@M)NEh|< zAsvEH0zn(98TH*1V@muNMd)qc;gG>s9$CV7c3 z!jIFlQN%qF2P~(|iT2=~#UKJg=0i!}J}4p`v2UN$M268%zxU!R|8b_K zk1e-Je%KdmQ}5&rok4?+LH$bn&62BC;4moQOk>)A9s;ozn1~=L@lg-z$^2+6 zsfqGrUe!iXZonn1kgyfjH@cyn3#2$hWx`GUj(2##h`c%9jj?1ui8ZSC^-L;33?js8 zXnhqLuK*yO2{pt^^73atD;%ACMFkb&_th*7)0B*KlTq-Exh!#U7fQA8yYWdlmz`l<^zS39!e3xruf?0im5Pq zPS`lFVY9Ta%GNZ~UL@r4u()b&W`$%EW)`4KX~uw3aL1%x@Y#n%bL!L5TttVUJ1Lbl zN=13Cr(OaSQEdUdt_KETJ|&jbI(sJBMs2^D+OvpM25EcN2zd^Xo{iwsPfcOFoGN2f zy;)Ki72BIuKjnf$jR6|i+Kiw%)aWAZFCOXmL$ivK@W74vQg_P(lf7W)LAD=rf=A%%^YpgkU6v^vK5! zExh^VT{E?L6}isM(k$De*cBT#4vbhy-r~K*$;Iy!$~MWc>jz^%55oppLg-og2c;x% zjAGQtq!D-2b1at(SiLq{quPwf5-3yIEQt?29K)Aa@R1Bim=}xZZ_)(zMp-Mo=HeHJ zC5{-Di`2UsF69xIhX_g(qDsoxjKDISE~f+5TNl{y%D*jsRcNz~ZlL>*N4{4W(y-4S zig-_yq1~~8RmZXhA%W;+w9)D-?|o3KUSG&KY(%g2lsYZl%YjjyqO`z0xD1gbBBvz#s!^WT#Sr2ILK_*illHMB z0n|^bZ_){b`iz+|6j1aTq3D*+^kDSZOA7~8I5+s}Yt32Wj*W&m62T&g76$1Z31P?S z6uA3>)#fU1hZqw3&6{vKErB`D-ulUzV@9exHi30_7DWVXCh84NcT4`5cMIxlYN^@MH zvVIbIREQOpTpzccO_EvjRv{(42wAHQUl>Z}%T$Es&Y*P&;;;lIc`P}Ee`rgtj zieaB|_yu(^PK90os~ze;&cQ#kX{Ftai2;$YO~R}=LK3a4yGa3xbrHsl68!>?PWQ)J zIhC|wDr#4A&mTXd`yGbSy4H?fCFAJ=j7jg_Zds_T20?3KMx+O!1d32Z(nO#6%q_P_ z8-46!hS9Nwf7spKQxJ?&QZnOBN{Gf4J4)-0z!3e8)Q{x#mQT@8L_xn7TB}1%T1mHF zE9LSZ(jYn*HH_2mo>y0c0EICZD*_u;Ov{C@jY7a#5!~BJMg5uB5*avjkdVaoSentM zg}rHgNY)df)HU~&Mpuf@xhbz=Cuwj5oe^N(B1G3TB}#} z@SZ)d{K>C{C7r#zyDXCHp@fRUjtsHVu+n7>QiV#0poUm-CoHuy+Z^6-t; zLjgp+0Wv)h#g;6#WdT5Oq`KdhCCpjEmL)MHMfB-U-}32O&j0iyk6GS%Y;p0Q71|ga zoE;r&Iuje4t|sYN4c}yioxL?bPuZlk!e}8Th7KXR4;eHHU{fURg&6s1P~y5Wt&SaH zlLQC3Y18=jnH1CsI<4RGL_nOV)m+(_A?cAK3V^@wSu@je`ugHTIeTL{4QniiR4XCh zu&IL$&Ytl}RlLW2!fSOfe~kA5LXLhe67Dz=1x1|~|LF%o5g6K#xbI7MY&2y_MHFBJ zHClSAyJz6=mNuvbvJn+TR!Na?4|kS5_CF#JSK&&oP|^>qaZ2<*VgGcNO8h_x{n8X) z>HC~MM3Ewp8bC#v$ew>&U3iIRd&>q}=1UJkegUA3zWq8yge3J)%!vYwZaM$_TY%9g z9((+WgA0pa+0{L3esFeTXna;6@@6teqf^AIv$|w@)F{(R+t+X6%;(*zilRB2Xk$gn zMusSiU{r3kf}14>Ojm04Y1t~ep#>RN%2G~UlM%zd)rnG)C!@$(rVm5M!UjPuPr(`> z5`W56`j+OUh9eMCX7W!|mNCOaqjYE3v|46PY*t?jH(Ppz7OKHesUrbwXJMl-?G9Fc zr60qW8^g!v_jEV? 
z_R7M;Z{wB4dHbXu>d%%X3y5?ez8XMWlE$nbLxzwsBouw0U}Sal_#f?9T>MIBX7lrN zDiv}r4PG5afufMA(nwYBBN}Q+HR%9j#2ihXQZ%ZKqf8hEBPuQTo=jN1hQ65|BWp&h z*=WT&Wx7(S69kf;G2a%?W;mMgByuf-CO1t=OsXggq(=FZOqTS%UVWOV9nr>76xYdF zt&BKRk2~-1HBb_8fQbjDQ6kn08Z+C@(-K{*rWu@fQe;gujB53NU0P((=( zoezvY^N4`-$m8!^wRqLLGg_oJ*Bl}fqQnN3`=A8T6cIt^Y(0u$FGrdO%S3rCDhks)~|+= ztUAbVN*6KZOw{PVyPeI;Pi+>%_BtWFGj@uiYDNkYFzh!DpoPJXW}8cYZ^^dtRh2uboGUh5(BIlq9A4&;wDk74DNlV@~iGA|6= zD|a6pg)kyh8%82>H0=UjevxcQM3tu072zTBA0;6Q?Gr`Z=MC$5&9Tw1iq0ZCn9u0|0@lFj@cnHO(O!1r$qB_3kpIp zVXh2IEkw?+DI`fRkxPlJiU!)l9)&*nf@+EYoIqp0?)dp%3nm&KfX!z{3{Fhy?r|tEzY;bW{g=@ykZCeADb}_eU8lYJG+W@wRHX88pz%v7y;wv_jm zd&<=rYRCU^>>0upNdSn~=}g(0U&gTpA;dd5-uR+m^c|f?msLR9)|f5VU#qw&_>&%9 zs~2T0B{<%2N!*9+c_K!*b#QL-`36%7J`Io>vTx_f!j+=xIDETY7|ulV%e!&Z7H@dl zrfe-X#3A^^rmVkw$P$yG1c)jhS>3i${WH6=Fc znTjx^xQ~dDH=RMFFj|y*K%QXSHGv9hjakIsugM5D*UVXMxJdl2R1STx91)4j5)nPN zd6e<-ohtK|Ln1WLa6W(=>7UR^=vcP^Pt~P}k&|my zlmZ$Tc7&{OEvgbv;}z!z?WiqY^cI|YT~E*kGK3Q11*1+tXoDhxRDZgkQzOBg2}PQM zcI&M@82!=Hi!Z&e#_PE`D)XE0AnX}-j9%&q@FaMRp_-^(^zgG3bww4JHEZyBnX&YVm?G_qRO&!9phtT5#2MKV2lc3TWU3GSSm+z20S;pKK4j7ZZEadbB~p=pqX=s`)u2R)nP z%7_s1WyNv-Ac(yF2#p25b^uBIsRX?$r!$S`H}C4cs>JMDJAx&XBrz1S97&Jt|H`^r z7p-GNx9EtC*_t`CXes^^kISTq8y#G{L5bOO=LefgwyX661Wg39Mt6P|5mHprCX?o( zS$X->6;c?y9Dnv4B5@+Y&ET1?5FS$Dnj(tiSiuC~fG81ML8Y-uL{;pWGn~-H}L|B~2NZSQquNAjsxE%e8q|K&GME}cBwnR)8?mdtsR z=V&MqC1{aOQu707AwpMS@gj^TqP-XTj00yXB*cUin%-sdaLl3-`|hf`X(*iCP^(=d zUzk6)GxINREMYVyh(?(?EP$bc25vtn$RhO}e|WBru_*^vRJv-S@^!aSJIClAe0%sl#V={_@D5 zd=J1u5pfto2^6hYL$N8-5zGH#Xl=QaRMF>u?2R{09yz(Gv+D(7*g2}_Nc>St2a86H z!s}$bC^M3BLIkxaC&ZB3*K@HVLI7eyx)Lvv#e;}9v@5YJdnOdMG_GY(iPdzQ!yu zMKPOJGdT5PLvG232DR|@Q-NRWYs99^WscNOGuC068WH+|JRDt`hj2hfbXky_uBF70 z1T)u$=&qgtcT~^v=VMe#%8=Pig1h9tv+1G~xnnEvQL^5!3&ACX@Iux=^&caA<`z|7 z;MWAR_2%XePP_OeNN22v z*7B~B2NQ%Swq>z3(==GgT^C&Nq(P+P_Ql8c%{(-#AhrWXEGnl!AGB1s!R-oN_X3r@ zC}W4VMg3rCR3U0wgK9OJ7WFFzX4PfIl-8i?QT49E20xOv%8cq#gBU4*Z9XK_)x0T> zibtVHf$v)XZV$)*bM6bwotLBN9i8M+KQ7KUR^ZV@UZNbk@mZ0{k_~(z-odE%mzPx} zVhO3UoD@n4!=~=1@f<70%O$+BGMp$v84;N-Lw}@{B+kM%l+|aYJCx7;imp7^*Ftjnx(F-uf zq#6vp5i6pwY_lhyM}F*QPA(n6Q9Hh+H&9KbuN3WjJ}AzY097p5`HUuEJVjTd73t8= zm2;xlST>ovBA1-B)_4TLjx-GqZ55TOLQ^`OYAomXbkDkB;n>@n!-zdw3r{DiKDbf~ zyUs1s(eK=wRZAYkn8mDG=1kHvX9m&5cU}Ag7hGT%Nfb7E>7O@sb}NXzZWBRPM9n3!6;-G~u3dr6pdtZ> zCN(i9^XZi0jQUZkUX$XgWJI|V<*^NEcx93z5W|bSH}UEy1gV+SfVa$EGd&1(B02|z zG`?4fvnlt0_x7^yEN2hxPF+Uyzu<2ldGWJ9924q2Ruy7ZBROss*9M7W)`R z3rAMvsBKyHsC44x@E;bh(cCd5%_+a3K&(1ch49Xpkje3AT1S5qC^vLJ%cgQ_Bm^^idn zv`}o$0*G$aNb?`K8WeOm>hCNcqHMw_NB;w(Z0c+nwD)wOy`@gW>F zG9Pl|iB~I>s8oj%t-=wLLNf3mx%?SY^RR8#z*f{FwXus<@A6 zQG}cC+{QU2VJb(KtO$01RWuoBcUoGiIt(`oqTfjeVdgVlQdS;sjmuYZ=9J(n) zr;`#p^?FYb>J8Qm1Q|sS!iW-_C~3k3NC?!KuezI{Ke`y1Ubj ziLIhVV@+3!=m0_@$xSy*d$#ZgNQ7jV5dtY51uuH9Q&Q662EWuOjg8ICpVi&UeI^mTi>(JIQqS(6! 
zM99T=-6af(6)9i@iY~YqRrJIYmp(%fTTN<%gYcmV5o-!Xnpu4iMREv>4)a2gjSIiVa5v?cI zvOn~Oy7I>LAAkbH6rs|P9S5jYa1_Q#Ly|lOgqeJv1L3x7`)Q&cp%YezYl{>ynw-%P z$CFI3+Gxw{l=)%+C4GBK4?po9g(QejMuL$pZ+vle>EC9$XDq*vOS{#T4p_(!etg;G zi=~19*3f%dxeYyZA6kw)nV6H_R0YzrbwZEmwGtp{fP^|KvvWgZ#dF!#{Bcdl`sc;P zYp=(8t!3-uEJs|#xR$t27Q{+GkgD_J5D~K8GXq9gv%3Hh82W*|7hG^+?~_82&Sy?N zeeBTtyR_zE?(Di;e5zp4HZ3V87?aj^(9kgz`D}W)RlXK_rZRnYWwg#* zYy7U-g#G#HdX12f5vP8}t$H7VZZiucGs+87X+A!>t~{1}Q@L33{?yQI6H(FySPBtq zd%bv3A6=>MfeP*=HG!hgxlkZ`W$f9Qu@k|(=qaP<@b2yhmyW#rr!jzDwr9Vgnwlf2 zhx!$fA!HCGwj@d!Nq+Rl{&sO`^=~@eGmf8E%}b%sC{%E~TDp=jn8QHq|0`j_iXK@c zKi(>6xLT%CP5j3b)f{yMNjXWYkz*fFt|3*_Q0QoLbN;wm9e-nG@rvsMhL9wX`W>@= zO=J)SF@%yyGsJ4h5_ok_6YUj{F1YPB!$`9C!1iMYzcw>-?x29v1UsQHgmj3KsHh57 zE0AgxX6Ucx2heJM1Lo|dlLdnnQSc#$jb0BKH~jq?S1o!T$W%^Nk7`11u_91| z0+?ck3mxYi|fy{1zY-Z`%CPqeqYa+>id~ z!YfC_u-k{rH93Y98?pE(XEQa~(5N)RKVYUo3RW1BBBFK;FP}&miCQ#M)ir8*$&$6X zQS4C&WUC9n(ip1w0vWZ1W7~gcjT>E&G-K?)bPa|4HT^A`C8@p|Kge<140Q5F)>(3I{I^W|LE$G)qA^pmXB96lguzt__2_y z%G+{1wP1vv5kMkxvBnKdy2uc3C=v0ahV_p$nBKrqxmrr6F>ft%q;x5(5YtfM`OTf~ zH*Z+H=XZW5_R3=A<9B3ZO}^z8#TbpDK2wGy_zBxGFld678^%a(WQ8=^tVyuKnV>$hWHjlM z(-0xDTVH_)ky=a9q9!yB(MH5ih0o?1h|)N`WVo?jiULt2l!4FriDV9qqq5)35)Mo-=Q!kwM2+_azW*F-)HAP$6jYC|aL+Jm4s zlAAU$WX6!>ZZ1+rNAJFS1%^#Ie;XTC5Q$bg$Jq@TSph?_BFc&6#5)rrE=Z#XXK`HG zCQZSkqK*>SQdN9f`+xQ8D6RJ*ciHhMokvJ{HgAM!I9$ zo&dPT3Tl9AXVMS+42F%cCtu??zJ2n=&x#Gb{IU~eNr|IFykbSvm+FWYO93JwXl;Ty zTcg3zQ5|XQ2jcL8{ zh(1__f$CZ1IO>{&io|WPV@oVZsRLRIw2&$xrS5Llw62HxXqsV>n`h*M! zhC-eN47sCGyMmT;GiUzek@uvD1fjklHP?P)mLSxe04U*?CG-&XEUbZBgC*b zAK#+69|F9tf`*}!6BQCJ$jSmH(K{2s2G&kbCAekq9w1d!j8O(GC*6|+Y^<&yXXhrF zkZL18Hb;K{{e{K-zk0p#qXXH{)X~qVgBIGrhx+x9QG~#V5{l?H*5hAu*|xn;2u0Ym zCtrH$KOgEoszH5T5mRrbDp)ibF3qu4-@VAMPEU~42UlNHCR^uyh-Dv8Wska|t93#O zhZ$<1$o-6U#$Pc_ovJqRApSsiYKdi7dL_w;iVP=$T~j|~H6IbK#*fh>D0KvaBm$J4 zz^92Ep@PJca_mSb7pNGCJZm;AxXTG>9_g9`PHIMDwmAW8F{7W}poEeTO44c&w0^&x zwNL<&^^h>6!*Uc%5yS4zX5j8YR~G+VJ~5F)3|Fg@70 zCh0UOD8+t&6R4u8sF#YxQ$sU3*~-!wT@`j@t{R}B)dQMMC>x6x+V&MyN`MHgOgZKp ziR^%x$dkm`zE~hvHXtYwG4hX!!i}H=tU~tH)z<2I#98xd{K7fBmt+|1tF_x`B4J2p z`|wkp?td(;eD+TP4-*^u2@&?p2wLab2cK+>5CsvTM$ke5Mn|QMjv7X9-rLpEWQ3ghv|yys4xWZiyS*_c z3}u+>z~p_|FyfY@{()2%KuT&NqPMJOAWTF~5|`Z-MS_S(k!NfWWn@mHuEk!_htTCf zb*LdKxW{XXo)6OfAr&WA8y}6Ewg(D}8R4d#Jn~O}+NX!sdu9L$0CDuisrx;%H4Je< z^cff(U0wRtrtU){%UCJUTr};0QxE6x<)iUpaopy694%bJBNKXz87&>>Ni~<3U)Cs* zJZqzkTIAAL;R-!!Xku`o+?suLZcpbMZ!H|V{xhaQL3mGusC6d7LkSEeLDcV=p@$qF zGKAJZWP2vjUh3Cx{UaN-0|y>?>5v??v&s#@sE&t5lk2D$5`*-?jH~qxpbC+IX>sP1 zH4V$Dh+SjSG#ZUIM!fwLB*;+$`Cu-}=c%ee457LXp^SQ>GyfMvWF|*k_aU^1Gg#5k z??>eapU_mHGpB@#qBEq29RTH+b=lLd;Tpgwl!u0v4)BWW;)v6qKC-M?H~O4zU@ ze3ERaZ$DxJL4i^SLS$R!VT%v5?f2n_kN(udlD|DFhOH`*aiUTSWi&2mjkj2BpH(kvpKBOwY^q3yUokTMp|gy=i)7{}T|iWJk`m>k?_8H7 z?LvT#uT6|FX!7*g8B;LJo+2?)09=L-#FulGQ^< za;p@lCB{$?LFnOoyfXUXqk293Q`h}140}c}S}v%dPCV#5O-q}slA9-bNJUy>vM8O? zkU!SI^&8&D>{qz-C(DvvnE{lH5Gi9ukkQ7>LB<$GKOq!JgrI<;Z8|Qz{KS(_-YOV9dElAH zUOK2s?0tiC0Y%a{&c%xGSRLs?;)69WV_WrJfg-u#j08uWKw?VGkEClMJ)kX&dmx>r z2tkN~yWb3+#Bl3Gw+9K>qQPU`V%x=T13qgSv2H11)P*0>^x>KNRy`iHQQV@3a@R(f zmsarLjWUxILT#kOMUgITG!|>Fb5Nw^ftl{VFRlLO=l&Q3_0@eO{CcDhL4ABkq}s;J zV(lX}gEFmo?jco(T{hAn-)AZ zoO|ga;RjNLaU2(GYE{Qum^E#?&u0hc&y~M_?C7<>`iW2URpZ-`#cZfAZ4ZF@YZ-0J zoOG3#5AC%kvY?4R87L8Y$g*wQWtU%gK+H&T;J`CaANtiV@7i87+^#;Hj`ENe8Rh~~npP*!qo;LeQ8iIopfwOVRDA)O$c)Gk zx*AgkBeEI_Fxn;*ZTra1j{u{+6134f-|Xy4z1G~fuTK2hCaf~HRS>e?2?wS7tgga! 
zV>FSJ5m84vtA&n?S2KFLtVolxX~dblsyZd@1-Z^Bw%yE-wrzeOQIuAqhKN{+8WQoZ zgqkA!QxHM#3HhcX(r*Sjv@tvchBhjq*tZvibo>;j#hmD25Cn+s)2chY?&0CW(#mEzYRio_ zyt&*cO7BED@~`KK+0dRZ$jEW4nhXu=zQjo|C@-}ndM#l=bW4C~m8ek5OG~9Skpmc& zSSBTweU2Tq-CtjP>0?iPCedu4?3Rve^80NWX9zPQ) zUAXgE!RXcl&+L6h+US^~kGtpQ2wjg7e;lhjm!`8eM02$G(S*P<;d&vwHJ3W7VsQe9 zkk7AS>ZXZTS5j=D_RhVG>-$qQojK^FztGT{L>7OHe!pJQLqH<&?JXF7D|?ny2oO!N ze{tCJ>S^)v$D<#{w7xk)KtE1QR^}H2Cm6#?Cb%GF<6gTPjKKd0Diy!b18PrD~!< zL$1G%B4I*Fq!qjWkABNCcka6$cHDiVZ zd)9N!bb=v3WZ8K$VWYir(+(VXV*mbs-7~X$XfC^z>S|_L5%X!9XxBxFSpzGwL|+=C zKRmM`5FeTr>2YKhA30nV#F`0_e)Dfq_Vx8ryiw1R^+Y9x^eRZLoB#tgSba}XzWibm zuaT`BI3+l?%%gBe5>NRARsFmDQ3v6?#-2 zWcBpk{W>+2R75>Sw5ErIm3x9Jx{sGY1dMR`!O^FVuC9DdlZ}>#^H3nTu}Xt5X$0lv z$SW=ZLz3@5PCTioCyiVhqSSEoTHwi(1Zn4l9#No1qpJOgRXO>(%V)*0=a?GY7@D0w zccweLe__8$@R{k^H~6F9F-t03?ga&0Z6JN8XOx`!mB?(LwZWWMy` zfnX;tL_U_dQ#}qgcJR(}M1I2DXDCjNfMH45i~>-=YsZuY`5y<+~v8AQ2b-K;tgT-`tyfNG$X)i)e=8RovUind_o(MKA_)OyN zr}ZQ1W%IIJ;z&i2Xgy4qZ1d8B5Q<1&I`k;UByeRL^Y?eUufBBj&^sShE&d}9k}8YI zkY2GKN(QQ9TSm6Snb(*Q8AFB;_<<2^0Fe=7dG5K(4*by{96(My{;fmr@1FVK+}yr> z6I87^vxcL;Rap^nBeJW3YLAh|uLFzP1R(%PM&ZS*fF*T>bLM$6Lq01R=TgRC-BdQl z-r3Bi!J2qb(G|y@6|Srq`3!IGg+EC6Mt&E~98RQ-6k$8Y1aT!wvKiEZdSW7{R1Xt5 ztgpiM-;kUYtXyY=nQSWxAgh+U^Y~Mp&R-u{`D)NWF^b~8pcp~P*pS5`YO5gxEo6xy zWEeeg-vi)?r<$9$xb)_}?yl$O)74_3Al_4y8|m(#5@H$WEb5Iq#;BBr8p)wgr-=|z zSp;5W)}rx-^02yV7)ZH>=1=h*pR*17`(M6q@!EHuxaAQH8FtJNN>J*UQyW7U$B}B$ zYC$LgB16dOj@twxMH(U190^4Ph>#0!e(ocp*GAESM-J`(R}JW&wZD%so1llRLd`u| zh0rrmA@OBtBa$2l@T#XWWMeB-v9aHyo;iDn&q7si%UOn`o3_iC6U89PPsb%lD9fTU?@}<1 zN{)x?=1#2LWag)zA7?JhcuEX{OIRG8_AH^P=Zt^FeC(7K#|TvH-2hi>06Id zvsPBbQ2?TqQHUXIB*6!(x}cG0H5|G@5y&{|i4Ovm5Th@xf}%)2Zs=Xp6Gg8D$wo^Y zn}nJWHAp@G`^S!6@y#9pAre>~@^ihoz zh>DY>id;@YA8fWSPJ|A}i)W1Q2-u;Xno+B!)wx1}d!0fWL1~#P@n;6g()21SWUZvN z=Q#642PP3<2yoOIJA%=JD!IVuJhP*ms8No|54ARV12LmFY|mmvWLc+&01+X^1dI$J zFl6^WsfVPA9?+=?)WGPFw9(tw!LUh%3@iSuLNhTS0f+(-t0BDqoD7u?l4nJ-8_K*= zTqIsiE37z>YSYGL!SbTp=%ON4PB?0aC~0iDPNtE!1_vKf*y!l7?LX898K+1Vh$FNf@0vqjRo;M(Cnx87nA~TdixPLJSE8 zHR=IV(NjvQR@g*6>S9>dGoZ4d(3&RF>jqI$j)ozKcWNlvcSc4m@=^?7qzDlMi?}ON zu1Js?2rwQkuxLoKK9lPRtHweRnxLYQ#J%krnu;830k9m-$A|qz$1s#)u^JkdA%`J=~{>Akw1LF(Eqizyms%+Ii!ldk#rt(~hmKer-lW zqMsjgJ$_CMPKj98g`|hkGsQ+#ut^t5+lL(gcrL5Tk9=OLO7=`eY8*g~^0b`5`ao2~E|KXDc~{( z`>s9aq10QdPXxQj<0&?BOflTi*^k)cAHG-mNQ)enUzq9q&C=@6(4d%%F(wgYJp_XE zLI-g;)nN%Jl1~;JGn+H7vDd&wsiQ-OZn*Ezr*1s9bW-K~`<`#Wtm*Q=t>@jW888Vd zf=JqFSY2??^16j#L%y@}?TJR}bH3DoU`q*AWyJ9Hy$_f-IcxKCnwb0#$L@RL)EXV? 
zK}ZJ}TFI4JjNky1i5Wd>DOv0n{$R%8c2k?1<8hE3ur~1 z5T(2>RWOuoAW}!Oj9Hv-?&^HwcaH9V^3)UGcfJ8+i3zopPz<6VF&|3Ah!QAD1R4{@ z3^9zXiJsN_&gX8v1Qh9b;>Q2nwR^nY6ozyt@YqzaNRZ<2LSX^IY1b?ji~<)`vMFJV zLLy>8r1B|%%&N#SW&7ruBhN4Pe4;?Cr%PBcD%NwdFK*H(QvGW7Q^Y<6LQ`PcJ{znQ zF46_@lGUPY0{*;5^SR&=XI|EiSs{W}0V;%J=W9eAk{H!_Z8G~&01?4QDWmRxFD-rE zeH|fPmPm7O7)E`0dkG;%=Ut_SID%iMV@(es;OG!?;{z8RdUN$#n>%|7gX5WW8CFE~ zW5ekUxDuH&9aJ>hjn@;gB0Umb@-|*s>Wuh>p0)Hd0+22Cw2B?Itc;qC)@)NR`TH~P z?mu?rlRpH6;!f$FZx4h*hZ!wq_RC@rSwgGvpjG`olIJp9&FPyM8x@7O6 zukU(@j*zVtr9luP&ZBN8qxOSIY6vn$^cY|GaCGGizqa_4)LIyPk>D4-~8 zY!!yU(M2zqe!cUJS7$wirm4g%>}UwVDf5<3+Kp&9qtj8q(Px@n zAx3rLL=0}6=24am8p#u;t>dT7CNxoq28F>5P#urn)74bl?1*MY7l(}KJIFQKW}sN$ zCxY8ZnH(96O7pGR4a;-JfhxrYlvt?mqN5Q9GbsUJOQOD?$|?F7u}o8vTUO{%E5<*s zwpObmXY384q;tLhn7yuspIQBAY|RRnytz0YtelX zl07)Tsk=vx+GkEZsR&U}L!iiV*P3%L^bn@*^X;!D@7ia|;-p!^wT~UMo`50Chd%xh zv+Dyp{!_)XTWL;{RWp}X)N!nyfgx7_j^>4emy~d&K5TKuRcnABn1dgrHfz)~Zt zJQ}}K=UQ^NM@lvN>_rtkJaN_83}(^cb(vkC*B%&QGAxk6@d~pxF>v0b=k>niMkrz{ zMgcH#+y){HqDQQ0D5vU4Y|~0O{LC|E{TJ+$v`7G@|H4S6r=|uQEX)Y?fZ|dET|P(T_Gjdo?zlO5bA|Es^sQf7n07w zf$;OwCT6?DClEzeo#Bh|MTcQpB9XOki~4M z2O%9y|424>;M-f`Q1$xM>qf_n3u0Y_?ELu76DNM+lP6AXf4{SNu0BDRqw=mP`m0eu z%rDj}kGdJk(gMUL4}nT5$>}q>`!s!wshWsCVHq`}(}gRKbx8I1;t!XAPcg^Vm6CAc ze*AM}v~;J3GGk0ZB%42?ZB}KEACJ&V)UdJ5GdO9~L87JD(qJrw-ZZVi8i9B-0^j^9 z0(dlE6xmZxiq`#Ul*v;ea#+d;6zN0>=|t?51uX=Kl8UH5SbamEW2VCx3ZkKla3e9tH$c&>95t1R9KG)|dyS!FN&c2n>ger-)tBTH4jgZ^uu%5O)b&cmP|?W9fg4#S z>7`;7p3MVd)@htFNO)Q30ew*^>(Y>xj2RfpSF^eqY7sYrXAiz6fB&6_Z@=^u7_u6Q z#lC$n#F(H%cJ5uxxWO?4L>ox8B>|wF-@Wtl%Rl~S9|1)leS6cc=B!i^{8|^acVM{2 z98`3xqFQ59w4JesNag#KF5>Txu>pi}B6`m#7D*c#J7Y4YRf*9uwWx^lA@W8twHI3y znnui>AwIx`FJ$b{GH@d)7|7Fru!hDVi2!M4?}x0FTaZGCGVCE!?t^w~_%}PFWgMBN z#XK{hL(Z-ZGa6Pu2%*QPR+kptUuO6OGSugmUFQH}oVn+K5yhqqI)ua!TC-*z+$nCf z|Hl3M_uu~Z+bgTTySuY{WL{KARYLCVpkc8@&VE>sfhY?R(ldBrG{T~sph>3PO0ZcS zwO~$^tpP54G>M$G*j;NjnscuWnLY?_-8iYgNTX{*?c68oY1XM!f#+E`FZ8GJIug!rOnCFEw< z3=3l!lxCOTD~VK*y|#^dezY7hT2);Q>^eOtGy)?6**TLAeuFrQwx_&epW*v9dATtB$omr=yrwW$)vJV$a?G7=FKfEme^?o9 zEFHQoj#DQL8E29X9ZD*qi#BYr2S)q%U$h?}y>;En%3rZYZ7@qBA68Q(X9mduCM4{s z{|LD?R1kz3?UU6nc;WyO0Y+&q7eI;)jbqVRY)8paQ_84GCUpM(neMw+J$%L9A8@C1 zZ!F{b#C=adP_H1?5b96ggCH#021V3|qn(%U-1&t+vqk&!_dEADg`$d#8LDNB3;7PJ zhscNVVT^+{3|K4U$V0I0zD-wn?llb=?_N}eNb`Tav-zQtdy8FS&7^93_EtcJoO5PS z?J1fQozZ9f3~GpeSpzL{q?#nIUQ$oY0xhek&Ab_21-k z+Bwxc3Pzgw+fj4c;@v+6i26%EbRf`WRzU6D8FUZ)*rjV(7a$ zcRnZCcEO2D-ud%KXXf@{&=~tbtdWUVn58TOh^f&Rxt`Ye5U0#wL!M0Ifer$X8yNM7 z;1Uyxt-%DMMBkb0k`#&w3+2@gTTgm3LBYF&BfwOkU|LZf;e)8Qyz}$;Gl#IJM9T5A z;+e!no)aG6MLinS+d(Kjwpnx5L#Ja>lip*^-!So!<~I64zO=OZw-mu5{d@=*0iu2_ zgaiQjV*FV8cir{oP@|132X`D>dGp=wu6^@T_@0PX7Ecyp(v4~>UCk@$>fQ7LQobd8 z@d(mGr8Ny8d{BCghui|7jPy{Jbiebl`EhI;kQ!>tpQZl&`|kPG3l7|RY24WXiq;6V zU=;iIeS9di34tK%p*~t|2qp0I;qU(LojXN}kjrmYYX8JFSDcu+UsGpEeAEr1=?Wu; z#hbHXlkB5o;zNEuYm*E34N1VB@r~jG~OW!A%MzUz;HT z--uZ-ZAFcu>hklQ?%%Ag-0gWEu@(w3#(`+&IL(K8vTl*RLDbtbB^d44zkkP$ z{Rg+-xVZ8gk9Kws4pI_S0!5`9ZoJS?)jT5D4{8T%2Cptj^8gvLwUOQ;m0A;>tP*WZ zyXHtYf*kQ8d2|A&rcSe9syRD2xV!U>fBvZlE5+J%dmQcxf8e4WrB6r{8LkT(* zn=wG7H((?T2}d8l@WeHjTyg2lSu$uyQwZ|TeE76s#1>&qwhGIsDlVs{eyBp$!xLpQ zCPHBvmVTeX!X-6PDx%$iT|5rp*gcOJu3u7$3{Iq%X|vnyZHvkyPHmvE^sC$m{j%6P zW!JzItI(!APaU5hg`?G7+O6HLO*~s)S&AWUL?QOQ)nSDm_k3$f$|z3T zW697(2GDh`A4<}p8-fzz==aNj&}lYJNgEwxSI5DFI}X0Je_=rk`;5UsEnFp&kryr? 
z+QN)N&qaCq^8bT-A7-WrZZtAVxK#7$EMHeSvcg(c=z$<*^dR=IA+u_W-n4w& z!m>?UGaX3(>eir!^orNrX`J~%2}pYDrGiwNVo z`k_SahmMVfXhzWKA*2JrmT}zt;hR4!xm^xY4j;pR5Iig2ug{KRh1Nh z>J+RQ_$W1t39C>{lp_D?>U{R0r&trCihx$qoK3l^v4m+Ui9h3b%N@f}bDM|AP=Q6B zp4M1y$Qnh?pRuz#r!ORY8HJ)o{SgMSJ;`Q`5RCN3>Pd9f0N2Qr%2O3}CnZL)k;@1a zH)2J7POxG^RXbH$KDTq`zb!3Z_bIHHK?EI&V~o0hA0KN;S8?i?RVNtH21Rrw7f;2P zB0_-C;@4Dcv|ON0G4Dy%GGnc*>>Ka_GdfEx#f|uic!0DG<&AdJLJTq^xPdHq5*jHF z$rgC@w>WG#YC2z?-P3vXru(ixapG#aI@bEjc=giuTr6=_bt25DBhI?^p$HJ^0*H26 zC4E@R=;ocWS@UQ=zGCo$!jDlDwM*0-l~jVQiNDV;tRB4q z44F{JVQN6sS3l&mYtOa6F=5Pr5a;>@z`&?C{S^p-qPLDMy!38&=KkeEHVYDRC0(q1 z^3^;P8OZ|++cj5Qn*$Zc8$8e#Rarxk@T?e8TBynB0g)rYM<8l7Ewl2I=3d)3)A{m_ z2Y%(mi3=EazQMHzM&Jj}EZMBsmwQcUq9lrX=1eCTk~VsF+a<5x^sn7n2OH~jzRJgk z?YIfw8_#JDf^M@LmP#TIY!tKB)zkV@w8MH_NQ#+Y&+JRWv(~W&c z#5)vTdrVTqWGr2zimFf5s_F+()#HjNkWd7UbQ(d)@DKn(U_<@B{pmH4C9$e3#?kKx zMsM%y?insL##1s`0tk{OOwD`{JS}31%)M&pp~}>#ug)1yIvN6x@@Mk-Wy2WM=g_q? zu}$4^5rn|ezO&~B&+Y7a_kr6lRi+&jC3@~-QhmBe_5W$kbTNKn-%Om^-kE%~o1c{e zd*}Q1?4D(Uk!192=_49o$*1ywjSUQXpe=-nA9|N13$2a382gEVOVRt7Q4=-OW0P6@ zQs|>DD*VGM3JuY&!guePwitTBt9yrB(;t?wj6j@6xTDofiiJRAoA+T>&QRqqT~h{? zy=*^Muabo!wb|6jkT&vYf)?G^M2XySRB}Oo32wASd!aYIxwI(sxJwZU>9SZ61wGW4 zxW6$Or%qrf=0nMaWz00c;^`iYuDtf>%D<`DXl{P4F{ke6Vnfmj#z}_FsB+wqJiq7; zVWVnGyqiFhjow(EkYfq1Bn_ngS*auW_j;)6Ywu~zA}#!V!bTU}v-gCi$Q-!fwyO`s za~qPTENRR7b7rw5U3;$3Aw*Xwk-+ra<=eJha?_VPgInu$Ir#YbO=8v=bW)uRt4^0R zW`!k$_QuR;K4bVWmxyFPFJa4D&$Im?k}$0izsmI>?3Wf;&`DFEV{*g@fn(>ywFNU1 zAX@kJcz}U2IexD4p(@mdkAj!`6&+TBFXOREPw@@(o8SPIM}&$Tu?A=n!A5=76K^_- zn5Upp+vV|&2)2|_GU*zshd6>BGM`SAe&|Rd2szCPU~m8JtxA0Q;KA(&uYCHZgNvj> zcE9@S9BuZNG?_4LmFydSm{5{)No<}l5Sk}H4)xZ13glFEDLTNU$E_C3n*Rv6HTcoS zR`#`SZO&<$$U|Sb@2ZP03E}F59*WgavPH<*`xr)sPXa^IJ$hNdkFH<{WA-6wq>o&( z?UpZex9;1jMn)a(_pl$PT&sAFY*k7)(UH;vCdVkRpYc?pd`b+e#_n_gBxg;wttL0E z-d9h=8ZZi~?Q^?O$b>f%iP=(=*B-L^1=l7zQ)aGI&`x|<&IlUjJT3&8B+lfkL@9E) z8Ab~&1d`B0LK1<>c*GjzCY1Gi$DOy2qoNeEDuxhJPJ4pd)h)-*?e3Ddj}eQj(5#38 zkYd;D^rkGPL!3m2Vm4&yt&c`DMOd0h8tLh$586>XWBxUvNJ0{|h$Rzz)IJnqi$yD* zaVwx)3)KXOl$*e{N_eztv!xr>O}#d^yYuMR@3~eC z`+^G&T%AnZqlDIHI|8Xby=S&lI6@#o=%Jf$2299@Zhr2*}lmxK=55UK7_q9lTnYwWN8*wccLP=XP01< zrPE$485RUmJX9u4ZKLBvq|5ZIDkdbtUCom306}>fwLAtaua_i^lcu|tH%f~l*-x{> zKiJ$g^UVi8_32AqPbOVaLzb8gC8CULo;|0Qf|7&+{ zt3>WutwPY6Weto^ch#akV%_A$gB^YC{T{9m8I!RK9oW+p^0T`^j(&1JQBeukB3y>7 zs2W;<1M!|M+K;aZGxWpi(8B?S&(I-j@=|aOzh@TfiMZ3i$dnZgL->Rxg9jZ$cp6P6 z9<8z>FzOM-%BSR_DFaoUh=VG%wscZ)3N6M;*V1rXF2C}(Q10a|4|O-MEFD!#D;kJ1 z5oC0H(G}GkiG*1aL|8Kbl-Ry$F^DW0+IiE{GHM66U;p$=D{t@XJhWxLF-B}?yvaBs zap)|L8n97pQJ6B4aK;EQ%9asXGIj){YE^qv$+_`OT5g=a8Pk7&C{bgg$yFc%Msw%R z%zXLj+b`XANvwzZH4zZn5UEa@vk;~Z64N1F5y-XD`4?XH>|fm0y}x;X9Zn?oi;bR) zkgbLBX?Z%RQ-qDO%HgwyNFbVKv1f+I=p+#$bvDALrWse91QGdD6@q{pM+&7_95~b% z#0=2#?esnP8e$IwcaMn=F6M*_sgihLi+B&_=|K&YIcvq<=Za%7i5gQ z69iO+}B_dH@iw8-q2A0kn<{{lA20`x+sVBsTxbs38Y$`q*2?7Qdu{_45V7J^1uZ z6i*iNLvk!FOkvQZdy08cr>d;pn7+;sTZAd0fy9Ew^;39}p0UtAi!Bq+XifkigQ!V< ze`e47J1+VmrOEnxJCZ@h^&qksMlpJPSOIZ5 zn~~am*{q3Fh-Z6Hh*re7gso0lwbanrY?k#Z5U<>i>xpK{})BA=LPjKh2lTK zY>AU7l6^A{x(Fn$9?(DMmid{^msVHrxiREBVoFV_!vNagl<5d^dNqV3c8~{QrNYye z$3CGlk*{b-%lvDzvrMwGID**H%VJMHYjVWJPP}+CwyOnf%%Y%5+#5~G-3PuviKf6s&~DvL%*pK}BgYlp@)-N$3dTJ7IZsDY8XuJL>GcVl1ZCCq`Y#*%IWSNM`1tZyRIi zu#p2rj%Yh<9l7_d2p09nZM0dKe_WcBqn4REenxlq&#f+sU>~~SkgQltsgd<`+El17 zVV0ES24EzyW{NS?2GL`WJhFe~*q6IA`(7OscWI3W5CI{;05(!7A%lBCMLIrD79Ud0 zw__w>^-EeB^PsFX<`V4e5XlhmHq_G5RR4}Ht@&|{m;B02+YfA0wAvsFk|0CCXk*&d zRt)Iy3h3eF3V=R-^M}9d;}>3b;b-3Ou)U$V)#LTq{wmEgM!_|n7>$j=iKiW3g+!`sWtyE^jre~Lh(L8RCEd}|+yPG`&hcb7fKIu!LbwtiYLy5cclbZFtFZ+3T$ 
zzdA>~G$0C?Ai0|5lq=TE>OqTgkCe=p6inV!DJwh17Xle|k%Ah?;i8I1qSlZi-E+~y zvF86!cJ?uH*LfY!&CWuxN74}g+OqeXhnz*LG((}RMj+7V|>aieI;-u~&O2Om0%Awv^E zht>he5V~@@>IFV{np#RalT&yMD0n;nU}~=h+p$UA~wG24OEG>olen) zp`d)&^b?z?OH{|Qs;ZCx5t>2UEee|@s26X3*+Nc3F=Ty7F)~G4V%y|Pj->^XnMxP7+p4HI&3uz#- zw`Ak2^2(%0$}elY+MW|9o_bJqAHglVQkG+s4k4ouLgEO~s3J0kK+&)74=6e$7=5Ir zj^pEGQmr?nUXXf*RBQ0tfQe$!AO?OU&6CeZ=&Cw;2xXLlQx|kfRbkRn*)}E(9J$XK zk6-@&+}wLVck?X-hx{^xlC2#h$qwBOApr+5cFd^*hIj{v?!EEW+qb?gAAjbgUQ`vS zI$zd_c(lM_L&k*kM#Nj@H$sH8hS30vSzca<%lfUgjj6}QxC0l+6+2_JO4IhC5c3wdD0&UGzL*sYf?01NnC&K5 zw&yuqB(Ra?5Hy{#7Z0cK#9L#Tn&n!4-a)L{?#sGp={SCpvjb=sup(Y0Pq9As%F9=E zHz=ws-7^XzP9%XKy$qvd&Nb#lBksM<2Vza+apsRaBiX+ni?;vZspU65oTK!yopEcT zLPca^0fQM1YZ*JKlp{jwHxqUwu5M)}d7wV|w8f=58eNyLMjeY#N zZFlV&W?a`N%x*G<08#AR4<&#IR*k4c_THVh-v0PK0?|ozRBL~X%^4*`0)$N!5l33m zvMFWw<$v*>k+Zss5;1^S>(N8-C}MljNW@y4s4U3PR;XApDGj#e$ukOtKht_wL<=z) z#P%-Cl=1YKoZWhzK*k+JfgTHgWJ4%x1EJKr35ooQRQ~mFCWkN`W5gh$!pH>k3H+Op zw)#z7)3>N%0;0?k5+4JxavnRmSH^oH*h^1sAKJ0?gc=0JdWh2~8cINrj*$hx!t&tB4p*#EhZvr_{_CJ7o-z^{;U51p#F9Zw=(T;r1Kf^@ATd*x{+Y3_+&7 zi&6K%2U~cFL51W8u_e~GdMvh$tsCLE=)?lIrp{@i(I3m(zvD4SiX?G_n&ZmxAuFL| zgorZ%B7#IZ2Ky2pd4y@^8kZ+c1dI-zJ@J~Buw>p^6m`*=?|=w%p$RB9CLvRg&^wVr zczh#kCJ>2E8$J-Bf*rJWUd#wfX0?>*bd017fn!*9X=YI{I{DGIpMU)AuiciI`f+8q z3=|EJg9IkTO%Oul^eof~hXsVKk9qINciU7lE9m2Vu@>p~b{ftwTQ{$~w2Kk91-9pS>blt}k^Yu_o z&}Y-;?@E5_H@;_3SQ5{`WW7vl-G`>4^CUDWz2Px@o9pb@w69j7&wqAH*e?Rc^du00 z-So@n7W!{pJpJ4cZ+<9@v0Crg1EC}#N&+ZJhhjox`9c%Po`qcszkDdb=+HqqYMNwx z;leyH%1#upQ3yPUXkw(Rv#wx?k5dI@TvJ#)qa|g%l#zNu@aEe3>kZPf6FQukP01f> zFP6H!_ddGs%$v6zJ9ca|(*b}27ee4eF@WsU-^&HL7e%!520fsO?s})Ur?kiI)XKDa z$>+uiJTEj*gFcX~tXafX*p$?bXQx$Dk%}CFl0%abh|=aFZT0L7IZ?inb4?>#6N&#^ z4>|;)EujJyRYP4PD!}bPkZXMrsvC(miuY2{=+xJDzxNk^@fY9w-fw(Q@)taF847RL z%ele*WhgGqf5X!cCYC#ASRq*v?Z@s3>w=K*uQ3SnpZd0%KcPEw~lubj-g*cL? 
z%(Bk4k5TmSdKB$I6Is4k+Nx=Jj~oC-2j!?Of3&Z^;qYPc7ve$YIU)>}4)ckkL2Y$J z-X2~JS~@a(sel_KaB^xa94u&~n)C$ofFT8C)V}cX!rXhmaOUmXrHFW03nhY#2}Lx* zhiu7wv!Ib~5Qw(E-6j3%nzW2AwJbOygRh#im>Fdph@^m#_2tcMDPk(9P^R#sCKaQ> zCQD3|Z6`ofW}F9Lz+#|%Dl#EtRzqvgmlVz+A|(3j;4;A@r|bC*Q5|hVH=~wG`_lKW zTj=#AU5T!Ju9k4;y+206Q~c}R!t>XC?=Q51Z%c8O9>!_Hmn3wl?Z(&F94^MSf;(Ba?R=qDWk_u0iuKsa?Ulbg-x0=`(>kwXgwohM^JL(rsgdk4Akh=7gsKR zpgC)Tk&I8K=;il9;797R?b79f}YB#`Q{8{`l|z>GRM3DV9I` zGp<*Xf93qwzaPjy{P>!MtGm6czWP;f`h;4KP0hNjNzhgPPC7_v_bLTP9GKxz)P{v} zYkaoX`|!l_Lzl&^<5XGfk8xR_EsH&Sjvef3%LfB9diT`wbDyYY zZM>uGC7L58GX;WD0~;j}*|tHrsVoXETq@Hc2wA|T7y1ZP@*vCB#@8o}#@gRbNQ0KZ z&_eg$R`;KI^Tst?$N&o0gl7+8xaqJJ;~tAN(M`Ai!oT&~CueB)n5imrCa8Gy9t>K@ zV2_Mq)m~3cBAe}>NYm#P@Ug6<7kYMF%6G{YTncG-$!G2AcO3WvU zT39lsS37)2hAi}Ncw{5Sgyq0?!*$|}Vf1uUB{u0cT>0X(6j5|;q4)Y<{q+z2@(2I& zFFy#v{eP2x)awVIfByNC%S)$UKmBp{OJA+4%T{4&VvYl{287h6t291p2yB6NEg99& zPT@>z4Ej0bUGP@h=icDfYHum6`PKvU^*FfWfM2bh)fGO7^h1;IIEj`SU;b^$q0Db@0;TlTSYR z`9J>SKR)@=>#uZQ{%S;4h3ycc({Q6oCFnqDB|+M-($}ad4Stm_5|qwe*PT-Yd;6UN zP!dBif<_EkECx_GVm_2u0t|=}8-lzYh&jSXVi+AfyZm=ob@z709%J!4C;xbchq@C@Tcvb>J0nufrpFqJ*~RYY!D{Gpn0w%BW2i=Cdp#$ zJfCRDi39<=%|eCNyj)))Vjbd)f{FSUK^Q|D322vtO~ZciDcnUtLRx*ImpAnO>M#Ei zjQsOo>;9KX`Mp2Cviz!sR%_?WuWBl+1~#ZWap|QE-NRb)nVnp?Ts#FKcL{}q{iQO}Ys-p@2e!~B*q?U`4 zZVFx#Ke*7{``YT!GwhnaeWy?}M2`Wq)-l5$!-_d+A7FzrvUh*4%F=e-c>LSP`d7`* z0HR5jo_XpJhE&Wb%kV4bHL^fOsWD9=K-?II9<$T!Xe2@Bd^@{{Hy9 zU_?n!Nr4stA~G!^CzcXu+Nvu`$p?$tc|wrwEH%rp>RLyC6M{n?ch_bjd9;ye(aFWd z{`03+&w!y9ZacnfQ~`0bjN~|C*IojQK+p}rn%%qe-g|f5e*B4lqsAH(*|En+IZV!)hERw!!8YoV(K2$l@PZ&3u5v{SOi^gA$^#1d&{zxc^mC*10?r?pXge7>tty!KDu2Mi8PX@W) z@|@)U(qbm-8L7xu6G@RPaTb%z(Fi8f|S&}!;EQW&;; zG%#KwOeA4>+vXd0Fya-pDcF&x7ED+QTc&&Fq&oFKK6Mrd9X;~q_l+T_wC@ts5ePD` zHiCxfRxo4;@%{#ZNC8KINdNY4#H&l!l6jS6M5Hq=iLJ6MIFlmTLA=N)DK#P<6c~gVG-K0kO^nj$KiKnw!Qnrh%ZN?ovDejhueY6^Nk@ zNV-CSBTei)*ZZ+RWaGtRdv*c;V7R$VhvmrtoFRA&pkKM4p4JD|X)hFg-o56lkvc(O zKlp!uX^Rv2o*hDz$iOK`8!0eBwWMdi77OExnytCC^w^!l0ChMl0YBD5F`JHMK!=Q? 
z?*JlX6hlb7*yR?@6&k|mz{-8^^?;GfI`Uk^LTNWE5vvZd#whQA5I!3DTQl&Iz7yy& zLY{aB>OsBD4p=X4>YiME?S&V>(3#`MK@TT~B_4$q3wF%+fn$&I!7bAR&jCcYKCyQn z&5KR;2v?!haGjKb6jD63l=Ko&MctV)+z4g)3JullkCpHtL7S?qUE7#1wMV9cNKB8% zG*Va(o}fobn%flRMJw6i{24x(cU0L;Q(VboWQi;RqQ96Eh>W4X3=kSFzl#xM?UR7f z{hS8TgMyHR71D2<{@Z?fDx#i<5Q7U%G~Sk?BB6%0GJ=p_LYS@u9xJTlm?}#dQLnbN zvh7Y#l;A=M$9^bD3;`oU$chMwW7T%=EyLwNmu|TQ7#*@4x%ArdyPx*vT8reLB4ULZ z2ZCh8(l8*rd4njEQkE^RZvBm=5GW#5R;%XqJsOO_OjV__&DJ&j_g7wg0T8`<=J?Lr zcg4;2y9TkLVSp&cPLQ7!{V{~hsKb+9GHdz|`S$VtrjD$c=UY*hCM=MI)iniW#=ftXFr z7TLHiCJl^9@9U+AHj5G!A*~mrhKMS8<6Wh^IIaaD^B0$HesPpeBWnuK)?jGIFw8jE zLarmRG|{X&UUH`s6g~ROf{c>^3J?YLGtXgmoEMC5y=Zj_OGsd z(%akG$h<=oQQCx>-ZRG=J36p*83L2MYa2+8IJ$NjHYGr{H>$7tn(iknm(RQ)IrQ!C z3qd+;&xYOh?sbh36xv+ZLdXcZ!Q>hc-FV}%cNP{}GtHTqjmqLfikgBDgc@Bmk%jUI zPsD_j`A9MJ+%-f^0i9Gw1W96l4DMn@k#u=>H(o6-?~5VnF#GLgDrgHNBzC>R8h1~&Or z3ho4nc7$7?bL(JR`B2sBU)BHc;(;@-3PZ2H`Td=8@6kjt3mR?fK;l#fC)&s22P>g) z(SOXh&-brumjt2)30T^sAwlS$i13iCTeHSkB56|!@s%D6vxaMK_J~4h8g5(zWu>1c z;zTv#E^?+@YRC6JYImtt3>FY24OGU77T>0WL-{In0HsIF#?pzdAcvoOxp&>`e=Xd| zZPyh^>Y=12x_<;du`NSXGrjw{-MdeG-oNf#TEmURd7R=XlzA*`o}}IvD!|BeDERu4 zI%GTC3tB5a*ZpvL>GH#|NgLHcyoPEhNvchV93TocAcl~g`^4s7tl6W;FY3iEgh=MfoRRUefMMP~D?OoW=`{3fM zM`g;u(7g<#){&F|pIu26qyrjl^iV=%QjG?ZP!6Kz#bZyr-(8d8i7M6XqRMESHk_ws ztt7;BWC2oax+_l7BGQHa05gtXuSF-7Y`nNA=X1Nh;)rRIygX)(UY`ZAjeir z0*G_~B4O$Hu6wt>(^aIgNgF;JJYD?i7NG`^@NloJJznWe@KF6<-k((q1Wm#H3HBJ81LJetSZZY@mV`d6Mk)m@}H?6=@XGW@UU- zBKFdfK@?p2_zHra1aP9}9wrDj1X$AQkFJ0BuN;?OxSPBV^#-NQ!gyNQ( z++2K}gSs(y6IPTf_$KLe4v(XZmhU^TE!pClB-BYwl;lJ3Y9x5~NMiXf7&0S5pw?b6 zb(fw6qSep3GuSg4+k;!yaS=6=@+DGcDs56k>@hDLl%Ybu>Iz31hA!-#`)uV^`}V@n zaqs7dxe)N-8oy#0(x4bku`Y6~5p3x{NI{}DNi$OTX3#aUR8-1S8zMDB z{g8_F+o4e?a^Vv{%dlmb5wrQy$Y`0ImMW+hqg+I}$bmQiA@!L30M*)l7Si@N5BPsv3Sj-;Jz>uk}y@h+FH6l;aUC%ZrO(b7`~w!N4PPbcvc zLudp;)bqSoNU*?2Ltoj6|8tWl}3%!V68{CX4)iGRHLY< z8o`|)P1O+)>90fC7j1~wLt*9OtCSw6z6h0u0ZA))+BsdRIAyfA@-2&}r5VVdDISn} zWw!t0m;PKRN+2kXO>1wN5tOiI8vhz(cLG8bOKWz_*`<~4hI4s>LIr4bjs;S)Mcwn8 z*@p;P$b0C4{{=*{b(eYT=zUpkTBXol)a?4VR+e7;PC^H90-!`GtWlI8MDQW=YDb7h zAq0ll?J~d+YDgk5J^SdTTQ04>*Ik?tN}9GAma=U7clI65bw6dfcNcwp7oIdoFF2*YQEo!Ag;+ty8zZylcO z&3(4C@+bT6UK1BeCOU=?vLypXaB3vxL%aY{%&NWHMTAHw0z!EDM{l{b|FOM$$4fQg zA!>J06J9G5rSd5_bzFev!#=+4!^RTKn_5+o?AFv=_sB(YA$R0}Df%;>&_mp1^w2{&`)~>pikwX>XdNAyOh9I6O2+LoTx&V7jU3~Lzod?u9)!91?z6J3OgA* z;mSEPcRJIJ#@|vxO3m*7cQ?H}H~0E-&_j}u0MT#SmJRjLTG#CCLuexG+073=eHL1D zP4`@x5!V%|PVwbCf88)L zeiCFzhm{bL%yz_#C}GW#7~%v#K23>^9=){sZg1aWNktTzpeZ-3)^Q9d1AZigUwCR~ zorP-a%vt7*rECYrKWQcDeeC5cill1(w-+AGi~SsCd&lfOgG z)uuvc5#`gO0YrzLdPGe>Ee^Z&hO|Y zS>u-J2nezmL1F$Y2N*Ib(z@T>yS4hK+dCJPTf~Z#Txsd1ZN0D{d1{PrcZ{gOH@Tbx zL`7V9Nnc!cZhyS_(ZJ8Bk>Elh2@p2oCE4ZN(L5K)YvM1sSZvHilUA3QMK>fWC@e*T^F{cC3Sh)`1_1dQy>lgf}2Z?!`N z8lN6M9j3>Sq-7*%Qyv+xLjR~zjhqL;?5Zqqd$=?&S9Ua!HZ%HG^+GD4G}gNjJ>`LF z9`aWH8g+$5E4%opC@fL!Uncu;eM1|jzz$mzN5j;r85#x|6Jf^P15$0*jFU0cLeYau z7yEn9MS^p#ED#Y~tQV<~rD1@ru?)l!+uqC&&M~2f-BQ%66>^%K2$VcbVf>Ya?)xjt zm!H}3BJdgILMS35XsCwl+{eZY0YjF6p*5@;*X+%A&wkL~yI7JL13}aulU+g&;hGI> znzL#VLm#Y#@W>>J50zAc*4t+cVa~qx{hh&z892K@&YCu95Bk`dfRGDd2R^;;fm9z? 
[base85-encoded GIT binary patch payload omitted: image data, unreadable after extraction]
literal 0
HcmV?d00001

diff --git a/frontend/src/App.vue b/frontend/src/App.vue
new file mode 100644
index 00000000..8bae7b74
--- /dev/null
+++ b/frontend/src/App.vue
@@ -0,0 +1,89 @@
+[89-line Vue single-file component; the angle-bracket markup was stripped during extraction]
diff --git a/frontend/src/api/admin/accounts.ts b/frontend/src/api/admin/accounts.ts
new file mode 100644
index 00000000..54d0ad94
--- /dev/null
+++ b/frontend/src/api/admin/accounts.ts
@@ -0,0 +1,376 @@
+/**
+ * Admin Accounts API endpoints
+ * Handles AI platform account management for administrators
+ */
+
+import { apiClient } from '../client'
+import type {
+  Account,
+  CreateAccountRequest,
+  UpdateAccountRequest,
+  PaginatedResponse,
+  AccountUsageInfo,
+  WindowStats,
+  ClaudeModel,
+  AccountUsageStatsResponse,
+  TempUnschedulableStatus
+} from '@/types'
+
+/**
+ * List all accounts with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters
+ * @returns Paginated list of accounts
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    platform?: string
+    type?: string
+    status?: string
+    search?: string
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<Account>> {
+  const { data } = await apiClient.get<PaginatedResponse<Account>>('/admin/accounts', {
+    params: {
+      page,
+      page_size: pageSize,
+      ...filters
+    },
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get account by ID
+ * @param id - Account ID
+ * @returns Account details
+ */
+export async function getById(id: number): Promise<Account> {
+  const { data } = await apiClient.get<Account>(`/admin/accounts/${id}`)
+  return data
+}
+
+/**
+ * Create new account
+ * @param accountData - Account data
+ * @returns Created account
+ */
+export async function create(accountData: CreateAccountRequest): Promise<Account> {
+  const { data } = await apiClient.post<Account>('/admin/accounts', accountData)
+  return data
+}
+
+/**
+ * Update account
+ * @param id - Account ID
+ * @param updates - Fields to update
+ * @returns Updated account
+ */
+export async function update(id: number, updates: UpdateAccountRequest): Promise<Account> {
+  const { data } = await apiClient.put<Account>(`/admin/accounts/${id}`, updates)
+  return data
+}
+
+/**
+ * Delete account
+ * @param id - Account ID
+ * @returns Success confirmation
+ */
+export async function deleteAccount(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/accounts/${id}`)
+  return data
+}
+
+/**
+ * Toggle account status
+ * @param id - Account ID
+ * @param status - New status
+ * @returns Updated account
+ */
+export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise<Account> {
+  return update(id, { status })
+}
+
+/**
+ * Test account connectivity
+ * @param id - Account ID
+ * @returns Test result
+ */
+export async function testAccount(id: number): Promise<{
+  success: boolean
+  message: string
+  latency_ms?: number
+}> {
+  const { data } = await apiClient.post<{
+    success: boolean
+    message: string
+    latency_ms?: number
+  }>(`/admin/accounts/${id}/test`)
+  return data
+}
+
+/**
+ * Refresh account credentials
+ * @param id - Account ID
+ * @returns Updated account
+ */
+export async function refreshCredentials(id: number): Promise<Account> {
+  const { data } = await apiClient.post<Account>(`/admin/accounts/${id}/refresh`)
+  return data
+}
+
+/**
+ * Get account usage statistics
+ * @param id - Account ID
+ * @param days - Number of days (default: 30)
+ * @returns Account usage statistics with history, summary, and models
+ */
+export async function getStats(id: number, days: number = 30): Promise<AccountUsageStatsResponse> {
+  const { data } = await apiClient.get<AccountUsageStatsResponse>(`/admin/accounts/${id}/stats`, {
+    params: { days }
+  })
+  return data
+}
+
+/**
+ * Clear account error
+ * @param id - Account ID
+ * @returns Updated account
+ */
+export async function clearError(id: number): Promise<Account> {
+  const { data } = await apiClient.post<Account>(`/admin/accounts/${id}/clear-error`)
+  return data
+}
+
+/**
+ * Get account usage information (5h/7d window)
+ * @param id - Account ID
+ * @returns Account usage info
+ */
+export async function getUsage(id: number): Promise<AccountUsageInfo> {
+  const { data } = await apiClient.get<AccountUsageInfo>(`/admin/accounts/${id}/usage`)
+  return data
+}
+
+/**
+ * Clear account rate limit status
+ * @param id - Account ID
+ * @returns Success confirmation
+ */
+export async function clearRateLimit(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.post<{ message: string }>(
+    `/admin/accounts/${id}/clear-rate-limit`
+  )
+  return data
+}
+
+/**
+ * Get temporary unschedulable status
+ * @param id - Account ID
+ * @returns Status with detail state if active
+ */
+export async function getTempUnschedulableStatus(id: number): Promise<TempUnschedulableStatus> {
+  const { data } = await apiClient.get<TempUnschedulableStatus>(
+    `/admin/accounts/${id}/temp-unschedulable`
+  )
+  return data
+}
+
+/**
+ * Reset temporary unschedulable status
+ * @param id - Account ID
+ * @returns Success confirmation
+ */
+export async function resetTempUnschedulable(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(
+    `/admin/accounts/${id}/temp-unschedulable`
+  )
+  return data
+}
+
+/**
+ * Generate OAuth authorization URL
+ * @param endpoint - API endpoint path
+ * @param config - Proxy configuration
+ * @returns Auth URL and session ID
+ */
+export async function generateAuthUrl(
+  endpoint: string,
+  config: { proxy_id?: number }
+): Promise<{ auth_url: string; session_id: string }> {
+  const { data } = await apiClient.post<{ auth_url: string; session_id: string }>(endpoint, config)
+  return data
+}
+
+/**
+ * Exchange authorization code for tokens
+ * @param endpoint - API endpoint path
+ * @param exchangeData - Session ID, code, and optional proxy config
+ * @returns Token information
+ */
+export async function exchangeCode(
+  endpoint: string,
+  exchangeData: { session_id: string; code: string; proxy_id?: number }
+): Promise<Record<string, unknown>> {
+  const { data } = await apiClient.post<Record<string, unknown>>(endpoint, exchangeData)
+  return data
+}
+
+/**
+ * Batch create accounts
+ * @param accounts - Array of account data
+ * @returns Results of batch creation
+ */
+export async function batchCreate(accounts: CreateAccountRequest[]): Promise<{
+  success: number
+  failed: number
+  results: Array<{ success: boolean; account?: Account; error?: string }>
+}> {
+  const { data } = await apiClient.post<{
+    success: number
+    failed: number
+    results: Array<{ success: boolean; account?: Account; error?: string }>
+  }>('/admin/accounts/batch', { accounts })
+  return data
+}
+
+/**
+ * Batch update credentials fields for multiple accounts
+ * @param request - Batch update request containing account IDs, field name, and value
+ * @returns Results of batch update
+ */
+export async function batchUpdateCredentials(request: {
+  account_ids: number[]
+  field: string
+  value: any
+}): Promise<{
+  success: number
+  failed: number
+  results: Array<{ account_id: number; success: boolean; error?: string }>
+}> {
+  const { data } = await apiClient.post<{
+    success: number
+    failed: number
+    results: Array<{ account_id: number; success: boolean; error?: string }>
+  }>('/admin/accounts/batch-update-credentials', request)
+  return data
+}
+
+/**
+ * Bulk update multiple accounts
+ * @param accountIds - Array of account IDs
+ * @param updates - Fields to update
+ * @returns Success confirmation
+ */
+export async function bulkUpdate(
+  accountIds: number[],
+  updates: Record<string, unknown>
+): Promise<{
+  success: number
+  failed: number
+  success_ids?: number[]
+  failed_ids?: number[]
+  results: Array<{ account_id: number; success: boolean; error?: string }>
+}> {
+  const { data } = await apiClient.post<{
+    success: number
+    failed: number
+    success_ids?: number[]
+    failed_ids?: number[]
+    results: Array<{ account_id: number; success: boolean; error?: string }>
+  }>('/admin/accounts/bulk-update', {
+    account_ids: accountIds,
+    ...updates
+  })
+  return data
+}
+
+/**
+ * Get account today statistics
+ * @param id - Account ID
+ * @returns Today's stats (requests, tokens, cost)
+ */
+export async function getTodayStats(id: number): Promise<WindowStats> {
+  const { data } = await apiClient.get<WindowStats>(`/admin/accounts/${id}/today-stats`)
+  return data
+}
+
+/**
+ * Set account schedulable status
+ * @param id - Account ID
+ * @param schedulable - Whether the account should participate in scheduling
+ * @returns Updated account
+ */
+export async function setSchedulable(id: number, schedulable: boolean): Promise<Account> {
+  const { data } = await apiClient.post<Account>(`/admin/accounts/${id}/schedulable`, {
+    schedulable
+  })
+  return data
+}
+
+/**
+ * Get available models for an account
+ * @param id - Account ID
+ * @returns List of available models for this account
+ */
+export async function getAvailableModels(id: number): Promise<ClaudeModel[]> {
+  const { data } = await apiClient.get<ClaudeModel[]>(`/admin/accounts/${id}/models`)
+  return data
+}
+
+export async function syncFromCrs(params: {
+  base_url: string
+  username: string
+  password: string
+  sync_proxies?: boolean
+}): Promise<{
+  created: number
+  updated: number
+  skipped: number
+  failed: number
+  items: Array<{
+    crs_account_id: string
+    kind: string
+    name: string
+    action: string
+    error?: string
+  }>
+}> {
+  const { data } = await apiClient.post('/admin/accounts/sync/crs', params)
+  return data
+}
+
+export const accountsAPI = {
+  list,
+  getById,
+  create,
+  update,
+  delete: deleteAccount,
+  toggleStatus,
+  testAccount,
+  refreshCredentials,
+  getStats,
+  clearError,
+  getUsage,
+  getTodayStats,
+  clearRateLimit,
+  getTempUnschedulableStatus,
+  resetTempUnschedulable,
+  setSchedulable,
+  getAvailableModels,
+  generateAuthUrl,
+  exchangeCode,
+  batchCreate,
+  batchUpdateCredentials,
+  bulkUpdate,
+  syncFromCrs
+}
+
+export default accountsAPI
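A minimal call-site sketch for the accounts module above (illustrative, not part of the patch; the account ID and filter value are placeholders):

  import accountsAPI from '@/api/admin/accounts'

  // Abort the list request if the view unmounts before it resolves.
  const controller = new AbortController()
  const accounts = await accountsAPI.list(
    1,
    20,
    { status: 'active' },
    { signal: controller.signal }
  )

  // Take a single account out of the scheduling pool (42 is a placeholder ID).
  await accountsAPI.setSchedulable(42, false)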
diff --git a/frontend/src/api/admin/antigravity.ts b/frontend/src/api/admin/antigravity.ts
new file mode 100644
index 00000000..0392da6f
--- /dev/null
+++ b/frontend/src/api/admin/antigravity.ts
@@ -0,0 +1,56 @@
+/**
+ * Admin Antigravity API endpoints
+ * Handles Antigravity (Google Cloud AI Companion) OAuth flows for administrators
+ */
+
+import { apiClient } from '../client'
+
+export interface AntigravityAuthUrlResponse {
+  auth_url: string
+  session_id: string
+  state: string
+}
+
+export interface AntigravityAuthUrlRequest {
+  proxy_id?: number
+}
+
+export interface AntigravityExchangeCodeRequest {
+  session_id: string
+  state: string
+  code: string
+  proxy_id?: number
+}
+
+export interface AntigravityTokenInfo {
+  access_token?: string
+  refresh_token?: string
+  token_type?: string
+  expires_at?: number | string
+  expires_in?: number
+  project_id?: string
+  email?: string
+  [key: string]: unknown
+}
+
+export async function generateAuthUrl(
+  payload: AntigravityAuthUrlRequest
+): Promise<AntigravityAuthUrlResponse> {
+  const { data } = await apiClient.post<AntigravityAuthUrlResponse>(
+    '/admin/antigravity/oauth/auth-url',
+    payload
+  )
+  return data
+}
+
+export async function exchangeCode(
+  payload: AntigravityExchangeCodeRequest
+): Promise<AntigravityTokenInfo> {
+  const { data } = await apiClient.post<AntigravityTokenInfo>(
+    '/admin/antigravity/oauth/exchange-code',
+    payload
+  )
+  return data
+}
+
+export default { generateAuthUrl, exchangeCode }
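The two endpoints above form the usual split OAuth flow: mint an authorization URL, let the operator complete consent, then redeem the pasted code with the same session/state pair. A sketch (the popup handling and the pasted code are placeholders):

  import antigravityAPI from '@/api/admin/antigravity'

  const { auth_url, session_id, state } = await antigravityAPI.generateAuthUrl({})
  window.open(auth_url) // operator signs in and copies the authorization code

  const pastedCode = '<code copied from the consent page>' // placeholder
  const token = await antigravityAPI.exchangeCode({ session_id, state, code: pastedCode })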
diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts
new file mode 100644
index 00000000..9b338788
--- /dev/null
+++ b/frontend/src/api/admin/dashboard.ts
@@ -0,0 +1,207 @@
+/**
+ * Admin Dashboard API endpoints
+ * Provides system-wide statistics and metrics
+ */
+
+import { apiClient } from '../client'
+import type {
+  DashboardStats,
+  TrendDataPoint,
+  ModelStat,
+  ApiKeyUsageTrendPoint,
+  UserUsageTrendPoint
+} from '@/types'
+
+/**
+ * Get dashboard statistics
+ * @returns Dashboard statistics including users, keys, accounts, and token usage
+ */
+export async function getStats(): Promise<DashboardStats> {
+  const { data } = await apiClient.get<DashboardStats>('/admin/dashboard/stats')
+  return data
+}
+
+/**
+ * Get real-time metrics
+ * @returns Real-time system metrics
+ */
+export async function getRealtimeMetrics(): Promise<{
+  active_requests: number
+  requests_per_minute: number
+  average_response_time: number
+  error_rate: number
+}> {
+  const { data } = await apiClient.get<{
+    active_requests: number
+    requests_per_minute: number
+    average_response_time: number
+    error_rate: number
+  }>('/admin/dashboard/realtime')
+  return data
+}
+
+export interface TrendParams {
+  start_date?: string
+  end_date?: string
+  granularity?: 'day' | 'hour'
+  user_id?: number
+  api_key_id?: number
+  model?: string
+  account_id?: number
+  group_id?: number
+  stream?: boolean
+}
+
+export interface TrendResponse {
+  trend: TrendDataPoint[]
+  start_date: string
+  end_date: string
+  granularity: string
+}
+
+/**
+ * Get usage trend data
+ * @param params - Query parameters for filtering
+ * @returns Usage trend data
+ */
+export async function getUsageTrend(params?: TrendParams): Promise<TrendResponse> {
+  const { data } = await apiClient.get<TrendResponse>('/admin/dashboard/trend', { params })
+  return data
+}
+
+export interface ModelStatsParams {
+  start_date?: string
+  end_date?: string
+  user_id?: number
+  api_key_id?: number
+  model?: string
+  account_id?: number
+  group_id?: number
+  stream?: boolean
+}
+
+export interface ModelStatsResponse {
+  models: ModelStat[]
+  start_date: string
+  end_date: string
+}
+
+/**
+ * Get model usage statistics
+ * @param params - Query parameters for filtering
+ * @returns Model usage statistics
+ */
+export async function getModelStats(params?: ModelStatsParams): Promise<ModelStatsResponse> {
+  const { data } = await apiClient.get<ModelStatsResponse>('/admin/dashboard/models', { params })
+  return data
+}
+
+export interface ApiKeyTrendParams extends TrendParams {
+  limit?: number
+}
+
+export interface ApiKeyTrendResponse {
+  trend: ApiKeyUsageTrendPoint[]
+  start_date: string
+  end_date: string
+  granularity: string
+}
+
+/**
+ * Get API key usage trend data
+ * @param params - Query parameters for filtering
+ * @returns API key usage trend data
+ */
+export async function getApiKeyUsageTrend(
+  params?: ApiKeyTrendParams
+): Promise<ApiKeyTrendResponse> {
+  const { data } = await apiClient.get<ApiKeyTrendResponse>('/admin/dashboard/api-keys-trend', {
+    params
+  })
+  return data
+}
+
+export interface UserTrendParams extends TrendParams {
+  limit?: number
+}
+
+export interface UserTrendResponse {
+  trend: UserUsageTrendPoint[]
+  start_date: string
+  end_date: string
+  granularity: string
+}
+
+/**
+ * Get user usage trend data
+ * @param params - Query parameters for filtering
+ * @returns User usage trend data
+ */
+export async function getUserUsageTrend(params?: UserTrendParams): Promise<UserTrendResponse> {
+  const { data } = await apiClient.get<UserTrendResponse>('/admin/dashboard/users-trend', {
+    params
+  })
+  return data
+}
+
+export interface BatchUserUsageStats {
+  user_id: number
+  today_actual_cost: number
+  total_actual_cost: number
+}
+
+export interface BatchUsersUsageResponse {
+  stats: Record<number, BatchUserUsageStats>
+}
+
+/**
+ * Get batch usage stats for multiple users
+ * @param userIds - Array of user IDs
+ * @returns Usage stats map keyed by user ID
+ */
+export async function getBatchUsersUsage(userIds: number[]): Promise<BatchUsersUsageResponse> {
+  const { data } = await apiClient.post<BatchUsersUsageResponse>('/admin/dashboard/users-usage', {
+    user_ids: userIds
+  })
+  return data
+}
+
+export interface BatchApiKeyUsageStats {
+  api_key_id: number
+  today_actual_cost: number
+  total_actual_cost: number
+}
+
+export interface BatchApiKeysUsageResponse {
+  stats: Record<number, BatchApiKeyUsageStats>
+}
+
+/**
+ * Get batch usage stats for multiple API keys
+ * @param apiKeyIds - Array of API key IDs
+ * @returns Usage stats map keyed by API key ID
+ */
+export async function getBatchApiKeysUsage(
+  apiKeyIds: number[]
+): Promise<BatchApiKeysUsageResponse> {
+  const { data } = await apiClient.post<BatchApiKeysUsageResponse>(
+    '/admin/dashboard/api-keys-usage',
+    {
+      api_key_ids: apiKeyIds
+    }
+  )
+  return data
+}
+
+export const dashboardAPI = {
+  getStats,
+  getRealtimeMetrics,
+  getUsageTrend,
+  getModelStats,
+  getApiKeyUsageTrend,
+  getUserUsageTrend,
+  getBatchUsersUsage,
+  getBatchApiKeysUsage
+}
+
+export default dashboardAPI
diff --git a/frontend/src/api/admin/gemini.ts b/frontend/src/api/admin/gemini.ts
new file mode 100644
index 00000000..6113f468
--- /dev/null
+++ b/frontend/src/api/admin/gemini.ts
@@ -0,0 +1,72 @@
+/**
+ * Admin Gemini API endpoints
+ * Handles Gemini OAuth flows for administrators
+ */
+
+import { apiClient } from '../client'
+
+export interface GeminiAuthUrlResponse {
+  auth_url: string
+  session_id: string
+  state: string
+}
+
+export interface GeminiOAuthCapabilities {
+  ai_studio_oauth_enabled: boolean
+  required_redirect_uris: string[]
+}
+
+export interface GeminiAuthUrlRequest {
+  proxy_id?: number
+  project_id?: string
+  oauth_type?: 'code_assist' | 'google_one' | 'ai_studio'
+  tier_id?: string
+}
+
+export interface GeminiExchangeCodeRequest {
+  session_id: string
+  state: string
+  code: string
+  proxy_id?: number
+  oauth_type?: 'code_assist' | 'google_one' | 'ai_studio'
+  tier_id?: string
+}
+
+export type GeminiTokenInfo = {
+  access_token?: string
+  refresh_token?: string
+  token_type?: string
+  scope?: string
+  expires_in?: number
+  expires_at?: number
+  project_id?: string
+  oauth_type?: string
+  tier_id?: string
+  extra?: Record<string, unknown>
+  [key: string]: unknown
+}
+
+export async function generateAuthUrl(
+  payload: GeminiAuthUrlRequest
+): Promise<GeminiAuthUrlResponse> {
+  const { data } = await apiClient.post<GeminiAuthUrlResponse>(
+    '/admin/gemini/oauth/auth-url',
+    payload
+  )
+  return data
+}
+
+export async function exchangeCode(payload: GeminiExchangeCodeRequest): Promise<GeminiTokenInfo> {
+  const { data } = await apiClient.post<GeminiTokenInfo>(
+    '/admin/gemini/oauth/exchange-code',
+    payload
+  )
+  return data
+}
+
+export async function getCapabilities(): Promise<GeminiOAuthCapabilities> {
+  const { data } = await apiClient.get<GeminiOAuthCapabilities>('/admin/gemini/oauth/capabilities')
+  return data
+}
+
+export default { generateAuthUrl, exchangeCode, getCapabilities }
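For the dashboard module above, a trend query is typically narrowed by date range and granularity; a sketch (the dates are illustrative):

  import dashboardAPI from '@/api/admin/dashboard'

  const { trend, granularity } = await dashboardAPI.getUsageTrend({
    start_date: '2026-01-01',
    end_date: '2026-01-15',
    granularity: 'day'
  })
  console.log(`${trend.length} ${granularity} buckets`)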
diff --git a/frontend/src/api/admin/groups.ts b/frontend/src/api/admin/groups.ts
new file mode 100644
index 00000000..44eebc99
--- /dev/null
+++ b/frontend/src/api/admin/groups.ts
@@ -0,0 +1,169 @@
+/**
+ * Admin Groups API endpoints
+ * Handles API key group management for administrators
+ */
+
+import { apiClient } from '../client'
+import type {
+  Group,
+  GroupPlatform,
+  CreateGroupRequest,
+  UpdateGroupRequest,
+  PaginatedResponse
+} from '@/types'
+
+/**
+ * List all groups with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters (platform, status, is_exclusive, search)
+ * @returns Paginated list of groups
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    platform?: GroupPlatform
+    status?: 'active' | 'inactive'
+    is_exclusive?: boolean
+    search?: string
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<Group>> {
+  const { data } = await apiClient.get<PaginatedResponse<Group>>('/admin/groups', {
+    params: {
+      page,
+      page_size: pageSize,
+      ...filters
+    },
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get all active groups (without pagination)
+ * @param platform - Optional platform filter
+ * @returns List of all active groups
+ */
+export async function getAll(platform?: GroupPlatform): Promise<Group[]> {
+  const { data } = await apiClient.get<Group[]>('/admin/groups/all', {
+    params: platform ? { platform } : undefined
+  })
+  return data
+}
+
+/**
+ * Get active groups by platform
+ * @param platform - Platform to filter by
+ * @returns List of groups for the specified platform
+ */
+export async function getByPlatform(platform: GroupPlatform): Promise<Group[]> {
+  return getAll(platform)
+}
+
+/**
+ * Get group by ID
+ * @param id - Group ID
+ * @returns Group details
+ */
+export async function getById(id: number): Promise<Group> {
+  const { data } = await apiClient.get<Group>(`/admin/groups/${id}`)
+  return data
+}
+
+/**
+ * Create new group
+ * @param groupData - Group data
+ * @returns Created group
+ */
+export async function create(groupData: CreateGroupRequest): Promise<Group> {
+  const { data } = await apiClient.post<Group>('/admin/groups', groupData)
+  return data
+}
+
+/**
+ * Update group
+ * @param id - Group ID
+ * @param updates - Fields to update
+ * @returns Updated group
+ */
+export async function update(id: number, updates: UpdateGroupRequest): Promise<Group> {
+  const { data } = await apiClient.put<Group>(`/admin/groups/${id}`, updates)
+  return data
+}
+
+/**
+ * Delete group
+ * @param id - Group ID
+ * @returns Success confirmation
+ */
+export async function deleteGroup(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/groups/${id}`)
+  return data
+}
+
+/**
+ * Toggle group status
+ * @param id - Group ID
+ * @param status - New status
+ * @returns Updated group
+ */
+export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise<Group> {
+  return update(id, { status })
+}
+
+/**
+ * Get group statistics
+ * @param id - Group ID
+ * @returns Group usage statistics
+ */
+export async function getStats(id: number): Promise<{
+  total_api_keys: number
+  active_api_keys: number
+  total_requests: number
+  total_cost: number
+}> {
+  const { data } = await apiClient.get<{
+    total_api_keys: number
+    active_api_keys: number
+    total_requests: number
+    total_cost: number
+  }>(`/admin/groups/${id}/stats`)
+  return data
+}
+
+/**
+ * Get API keys in a group
+ * @param id - Group ID
+ * @param page - Page number
+ * @param pageSize - Items per page
+ * @returns Paginated list of API keys in the group
+ */
+export async function getGroupApiKeys(
+  id: number,
+  page: number = 1,
+  pageSize: number = 20
+): Promise<PaginatedResponse<unknown>> {
+  const { data } = await apiClient.get<PaginatedResponse<unknown>>(`/admin/groups/${id}/api-keys`, {
+    params: { page, page_size: pageSize }
+  })
+  return data
+}
+
+export const groupsAPI = {
+  list,
+  getAll,
+  getByPlatform,
+  getById,
+  create,
+  update,
+  delete: deleteGroup,
+  toggleStatus,
+  getStats,
+  getGroupApiKeys
+}
+
+export default groupsAPI
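A short usage sketch for the groups module above (illustrative; assumes only the signatures shown):

  import groupsAPI from '@/api/admin/groups'

  // All active groups, unpaginated.
  const all = await groupsAPI.getAll()

  // First page of active exclusive groups.
  const exclusive = await groupsAPI.list(1, 20, { is_exclusive: true, status: 'active' })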
diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts
new file mode 100644
index 00000000..e86f6348
--- /dev/null
+++ b/frontend/src/api/admin/index.ts
@@ -0,0 +1,61 @@
+/**
+ * Admin API barrel export
+ * Centralized exports for all admin API modules
+ */
+
+import dashboardAPI from './dashboard'
+import usersAPI from './users'
+import groupsAPI from './groups'
+import accountsAPI from './accounts'
+import proxiesAPI from './proxies'
+import redeemAPI from './redeem'
+import promoAPI from './promo'
+import settingsAPI from './settings'
+import systemAPI from './system'
+import subscriptionsAPI from './subscriptions'
+import usageAPI from './usage'
+import geminiAPI from './gemini'
+import antigravityAPI from './antigravity'
+import userAttributesAPI from './userAttributes'
+import opsAPI from './ops'
+
+/**
+ * Unified admin API object for convenient access
+ */
+export const adminAPI = {
+  dashboard: dashboardAPI,
+  users: usersAPI,
+  groups: groupsAPI,
+  accounts: accountsAPI,
+  proxies: proxiesAPI,
+  redeem: redeemAPI,
+  promo: promoAPI,
+  settings: settingsAPI,
+  system: systemAPI,
+  subscriptions: subscriptionsAPI,
+  usage: usageAPI,
+  gemini: geminiAPI,
+  antigravity: antigravityAPI,
+  userAttributes: userAttributesAPI,
+  ops: opsAPI
+}
+
+export {
+  dashboardAPI,
+  usersAPI,
+  groupsAPI,
+  accountsAPI,
+  proxiesAPI,
+  redeemAPI,
+  promoAPI,
+  settingsAPI,
+  systemAPI,
+  subscriptionsAPI,
+  usageAPI,
+  geminiAPI,
+  antigravityAPI,
+  userAttributesAPI,
+  opsAPI
+}
+
+export default adminAPI
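+
+/*
+ * Usage sketch: the barrel keeps call sites terse. A hypothetical consumer:
+ *
+ *   import { adminAPI } from '@/api/admin'
+ *   const overview = await adminAPI.ops.getDashboardOverview({ time_range: '1h' })
+ */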
string +} + +export interface OpsRetryResult { + attempt_id: number + mode: OpsRetryMode + status: 'running' | 'succeeded' | 'failed' | string + + pinned_account_id?: number | null + used_account_id?: number | null + + http_status_code: number + upstream_request_id: string + + response_preview: string + response_truncated: boolean + + error_message: string + + started_at: string + finished_at: string + duration_ms: number +} + +export interface OpsDashboardOverview { + start_time: string + end_time: string + platform: string + group_id?: number | null + + health_score?: number + + system_metrics?: OpsSystemMetricsSnapshot | null + job_heartbeats?: OpsJobHeartbeat[] | null + + success_count: number + error_count_total: number + business_limited_count: number + error_count_sla: number + request_count_total: number + request_count_sla: number + + token_consumed: number + + sla: number + error_rate: number + upstream_error_rate: number + upstream_error_count_excl_429_529: number + upstream_429_count: number + upstream_529_count: number + + qps: { + current: number + peak: number + avg: number + } + tps: { + current: number + peak: number + avg: number + } + + duration: OpsPercentiles + ttft: OpsPercentiles +} + +export interface OpsPercentiles { + p50_ms?: number | null + p90_ms?: number | null + p95_ms?: number | null + p99_ms?: number | null + avg_ms?: number | null + max_ms?: number | null +} + +export interface OpsThroughputTrendPoint { + bucket_start: string + request_count: number + token_consumed: number + qps: number + tps: number +} + +export interface OpsThroughputPlatformBreakdownItem { + platform: string + request_count: number + token_consumed: number +} + +export interface OpsThroughputGroupBreakdownItem { + group_id: number + group_name: string + request_count: number + token_consumed: number +} + +export interface OpsThroughputTrendResponse { + bucket: string + points: OpsThroughputTrendPoint[] + by_platform?: OpsThroughputPlatformBreakdownItem[] + top_groups?: OpsThroughputGroupBreakdownItem[] +} + +export type OpsRequestKind = 'success' | 'error' +export type OpsRequestDetailsKind = OpsRequestKind | 'all' +export type OpsRequestDetailsSort = 'created_at_desc' | 'duration_desc' + +export interface OpsRequestDetail { + kind: OpsRequestKind + created_at: string + request_id: string + + platform?: string + model?: string + duration_ms?: number | null + status_code?: number | null + + error_id?: number | null + phase?: string + severity?: string + message?: string + + user_id?: number | null + api_key_id?: number | null + account_id?: number | null + group_id?: number | null + + stream?: boolean +} + +export interface OpsRequestDetailsParams { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + + kind?: OpsRequestDetailsKind + + platform?: string + group_id?: number | null + + user_id?: number + api_key_id?: number + account_id?: number + + model?: string + request_id?: string + q?: string + + min_duration_ms?: number + max_duration_ms?: number + + sort?: OpsRequestDetailsSort + + page?: number + page_size?: number +} + +export type OpsRequestDetailsResponse = PaginatedResponse + +export interface OpsLatencyHistogramBucket { + range: string + count: number +} + +export interface OpsLatencyHistogramResponse { + start_time: string + end_time: string + platform: string + group_id?: number | null + + total_requests: number + buckets: OpsLatencyHistogramBucket[] +} + +export interface OpsErrorTrendPoint { + bucket_start: string + 
+
+export interface OpsSystemMetricsSnapshot {
+  id: number
+  created_at: string
+  window_minutes: number
+
+  cpu_usage_percent?: number | null
+  memory_used_mb?: number | null
+  memory_total_mb?: number | null
+  memory_usage_percent?: number | null
+
+  db_ok?: boolean | null
+  redis_ok?: boolean | null
+
+  // Config-derived limits (best-effort) for rendering "current vs max".
+  db_max_open_conns?: number | null
+  redis_pool_size?: number | null
+
+  redis_conn_total?: number | null
+  redis_conn_idle?: number | null
+
+  db_conn_active?: number | null
+  db_conn_idle?: number | null
+  db_conn_waiting?: number | null
+
+  goroutine_count?: number | null
+  concurrency_queue_depth?: number | null
+}
+
+export interface OpsJobHeartbeat {
+  job_name: string
+  last_run_at?: string | null
+  last_success_at?: string | null
+  last_error_at?: string | null
+  last_error?: string | null
+  last_duration_ms?: number | null
+  updated_at: string
+}
+
+export interface PlatformConcurrencyInfo {
+  platform: string
+  current_in_use: number
+  max_capacity: number
+  load_percentage: number
+  waiting_in_queue: number
+}
+
+export interface GroupConcurrencyInfo {
+  group_id: number
+  group_name: string
+  platform: string
+  current_in_use: number
+  max_capacity: number
+  load_percentage: number
+  waiting_in_queue: number
+}
+
+export interface AccountConcurrencyInfo {
+  account_id: number
+  account_name?: string
+  platform: string
+  group_id: number
+  group_name: string
+  current_in_use: number
+  max_capacity: number
+  load_percentage: number
+  waiting_in_queue: number
+}
+
+export interface OpsConcurrencyStatsResponse {
+  enabled: boolean
+  platform: Record<string, PlatformConcurrencyInfo>
+  group: Record<string, GroupConcurrencyInfo>
+  account: Record<string, AccountConcurrencyInfo>
+  timestamp?: string
+}
+
+export async function getConcurrencyStats(platform?: string, groupId?: number | null): Promise<OpsConcurrencyStatsResponse> {
+  const params: Record<string, string | number> = {}
+  if (platform) {
+    params.platform = platform
+  }
+  if (typeof groupId === 'number' && groupId > 0) {
+    params.group_id = groupId
+  }
+
+  const { data } = await apiClient.get<OpsConcurrencyStatsResponse>('/admin/ops/concurrency', { params })
+  return data
+}
+
+export interface PlatformAvailability {
+  platform: string
+  total_accounts: number
+  available_count: number
+  rate_limit_count: number
+  error_count: number
+}
+
+export interface GroupAvailability {
+  group_id: number
+  group_name: string
+  platform: string
+  total_accounts: number
+  available_count: number
+  rate_limit_count: number
+  error_count: number
+}
+
+export interface AccountAvailability {
+  account_id: number
+  account_name: string
+  platform: string
+  group_id: number
+  group_name: string
+  status: string
+  is_available: boolean
+  is_rate_limited: boolean
+  rate_limit_reset_at?: string
+  rate_limit_remaining_sec?: number
+  is_overloaded: boolean
+  overload_until?: string
+  overload_remaining_sec?: number
+  has_error: boolean
+  error_message?: string
+}
+
+export interface OpsAccountAvailabilityStatsResponse {
+  enabled: boolean
+  platform: Record<string, PlatformAvailability>
+  group: Record<string, GroupAvailability>
+  account: Record<string, AccountAvailability>
+  timestamp?: string
+}
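+
+/*
+ * Usage sketch (illustrative; 'claude' stands in for a real platform id):
+ *
+ *   const stats = await getConcurrencyStats('claude')
+ *   if (stats.enabled) {
+ *     const hot = Object.values(stats.account).filter((a) => a.load_percentage >= 90)
+ *     console.log(hot.length, 'accounts above 90% load')
+ *   }
+ */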
+
+export async function getAccountAvailabilityStats(platform?: string, groupId?: number | null): Promise<OpsAccountAvailabilityStatsResponse> {
+  const params: Record<string, string | number> = {}
+  if (platform) {
+    params.platform = platform
+  }
+  if (typeof groupId === 'number' && groupId > 0) {
+    params.group_id = groupId
+  }
+  const { data } = await apiClient.get<OpsAccountAvailabilityStatsResponse>('/admin/ops/account-availability', { params })
+  return data
+}
+
+export interface OpsRateSummary {
+  current: number
+  peak: number
+  avg: number
+}
+
+export interface OpsRealtimeTrafficSummary {
+  window: string
+  start_time: string
+  end_time: string
+  platform: string
+  group_id?: number | null
+  qps: OpsRateSummary
+  tps: OpsRateSummary
+}
+
+export interface OpsRealtimeTrafficSummaryResponse {
+  enabled: boolean
+  summary: OpsRealtimeTrafficSummary | null
+  timestamp?: string
+}
+
+export async function getRealtimeTrafficSummary(
+  window: string,
+  platform?: string,
+  groupId?: number | null
+): Promise<OpsRealtimeTrafficSummaryResponse> {
+  const params: Record<string, string | number> = { window }
+  if (platform) {
+    params.platform = platform
+  }
+  if (typeof groupId === 'number' && groupId > 0) {
+    params.group_id = groupId
+  }
+
+  const { data } = await apiClient.get<OpsRealtimeTrafficSummaryResponse>('/admin/ops/realtime-traffic', { params })
+  return data
+}
+
+/**
+ * Subscribe to realtime QPS updates via WebSocket.
+ *
+ * Note: browsers cannot set Authorization headers for WebSockets.
+ * We authenticate via Sec-WebSocket-Protocol using a prefixed token item:
+ * ["sub2api-admin", "jwt.<token>"]
+ */
+export interface SubscribeQPSOptions {
+  token?: string | null
+  onOpen?: () => void
+  onClose?: (event: CloseEvent) => void
+  onError?: (event: Event) => void
+  /**
+   * Called when the server closes with an application close code that indicates
+   * reconnecting is not useful (e.g. feature flag disabled).
+   */
+  onFatalClose?: (event: CloseEvent) => void
+  /**
+   * More granular status updates for UI (connecting/reconnecting/offline/etc).
+   */
+  onStatusChange?: (status: OpsWSStatus) => void
+  /**
+   * Called when a reconnect is scheduled (helps display "retry in Xs").
+   */
+  onReconnectScheduled?: (info: { attempt: number, delayMs: number }) => void
+  wsBaseUrl?: string
+  /**
+   * Maximum reconnect attempts. Defaults to Infinity to keep the dashboard live.
+   * Set to 0 to disable reconnect.
+   */
+  maxReconnectAttempts?: number
+  reconnectBaseDelayMs?: number
+  reconnectMaxDelayMs?: number
+  /**
+   * Stale connection detection (heartbeat-by-observation).
+   * If no messages are received within this window, the socket is closed to trigger a reconnect.
+   * Set to 0 to disable.
+   */
+  staleTimeoutMs?: number
+  /**
+   * How often to check staleness. Only used when `staleTimeoutMs > 0`.
+   */
+  staleCheckIntervalMs?: number
+}
+
+export type OpsWSStatus = 'connecting' | 'connected' | 'reconnecting' | 'offline' | 'closed'
+
+export const OPS_WS_CLOSE_CODES = {
+  REALTIME_DISABLED: 4001
+} as const
+
+const OPS_WS_BASE_PROTOCOL = 'sub2api-admin'
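+
+// Reconnect delay sketch: with the defaults below (base 1000 ms, cap 30000 ms)
+// the schedule is min(1000 * 2^attempt, 30000) + jitter, i.e. roughly
+// 1s, 2s, 4s, 8s, 16s, 30s, 30s, ... plus up to 250 ms of random jitter.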
+
+export function subscribeQPS(onMessage: (data: any) => void, options: SubscribeQPSOptions = {}): () => void {
+  let ws: WebSocket | null = null
+  let reconnectAttempts = 0
+  const maxReconnectAttempts = Number.isFinite(options.maxReconnectAttempts as number)
+    ? (options.maxReconnectAttempts as number)
+    : Infinity
+  const baseDelayMs = options.reconnectBaseDelayMs ?? 1000
+  const maxDelayMs = options.reconnectMaxDelayMs ?? 30000
+  let reconnectTimer: ReturnType<typeof setTimeout> | null = null
+  let shouldReconnect = true
+  let isConnecting = false
+  let hasConnectedOnce = false
+  let lastMessageAt = 0
+  const staleTimeoutMs = options.staleTimeoutMs ?? 120_000
+  const staleCheckIntervalMs = options.staleCheckIntervalMs ?? 30_000
+  let staleTimer: ReturnType<typeof setInterval> | null = null
+
+  const setStatus = (status: OpsWSStatus) => {
+    options.onStatusChange?.(status)
+  }
+
+  const clearReconnectTimer = () => {
+    if (reconnectTimer) {
+      clearTimeout(reconnectTimer)
+      reconnectTimer = null
+    }
+  }
+
+  const clearStaleTimer = () => {
+    if (staleTimer) {
+      clearInterval(staleTimer)
+      staleTimer = null
+    }
+  }
+
+  const startStaleTimer = () => {
+    clearStaleTimer()
+    if (!staleTimeoutMs || staleTimeoutMs <= 0) return
+    staleTimer = setInterval(() => {
+      if (!shouldReconnect) return
+      if (!ws || ws.readyState !== WebSocket.OPEN) return
+      if (!lastMessageAt) return
+      const ageMs = Date.now() - lastMessageAt
+      if (ageMs > staleTimeoutMs) {
+        // Treat as a half-open connection; closing triggers the normal reconnect path.
+        ws.close()
+      }
+    }, staleCheckIntervalMs)
+  }
+
+  const scheduleReconnect = () => {
+    if (!shouldReconnect) return
+    if (hasConnectedOnce && reconnectAttempts >= maxReconnectAttempts) return
+
+    // If we're offline, wait for the browser to come back online.
+    if (typeof navigator !== 'undefined' && 'onLine' in navigator && !navigator.onLine) {
+      setStatus('offline')
+      return
+    }
+
+    const expDelay = baseDelayMs * Math.pow(2, reconnectAttempts)
+    const delay = Math.min(expDelay, maxDelayMs)
+    const jitter = Math.floor(Math.random() * 250)
+    clearReconnectTimer()
+    reconnectTimer = setTimeout(() => {
+      reconnectAttempts++
+      connect()
+    }, delay + jitter)
+    options.onReconnectScheduled?.({ attempt: reconnectAttempts + 1, delayMs: delay + jitter })
+  }
+
+  const handleOnline = () => {
+    if (!shouldReconnect) return
+    if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) return
+    connect()
+  }
+
+  const handleOffline = () => {
+    setStatus('offline')
+  }
+
+  const connect = () => {
+    if (!shouldReconnect) return
+    if (isConnecting) return
+    if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) return
+    if (hasConnectedOnce && reconnectAttempts >= maxReconnectAttempts) return
+
+    isConnecting = true
+    setStatus(hasConnectedOnce ? 'reconnecting' : 'connecting')
+    const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
+    const wsBaseUrl = options.wsBaseUrl || import.meta.env.VITE_WS_BASE_URL || window.location.host
+    const wsURL = new URL(`${protocol}//${wsBaseUrl}/api/v1/admin/ops/ws/qps`)
+
+    // Do NOT put admin JWT in the URL query string (it can leak via access logs, proxies, etc).
+    // Browsers cannot set Authorization headers for WebSockets, so we pass the token via
+    // Sec-WebSocket-Protocol (subprotocol list): ["sub2api-admin", "jwt.<token>"].
+    const rawToken = String(options.token ?? localStorage.getItem('auth_token') ?? '').trim()
+    const protocols: string[] = [OPS_WS_BASE_PROTOCOL]
+    if (rawToken) protocols.push(`jwt.${rawToken}`)
+
+    ws = new WebSocket(wsURL.toString(), protocols)
+
+    ws.onopen = () => {
+      reconnectAttempts = 0
+      isConnecting = false
+      hasConnectedOnce = true
+      clearReconnectTimer()
+      lastMessageAt = Date.now()
+      startStaleTimer()
+      setStatus('connected')
+      options.onOpen?.()
+    }
+
+    ws.onmessage = (e) => {
+      try {
+        const data = JSON.parse(e.data)
+        lastMessageAt = Date.now()
+        onMessage(data)
+      } catch (err) {
+        console.warn('[OpsWS] Failed to parse message:', err)
+      }
+    }
+
+    ws.onerror = (error) => {
+      console.error('[OpsWS] Connection error:', error)
+      options.onError?.(error)
+    }
+
+    ws.onclose = (event) => {
+      isConnecting = false
+      options.onClose?.(event)
+      clearStaleTimer()
+      ws = null
+
+      // If the server explicitly tells us to stop reconnecting, honor it.
+      if (event && typeof event.code === 'number' && event.code === OPS_WS_CLOSE_CODES.REALTIME_DISABLED) {
+        shouldReconnect = false
+        clearReconnectTimer()
+        setStatus('closed')
+        options.onFatalClose?.(event)
+        return
+      }
+
+      scheduleReconnect()
+    }
+  }
+
+  window.addEventListener('online', handleOnline)
+  window.addEventListener('offline', handleOffline)
+  connect()
+
+  return () => {
+    shouldReconnect = false
+    window.removeEventListener('online', handleOnline)
+    window.removeEventListener('offline', handleOffline)
+    clearReconnectTimer()
+    clearStaleTimer()
+    if (ws) ws.close()
+    ws = null
+    setStatus('closed')
+  }
+}
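+
+/*
+ * Usage sketch (hypothetical Vue-style caller; only the subscribeQPS contract
+ * above is real):
+ *
+ *   const stop = subscribeQPS((msg) => updateChart(msg), {
+ *     onStatusChange: (s) => (wsStatus.value = s),
+ *     onFatalClose: () => showRealtimeDisabledBanner()
+ *   })
+ *   onUnmounted(stop) // always dispose, so reconnect timers do not leak
+ */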
+
+export type OpsSeverity = string
+export type OpsPhase = string
+
+export type AlertSeverity = 'critical' | 'warning' | 'info'
+export type ThresholdMode = 'count' | 'percentage' | 'both'
+export type MetricType =
+  | 'success_rate'
+  | 'error_rate'
+  | 'upstream_error_rate'
+  | 'cpu_usage_percent'
+  | 'memory_usage_percent'
+  | 'concurrency_queue_depth'
+  | 'group_available_accounts'
+  | 'group_available_ratio'
+  | 'group_rate_limit_ratio'
+  | 'account_rate_limited_count'
+  | 'account_error_count'
+  | 'account_error_ratio'
+  | 'overload_account_count'
+export type Operator = '>' | '>=' | '<' | '<=' | '==' | '!='
+
+export interface AlertRule {
+  id?: number
+  name: string
+  description?: string
+  enabled: boolean
+  metric_type: MetricType
+  operator: Operator
+  threshold: number
+  window_minutes: number
+  sustained_minutes: number
+  severity: OpsSeverity
+  cooldown_minutes: number
+  notify_email: boolean
+  filters?: Record<string, unknown>
+  created_at?: string
+  updated_at?: string
+  last_triggered_at?: string | null
+}
+
+export interface AlertEvent {
+  id: number
+  rule_id: number
+  severity: OpsSeverity | string
+  status: 'firing' | 'resolved' | 'manual_resolved' | string
+  title?: string
+  description?: string
+  metric_value?: number
+  threshold_value?: number
+  dimensions?: Record<string, unknown>
+  fired_at: string
+  resolved_at?: string | null
+  email_sent: boolean
+  created_at: string
+}
+
+export interface EmailNotificationConfig {
+  alert: {
+    enabled: boolean
+    recipients: string[]
+    min_severity: AlertSeverity | ''
+    rate_limit_per_hour: number
+    batching_window_seconds: number
+    include_resolved_alerts: boolean
+  }
+  report: {
+    enabled: boolean
+    recipients: string[]
+    daily_summary_enabled: boolean
+    daily_summary_schedule: string
+    weekly_summary_enabled: boolean
+    weekly_summary_schedule: string
+    error_digest_enabled: boolean
+    error_digest_schedule: string
+    error_digest_min_count: number
+    account_health_enabled: boolean
+    account_health_schedule: string
+    account_health_error_rate_threshold: number
+  }
+}
+
+export interface OpsMetricThresholds {
+  sla_percent_min?: number | null // turns red when SLA drops below this value
+  ttft_p99_ms_max?: number | null // turns red when TTFT P99 exceeds this value
+  request_error_rate_percent_max?: number | null // turns red when the request error rate exceeds this value
+  upstream_error_rate_percent_max?: number | null // turns red when the upstream error rate exceeds this value
+}
+
+export interface OpsDistributedLockSettings {
+  enabled: boolean
+  key: string
+  ttl_seconds: number
+}
+
+export interface OpsAlertRuntimeSettings {
+  evaluation_interval_seconds: number
+  distributed_lock: OpsDistributedLockSettings
+  silencing: {
+    enabled: boolean
+    global_until_rfc3339: string
+    global_reason: string
+    entries?: Array<{
+      rule_id?: number
+      severities?: Array<string>
+      until_rfc3339: string
+      reason: string
+    }>
+  }
+  thresholds: OpsMetricThresholds // metric threshold configuration
+}
+
+export interface OpsAdvancedSettings {
+  data_retention: OpsDataRetentionSettings
+  aggregation: OpsAggregationSettings
+  ignore_count_tokens_errors: boolean
+  ignore_context_canceled: boolean
+  ignore_no_available_accounts: boolean
+  auto_refresh_enabled: boolean
+  auto_refresh_interval_seconds: number
+}
+
+export interface OpsDataRetentionSettings {
+  cleanup_enabled: boolean
+  cleanup_schedule: string
+  error_log_retention_days: number
+  minute_metrics_retention_days: number
+  hourly_metrics_retention_days: number
+}
+
+export interface OpsAggregationSettings {
+  aggregation_enabled: boolean
+}
+
+export interface OpsErrorLog {
+  id: number
+  created_at: string
+
+  // Standardized classification
+  phase: OpsPhase
+  type: string
+  error_owner: 'client' | 'provider' | 'platform' | string
+  error_source: 'client_request' | 'upstream_http' | 'gateway' | string
+
+  severity: OpsSeverity
+  status_code: number
+  platform: string
+  model: string
+
+  is_retryable: boolean
+  retry_count: number
+
+  resolved: boolean
+  resolved_at?: string | null
+  resolved_by_user_id?: number | null
+  resolved_retry_id?: number | null
+
+  client_request_id: string
+  request_id: string
+  message: string
+
+  user_id?: number | null
+  user_email: string
+  api_key_id?: number | null
+  account_id?: number | null
+  account_name: string
+  group_id?: number | null
+  group_name: string
+
+  client_ip?: string | null
+  request_path?: string
+  stream?: boolean
+}
+
+export interface OpsErrorDetail extends OpsErrorLog {
+  error_body: string
+  user_agent: string
+
+  // Upstream context (optional; enriched by gateway services)
+  upstream_status_code?: number | null
+  upstream_error_message?: string
+  upstream_error_detail?: string
+  upstream_errors?: string
+
+  auth_latency_ms?: number | null
+  routing_latency_ms?: number | null
+  upstream_latency_ms?: number | null
+  response_latency_ms?: number | null
+  time_to_first_token_ms?: number | null
+
+  request_body: string
+  request_body_truncated: boolean
+  request_body_bytes?: number | null
+
+  is_business_limited: boolean
+}
+
+export type OpsErrorLogsResponse = PaginatedResponse<OpsErrorLog>
+
+export async function getDashboardOverview(
+  params: {
+    time_range?: '5m' | '30m' | '1h' | '6h' | '24h'
+    start_time?: string
+    end_time?: string
+    platform?: string
+    group_id?: number | null
+    mode?: OpsQueryMode
+  },
+  options: OpsRequestOptions = {}
+): Promise<OpsDashboardOverview> {
+  const { data } = await apiClient.get<OpsDashboardOverview>('/admin/ops/dashboard/overview', {
+    params,
+    signal: options.signal
+  })
+  return data
+}
+
+export async function getThroughputTrend(
+  params: {
+    time_range?: '5m' | '30m' | '1h' | '6h' | '24h'
+    start_time?: string
+    end_time?: string
+    platform?: string
+    group_id?: number | null
+    mode?: OpsQueryMode
+  },
+  options: OpsRequestOptions = {}
+): Promise<OpsThroughputTrendResponse> {
+  const { data } = await apiClient.get<OpsThroughputTrendResponse>('/admin/ops/dashboard/throughput-trend', {
+    params,
+    signal: options.signal
+  })
+  return data
+}
+
+export async function getLatencyHistogram(
+  params: {
+    time_range?: '5m' | '30m' | '1h' | '6h' | '24h'
+    start_time?: string
+    end_time?: string
+    platform?: string
+    group_id?: number | null
+    mode?: OpsQueryMode
+  },
+  options: OpsRequestOptions = {}
+): Promise<OpsLatencyHistogramResponse> {
+  const { data } = await apiClient.get<OpsLatencyHistogramResponse>('/admin/ops/dashboard/latency-histogram', {
+    params,
+    signal: options.signal
+  })
+  return data
+}
+
+export async function getErrorTrend(
+  params: {
+    time_range?: '5m' | '30m' | '1h' | '6h' | '24h'
+    start_time?: string
+    end_time?: string
+    platform?: string
+    group_id?: number | null
+    mode?: OpsQueryMode
+  },
+  options: OpsRequestOptions = {}
+): Promise<OpsErrorTrendResponse> {
+  const { data } = await apiClient.get<OpsErrorTrendResponse>('/admin/ops/dashboard/error-trend', {
+    params,
+    signal: options.signal
+  })
+  return data
+}
+
+export async function getErrorDistribution(
+  params: {
+    time_range?: '5m' | '30m' | '1h' | '6h' | '24h'
+    start_time?: string
+    end_time?: string
+    platform?: string
+    group_id?: number | null
+    mode?: OpsQueryMode
+  },
+  options: OpsRequestOptions = {}
+): Promise<OpsErrorDistributionResponse> {
+  const { data } = await apiClient.get<OpsErrorDistributionResponse>('/admin/ops/dashboard/error-distribution', {
+    params,
+    signal: options.signal
+  })
+  return data
+}
+
+export type OpsErrorListView = 'errors' | 'excluded' | 'all'
+
+export type OpsErrorListQueryParams = {
+  page?: number
+  page_size?: number
+  time_range?: string
+  start_time?: string
+  end_time?: string
+  platform?: string
+  group_id?: number | null
+  account_id?: number | null
+
+  phase?: string
+  error_owner?: string
+  error_source?: string
+  resolved?: string
+  view?: OpsErrorListView
+
+  q?: string
+  status_codes?: string
+  status_codes_other?: string
+}
+
+// Legacy unified endpoints
+export async function listErrorLogs(params: OpsErrorListQueryParams): Promise<OpsErrorLogsResponse> {
+  const { data } = await apiClient.get<OpsErrorLogsResponse>('/admin/ops/errors', { params })
+  return data
+}
+
+export async function getErrorLogDetail(id: number): Promise<OpsErrorDetail> {
+  const { data } = await apiClient.get<OpsErrorDetail>(`/admin/ops/errors/${id}`)
+  return data
+}
+
+export async function retryErrorRequest(id: number, req: OpsRetryRequest): Promise<OpsRetryResult> {
+  const { data } = await apiClient.post<OpsRetryResult>(`/admin/ops/errors/${id}/retry`, req)
+  return data
+}
+
+export async function listRetryAttempts(errorId: number, limit = 50): Promise<OpsRetryAttempt[]> {
+  const { data } = await apiClient.get<OpsRetryAttempt[]>(`/admin/ops/errors/${errorId}/retries`, { params: { limit } })
+  return data
+}
+
+export async function updateErrorResolved(errorId: number, resolved: boolean): Promise<void> {
+  await apiClient.put(`/admin/ops/errors/${errorId}/resolve`, { resolved })
+}
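+
+/*
+ * Usage sketch of the legacy retry flow (values illustrative; assumes the
+ * paginated payload exposes an `items` array):
+ *
+ *   const page = await listErrorLogs({ time_range: '1h', view: 'errors' })
+ *   const detail = await getErrorLogDetail(page.items[0].id)
+ *   const result = await retryErrorRequest(detail.id, {
+ *     mode: 'upstream',
+ *     pinned_account_id: detail.account_id ?? undefined
+ *   })
+ *   if (result.status === 'succeeded') await updateErrorResolved(detail.id, true)
+ */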
+
+// New split endpoints
+export async function listRequestErrors(params: OpsErrorListQueryParams): Promise<OpsErrorLogsResponse> {
+  const { data } = await apiClient.get<OpsErrorLogsResponse>('/admin/ops/request-errors', { params })
+  return data
+}
+
+export async function listUpstreamErrors(params: OpsErrorListQueryParams): Promise<OpsErrorLogsResponse> {
+  const { data } = await apiClient.get<OpsErrorLogsResponse>('/admin/ops/upstream-errors', { params })
+  return data
+}
+
+export async function getRequestErrorDetail(id: number): Promise<OpsErrorDetail> {
+  const { data } = await apiClient.get<OpsErrorDetail>(`/admin/ops/request-errors/${id}`)
+  return data
+}
+
+export async function getUpstreamErrorDetail(id: number): Promise<OpsErrorDetail> {
+  const { data } = await apiClient.get<OpsErrorDetail>(`/admin/ops/upstream-errors/${id}`)
+  return data
+}
+
+export async function retryRequestErrorClient(id: number): Promise<OpsRetryResult> {
+  const { data } = await apiClient.post<OpsRetryResult>(`/admin/ops/request-errors/${id}/retry-client`, {})
+  return data
+}
+
+export async function retryRequestErrorUpstreamEvent(id: number, idx: number): Promise<OpsRetryResult> {
+  const { data } = await apiClient.post<OpsRetryResult>(`/admin/ops/request-errors/${id}/upstream-errors/${idx}/retry`, {})
+  return data
+}
+
+export async function retryUpstreamError(id: number): Promise<OpsRetryResult> {
+  const { data } = await apiClient.post<OpsRetryResult>(`/admin/ops/upstream-errors/${id}/retry`, {})
+  return data
+}
+
+export async function updateRequestErrorResolved(errorId: number, resolved: boolean): Promise<void> {
+  await apiClient.put(`/admin/ops/request-errors/${errorId}/resolve`, { resolved })
+}
+
+export async function updateUpstreamErrorResolved(errorId: number, resolved: boolean): Promise<void> {
+  await apiClient.put(`/admin/ops/upstream-errors/${errorId}/resolve`, { resolved })
+}
+
+export async function listRequestErrorUpstreamErrors(
+  id: number,
+  params: OpsErrorListQueryParams = {},
+  options: { include_detail?: boolean } = {}
+): Promise<PaginatedResponse<OpsUpstreamErrorEvent>> {
+  const query: Record<string, unknown> = { ...params }
+  if (options.include_detail) query.include_detail = '1'
+  const { data } = await apiClient.get<PaginatedResponse<OpsUpstreamErrorEvent>>(`/admin/ops/request-errors/${id}/upstream-errors`, { params: query })
+  return data
+}
+
+export async function listRequestDetails(params: OpsRequestDetailsParams): Promise<OpsRequestDetailsResponse> {
+  const { data } = await apiClient.get<OpsRequestDetailsResponse>('/admin/ops/requests', { params })
+  return data
+}
+
+// Alert rules
+export async function listAlertRules(): Promise<AlertRule[]> {
+  const { data } = await apiClient.get<AlertRule[]>('/admin/ops/alert-rules')
+  return data
+}
+
+export async function createAlertRule(rule: AlertRule): Promise<AlertRule> {
+  const { data } = await apiClient.post<AlertRule>('/admin/ops/alert-rules', rule)
+  return data
+}
+
+export async function updateAlertRule(id: number, rule: Partial<AlertRule>): Promise<AlertRule> {
+  const { data } = await apiClient.put<AlertRule>(`/admin/ops/alert-rules/${id}`, rule)
+  return data
+}
+
+export async function deleteAlertRule(id: number): Promise<void> {
+  await apiClient.delete(`/admin/ops/alert-rules/${id}`)
+}
+
+export interface AlertEventsQuery {
+  limit?: number
+  status?: string
+  severity?: string
+  email_sent?: boolean
+  time_range?: string
+  start_time?: string
+  end_time?: string
+  before_fired_at?: string
+  before_id?: number
+  platform?: string
+  group_id?: number
+}
+
+export async function listAlertEvents(params: AlertEventsQuery = {}): Promise<AlertEvent[]> {
+  const { data } = await apiClient.get<AlertEvent[]>('/admin/ops/alert-events', { params })
+  return data
+}
+
+export async function getAlertEvent(id: number): Promise<AlertEvent> {
+  const { data } = await apiClient.get<AlertEvent>(`/admin/ops/alert-events/${id}`)
+  return data
+}
+
+export async function updateAlertEventStatus(id: number, status: 'resolved' | 'manual_resolved'): Promise<void> {
+  await apiClient.put(`/admin/ops/alert-events/${id}/status`, { status })
+}
+
+export async function createAlertSilence(payload: {
+  rule_id: number
+  platform: string
+  group_id?: number | null
+  region?: string | null
+  until: string
+  reason?: string
+}): Promise<void> {
+  await apiClient.post('/admin/ops/alert-silences', payload)
+}
+
+// Email notification config
+export async function getEmailNotificationConfig(): Promise<EmailNotificationConfig> {
+  const { data } = await apiClient.get<EmailNotificationConfig>('/admin/ops/email-notification/config')
+  return data
+}
+
+export async function updateEmailNotificationConfig(config: EmailNotificationConfig): Promise<EmailNotificationConfig> {
+  const { data } = await apiClient.put<EmailNotificationConfig>('/admin/ops/email-notification/config', config)
+  return data
+}
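+
+/*
+ * Usage sketch: create a warning rule that fires when the error rate stays
+ * above 5% for five minutes. Field values are illustrative; the exact
+ * semantics of threshold/window are defined server-side.
+ *
+ *   await createAlertRule({
+ *     name: 'High error rate',
+ *     enabled: true,
+ *     metric_type: 'error_rate',
+ *     operator: '>',
+ *     threshold: 5,
+ *     window_minutes: 5,
+ *     sustained_minutes: 5,
+ *     severity: 'warning',
+ *     cooldown_minutes: 30,
+ *     notify_email: true
+ *   })
+ */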
+
+// Runtime settings (DB-backed)
+export async function getAlertRuntimeSettings(): Promise<OpsAlertRuntimeSettings> {
+  const { data } = await apiClient.get<OpsAlertRuntimeSettings>('/admin/ops/runtime/alert')
+  return data
+}
+
+export async function updateAlertRuntimeSettings(config: OpsAlertRuntimeSettings): Promise<OpsAlertRuntimeSettings> {
+  const { data } = await apiClient.put<OpsAlertRuntimeSettings>('/admin/ops/runtime/alert', config)
+  return data
+}
+
+// Advanced settings (DB-backed)
+export async function getAdvancedSettings(): Promise<OpsAdvancedSettings> {
+  const { data } = await apiClient.get<OpsAdvancedSettings>('/admin/ops/advanced-settings')
+  return data
+}
+
+export async function updateAdvancedSettings(config: OpsAdvancedSettings): Promise<OpsAdvancedSettings> {
+  const { data } = await apiClient.put<OpsAdvancedSettings>('/admin/ops/advanced-settings', config)
+  return data
+}
+
+// ==================== Metric Thresholds ====================
+
+async function getMetricThresholds(): Promise<OpsMetricThresholds> {
+  const { data } = await apiClient.get<OpsMetricThresholds>('/admin/ops/settings/metric-thresholds')
+  return data
+}
+
+async function updateMetricThresholds(thresholds: OpsMetricThresholds): Promise<void> {
+  await apiClient.put('/admin/ops/settings/metric-thresholds', thresholds)
+}
+
+export const opsAPI = {
+  getDashboardOverview,
+  getThroughputTrend,
+  getLatencyHistogram,
+  getErrorTrend,
+  getErrorDistribution,
+  getConcurrencyStats,
+  getAccountAvailabilityStats,
+  getRealtimeTrafficSummary,
+  subscribeQPS,
+
+  // Legacy unified endpoints
+  listErrorLogs,
+  getErrorLogDetail,
+  retryErrorRequest,
+  listRetryAttempts,
+  updateErrorResolved,
+
+  // New split endpoints
+  listRequestErrors,
+  listUpstreamErrors,
+  getRequestErrorDetail,
+  getUpstreamErrorDetail,
+  retryRequestErrorClient,
+  retryRequestErrorUpstreamEvent,
+  retryUpstreamError,
+  updateRequestErrorResolved,
+  updateUpstreamErrorResolved,
+  listRequestErrorUpstreamErrors,
+
+  listRequestDetails,
+  listAlertRules,
+  createAlertRule,
+  updateAlertRule,
+  deleteAlertRule,
+  listAlertEvents,
+  getAlertEvent,
+  updateAlertEventStatus,
+  createAlertSilence,
+  getEmailNotificationConfig,
+  updateEmailNotificationConfig,
+  getAlertRuntimeSettings,
+  updateAlertRuntimeSettings,
+  getAdvancedSettings,
+  updateAdvancedSettings,
+  getMetricThresholds,
+  updateMetricThresholds
+}
+
+export default opsAPI
diff --git a/frontend/src/api/admin/promo.ts b/frontend/src/api/admin/promo.ts
new file mode 100644
index 00000000..6a8c4559
--- /dev/null
+++ b/frontend/src/api/admin/promo.ts
@@ -0,0 +1,69 @@
+/**
+ * Admin Promo Codes API endpoints
+ */
+
+import { apiClient } from '../client'
+import type {
+  PromoCode,
+  PromoCodeUsage,
+  CreatePromoCodeRequest,
+  UpdatePromoCodeRequest,
+  BasePaginationResponse
+} from '@/types'
+
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    status?: string
+    search?: string
+  }
+): Promise<BasePaginationResponse<PromoCode>> {
+  const { data } = await apiClient.get<BasePaginationResponse<PromoCode>>('/admin/promo-codes', {
+    params: { page, page_size: pageSize, ...filters }
+  })
+  return data
+}
+
+export async function getById(id: number): Promise<PromoCode> {
+  const { data } = await apiClient.get<PromoCode>(`/admin/promo-codes/${id}`)
+  return data
+}
+
+export async function create(request: CreatePromoCodeRequest): Promise<PromoCode> {
+  const { data } = await apiClient.post<PromoCode>('/admin/promo-codes', request)
+  return data
+}
+
+export async function update(id: number, request: UpdatePromoCodeRequest): Promise<PromoCode> {
+  const { data } = await apiClient.put<PromoCode>(`/admin/promo-codes/${id}`, request)
+  return data
+}
+
+export async function deleteCode(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/promo-codes/${id}`)
+  return data
+}
+
+export async function getUsages(
+  id: number,
+  page: number = 1,
+  pageSize: number = 20
+): Promise<BasePaginationResponse<PromoCodeUsage>> {
+  const { data } = await apiClient.get<BasePaginationResponse<PromoCodeUsage>>(
+    `/admin/promo-codes/${id}/usages`,
+    { params: { page, page_size: pageSize } }
+  )
+  return data
+}
+
+const promoAPI = {
+  list,
+  getById,
+  create,
+  update,
+  delete: deleteCode,
+  getUsages
+}
+
+export default promoAPI
diff --git a/frontend/src/api/admin/proxies.ts b/frontend/src/api/admin/proxies.ts
new file mode 100644
index 00000000..1af2ea39
--- /dev/null
+++ b/frontend/src/api/admin/proxies.ts
@@ -0,0 +1,227 @@
+/**
+ * Admin Proxies API endpoints
+ * Handles proxy server management for administrators
+ */
+
+import { apiClient } from '../client'
+import type {
+  Proxy,
+  ProxyAccountSummary,
+  CreateProxyRequest,
+  UpdateProxyRequest,
+  PaginatedResponse
+} from '@/types'
+
+/**
+ * List all proxies with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters
+ * @returns Paginated list of proxies
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    protocol?: string
+    status?: 'active' | 'inactive'
+    search?: string
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<Proxy>> {
+  const { data } = await apiClient.get<PaginatedResponse<Proxy>>('/admin/proxies', {
+    params: {
+      page,
+      page_size: pageSize,
+      ...filters
+    },
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get all active proxies (without pagination)
+ * @returns List of all active proxies
+ */
+export async function getAll(): Promise<Proxy[]> {
+  const { data } = await apiClient.get<Proxy[]>('/admin/proxies/all')
+  return data
+}
+
+/**
+ * Get all active proxies with account count (sorted by creation time desc)
+ * @returns List of all active proxies with account count
+ */
+export async function getAllWithCount(): Promise<Proxy[]> {
+  // Element type assumed: with_count is expected to enrich each Proxy with a count field.
+  const { data } = await apiClient.get<Proxy[]>('/admin/proxies/all', {
+    params: { with_count: 'true' }
+  })
+  return data
+}
+
+/**
+ * Get proxy by ID
+ * @param id - Proxy ID
+ * @returns Proxy details
+ */
+export async function getById(id: number): Promise<Proxy> {
+  const { data } = await apiClient.get<Proxy>(`/admin/proxies/${id}`)
+  return data
+}
+
+/**
+ * Create new proxy
+ * @param proxyData - Proxy data
+ * @returns Created proxy
+ */
+export async function create(proxyData: CreateProxyRequest): Promise<Proxy> {
+  const { data } = await apiClient.post<Proxy>('/admin/proxies', proxyData)
+  return data
+}
+
+/**
+ * Update proxy
+ * @param id - Proxy ID
+ * @param updates - Fields to update
+ * @returns Updated proxy
+ */
+export async function update(id: number, updates: UpdateProxyRequest): Promise<Proxy> {
+  const { data } = await apiClient.put<Proxy>(`/admin/proxies/${id}`, updates)
+  return data
+}
+
+/**
+ * Delete proxy
+ * @param id - Proxy ID
+ * @returns Success confirmation
+ */
+export async function deleteProxy(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/proxies/${id}`)
+  return data
+}
+
+/**
+ * Toggle proxy status
+ * @param id - Proxy ID
+ * @param status - New status
+ * @returns Updated proxy
+ */
+export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise<Proxy> {
+  return update(id, { status })
+}
+
+/**
+ * Test proxy connectivity
+ * @param id - Proxy ID
+ * @returns Test result with IP info
+ */
+export async function testProxy(id: number): Promise<{
+  success: boolean
+  message: string
+  latency_ms?: number
+  ip_address?: string
+  city?: string
+  region?: string
+  country?: string
+  country_code?: string
+}> {
+  const { data } = await apiClient.post<{
+    success: boolean
+    message: string
+    latency_ms?: number
+    ip_address?: string
+    city?: string
+    region?: string
+    country?: string
+    country_code?: string
+  }>(`/admin/proxies/${id}/test`)
+  return data
+}
+
+/**
+ * Get proxy usage statistics
+ * @param id - Proxy ID
+ * @returns Proxy usage statistics
+ */
+export async function getStats(id: number): Promise<{
+  total_accounts: number
+  active_accounts: number
+  total_requests: number
+  success_rate: number
+  average_latency: number
+}> {
+  const { data } = await apiClient.get<{
+    total_accounts: number
+    active_accounts: number
+    total_requests: number
+    success_rate: number
+    average_latency: number
+  }>(`/admin/proxies/${id}/stats`)
+  return data
+}
+
+/**
+ * Get accounts using a proxy
+ * @param id - Proxy ID
+ * @returns List of accounts using the proxy
+ */
+export async function getProxyAccounts(id: number): Promise<ProxyAccountSummary[]> {
+  const { data } = await apiClient.get<ProxyAccountSummary[]>(`/admin/proxies/${id}/accounts`)
+  return data
+}
+
+/**
+ * Batch create proxies
+ * @param proxies - Array of proxy data to create
+ * @returns Creation result with count of created and skipped
+ */
+export async function batchCreate(
+  proxies: Array<{
+    protocol: string
+    host: string
+    port: number
+    username?: string
+    password?: string
+  }>
+): Promise<{
+  created: number
+  skipped: number
+}> {
+  const { data } = await apiClient.post<{
+    created: number
+    skipped: number
+  }>('/admin/proxies/batch', { proxies })
+  return data
+}
+
+export async function batchDelete(ids: number[]): Promise<{
+  deleted_ids: number[]
+  skipped: Array<{ id: number; reason: string }>
+}> {
+  const { data } = await apiClient.post<{
+    deleted_ids: number[]
+    skipped: Array<{ id: number; reason: string }>
+  }>('/admin/proxies/batch-delete', { ids })
+  return data
+}
+
+export const proxiesAPI = {
+  list,
+  getAll,
+  getAllWithCount,
+  getById,
+  create,
+  update,
+  delete: deleteProxy,
+  toggleStatus,
+  testProxy,
+  getStats,
+  getProxyAccounts,
+  batchCreate,
+  batchDelete
+}
+
+export default proxiesAPI
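+
+/*
+ * Usage sketch: bulk-import proxies, then delete the ones that failed review
+ * (hosts, credentials and ids are illustrative):
+ *
+ *   const { created, skipped } = await batchCreate([
+ *     { protocol: 'socks5', host: '10.0.0.1', port: 1080 },
+ *     { protocol: 'http', host: '10.0.0.2', port: 8080, username: 'u', password: 'p' }
+ *   ])
+ *   console.log(`${created} created, ${skipped} duplicates skipped`)
+ *   const result = await batchDelete([3, 4]) // result.skipped carries per-id reasons
+ */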
diff --git a/frontend/src/api/admin/redeem.ts b/frontend/src/api/admin/redeem.ts
new file mode 100644
index 00000000..a53c3566
--- /dev/null
+++ b/frontend/src/api/admin/redeem.ts
@@ -0,0 +1,174 @@
+/**
+ * Admin Redeem Codes API endpoints
+ * Handles redeem code generation and management for administrators
+ */
+
+import { apiClient } from '../client'
+import type {
+  RedeemCode,
+  GenerateRedeemCodesRequest,
+  RedeemCodeType,
+  PaginatedResponse
+} from '@/types'
+
+/**
+ * List all redeem codes with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters
+ * @returns Paginated list of redeem codes
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    type?: RedeemCodeType
+    status?: 'active' | 'used' | 'expired' | 'unused'
+    search?: string
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<RedeemCode>> {
+  const { data } = await apiClient.get<PaginatedResponse<RedeemCode>>('/admin/redeem-codes', {
+    params: {
+      page,
+      page_size: pageSize,
+      ...filters
+    },
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get redeem code by ID
+ * @param id - Redeem code ID
+ * @returns Redeem code details
+ */
+export async function getById(id: number): Promise<RedeemCode> {
+  const { data } = await apiClient.get<RedeemCode>(`/admin/redeem-codes/${id}`)
+  return data
+}
+
+/**
+ * Generate new redeem codes
+ * @param count - Number of codes to generate
+ * @param type - Type of redeem code
+ * @param value - Value of the code
+ * @param groupId - Group ID (required for subscription type)
+ * @param validityDays - Validity days (for subscription type)
+ * @returns Array of generated redeem codes
+ */
+export async function generate(
+  count: number,
+  type: RedeemCodeType,
+  value: number,
+  groupId?: number | null,
+  validityDays?: number
+): Promise<RedeemCode[]> {
+  const payload: GenerateRedeemCodesRequest = {
+    count,
+    type,
+    value
+  }
+
+  // Subscription-only fields
+  if (type === 'subscription') {
+    payload.group_id = groupId
+    if (validityDays && validityDays > 0) {
+      payload.validity_days = validityDays
+    }
+  }
+
+  const { data } = await apiClient.post<RedeemCode[]>('/admin/redeem-codes/generate', payload)
+  return data
+}
+
+/**
+ * Delete redeem code
+ * @param id - Redeem code ID
+ * @returns Success confirmation
+ */
+export async function deleteCode(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/redeem-codes/${id}`)
+  return data
+}
+
+/**
+ * Batch delete redeem codes
+ * @param ids - Array of redeem code IDs
+ * @returns Success confirmation
+ */
+export async function batchDelete(ids: number[]): Promise<{
+  deleted: number
+  message: string
+}> {
+  const { data } = await apiClient.post<{
+    deleted: number
+    message: string
+  }>('/admin/redeem-codes/batch-delete', { ids })
+  return data
+}
+
+/**
+ * Expire redeem code
+ * @param id - Redeem code ID
+ * @returns Updated redeem code
+ */
+export async function expire(id: number): Promise<RedeemCode> {
+  const { data } = await apiClient.post<RedeemCode>(`/admin/redeem-codes/${id}/expire`)
+  return data
+}
+
+/**
+ * Get redeem code statistics
+ * @returns Statistics about redeem codes
+ */
+export async function getStats(): Promise<{
+  total_codes: number
+  active_codes: number
+  used_codes: number
+  expired_codes: number
+  total_value_distributed: number
+  by_type: Record<string, number>
+}> {
+  const { data } = await apiClient.get<{
+    total_codes: number
+    active_codes: number
+    used_codes: number
+    expired_codes: number
+    total_value_distributed: number
+    by_type: Record<string, number>
+  }>('/admin/redeem-codes/stats')
+  return data
+}
+
+/**
+ * Export redeem codes to CSV
+ * @param filters - Optional filters
+ * @returns CSV data as blob
+ */
+export async function exportCodes(filters?: {
+  type?: RedeemCodeType
+  status?: 'active' | 'used' | 'expired'
+}): Promise<Blob> {
+  const response = await apiClient.get<Blob>('/admin/redeem-codes/export', {
+    params: filters,
+    responseType: 'blob'
+  })
+  return response.data
+}
+
+export const redeemAPI = {
+  list,
+  getById,
+  generate,
+  delete: deleteCode,
+  batchDelete,
+  expire,
+  getStats,
+  exportCodes
+}
+
+export default redeemAPI
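+
+/*
+ * Usage sketch: mint 10 subscription codes for group 42, valid for 30 days
+ * (all argument values illustrative; non-subscription types simply omit the
+ * group/validity arguments):
+ *
+ *   const codes = await generate(10, 'subscription', 1, 42, 30)
+ *   console.log(codes.length, 'codes generated')
+ */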
diff --git a/frontend/src/api/admin/settings.ts b/frontend/src/api/admin/settings.ts
new file mode 100644
index 00000000..fc72be8d
--- /dev/null
+++ b/frontend/src/api/admin/settings.ts
@@ -0,0 +1,251 @@
+/**
+ * Admin Settings API endpoints
+ * Handles system settings management for administrators
+ */
+
+import { apiClient } from '../client'
+
+/**
+ * System settings interface
+ */
+export interface SystemSettings {
+  // Registration settings
+  registration_enabled: boolean
+  email_verify_enabled: boolean
+  // Default settings
+  default_balance: number
+  default_concurrency: number
+  // OEM settings
+  site_name: string
+  site_logo: string
+  site_subtitle: string
+  api_base_url: string
+  contact_info: string
+  doc_url: string
+  home_content: string
+  // SMTP settings
+  smtp_host: string
+  smtp_port: number
+  smtp_username: string
+  smtp_password_configured: boolean
+  smtp_from_email: string
+  smtp_from_name: string
+  smtp_use_tls: boolean
+  // Cloudflare Turnstile settings
+  turnstile_enabled: boolean
+  turnstile_site_key: string
+  turnstile_secret_key_configured: boolean
+
+  // LinuxDo Connect OAuth settings
+  linuxdo_connect_enabled: boolean
+  linuxdo_connect_client_id: string
+  linuxdo_connect_client_secret_configured: boolean
+  linuxdo_connect_redirect_url: string
+
+  // Model fallback configuration
+  enable_model_fallback: boolean
+  fallback_model_anthropic: string
+  fallback_model_openai: string
+  fallback_model_gemini: string
+  fallback_model_antigravity: string
+
+  // Identity patch configuration (Claude -> Gemini)
+  enable_identity_patch: boolean
+  identity_patch_prompt: string
+
+  // Ops Monitoring (vNext)
+  ops_monitoring_enabled: boolean
+  ops_realtime_monitoring_enabled: boolean
+  ops_query_mode_default: 'auto' | 'raw' | 'preagg' | string
+  ops_metrics_interval_seconds: number
+}
+
+export interface UpdateSettingsRequest {
+  registration_enabled?: boolean
+  email_verify_enabled?: boolean
+  default_balance?: number
+  default_concurrency?: number
+  site_name?: string
+  site_logo?: string
+  site_subtitle?: string
+  api_base_url?: string
+  contact_info?: string
+  doc_url?: string
+  home_content?: string
+  smtp_host?: string
+  smtp_port?: number
+  smtp_username?: string
+  smtp_password?: string
+  smtp_from_email?: string
+  smtp_from_name?: string
+  smtp_use_tls?: boolean
+  turnstile_enabled?: boolean
+  turnstile_site_key?: string
+  turnstile_secret_key?: string
+  linuxdo_connect_enabled?: boolean
+  linuxdo_connect_client_id?: string
+  linuxdo_connect_client_secret?: string
+  linuxdo_connect_redirect_url?: string
+  enable_model_fallback?: boolean
+  fallback_model_anthropic?: string
+  fallback_model_openai?: string
+  fallback_model_gemini?: string
+  fallback_model_antigravity?: string
+  enable_identity_patch?: boolean
+  identity_patch_prompt?: string
+  ops_monitoring_enabled?: boolean
+  ops_realtime_monitoring_enabled?: boolean
+  ops_query_mode_default?: 'auto' | 'raw' | 'preagg' | string
+  ops_metrics_interval_seconds?: number
+}
+
+/**
+ * Get all system settings
+ * @returns System settings
+ */
+export async function getSettings(): Promise<SystemSettings> {
+  const { data } = await apiClient.get<SystemSettings>('/admin/settings')
+  return data
+}
+
+/**
+ * Update system settings
+ * @param settings - Partial settings to update
+ * @returns Updated settings
+ */
+export async function updateSettings(settings: UpdateSettingsRequest): Promise<SystemSettings> {
+  const { data } = await apiClient.put<SystemSettings>('/admin/settings', settings)
+  return data
+}
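+
+/*
+ * Usage sketch: UpdateSettingsRequest is a partial, so only the touched keys
+ * need to be sent. Secrets (smtp_password, turnstile_secret_key,
+ * linuxdo_connect_client_secret) are write-only here; reads expose the
+ * corresponding *_configured flags instead of the values.
+ *
+ *   await updateSettings({ registration_enabled: false, default_balance: 10 })
+ */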
+
+/**
+ * Test SMTP connection request
+ */
+export interface TestSmtpRequest {
+  smtp_host: string
+  smtp_port: number
+  smtp_username: string
+  smtp_password: string
+  smtp_use_tls: boolean
+}
+
+/**
+ * Test SMTP connection with provided config
+ * @param config - SMTP configuration to test
+ * @returns Test result message
+ */
+export async function testSmtpConnection(config: TestSmtpRequest): Promise<{ message: string }> {
+  const { data } = await apiClient.post<{ message: string }>('/admin/settings/test-smtp', config)
+  return data
+}
+
+/**
+ * Send test email request
+ */
+export interface SendTestEmailRequest {
+  email: string
+  smtp_host: string
+  smtp_port: number
+  smtp_username: string
+  smtp_password: string
+  smtp_from_email: string
+  smtp_from_name: string
+  smtp_use_tls: boolean
+}
+
+/**
+ * Send test email with provided SMTP config
+ * @param request - Email address and SMTP config
+ * @returns Test result message
+ */
+export async function sendTestEmail(request: SendTestEmailRequest): Promise<{ message: string }> {
+  const { data } = await apiClient.post<{ message: string }>(
+    '/admin/settings/send-test-email',
+    request
+  )
+  return data
+}
+
+/**
+ * Admin API Key status response
+ */
+export interface AdminApiKeyStatus {
+  exists: boolean
+  masked_key: string
+}
+
+/**
+ * Get admin API key status
+ * @returns Status indicating if key exists and masked version
+ */
+export async function getAdminApiKey(): Promise<AdminApiKeyStatus> {
+  const { data } = await apiClient.get<AdminApiKeyStatus>('/admin/settings/admin-api-key')
+  return data
+}
+
+/**
+ * Regenerate admin API key
+ * @returns The new full API key (only shown once)
+ */
+export async function regenerateAdminApiKey(): Promise<{ key: string }> {
+  const { data } = await apiClient.post<{ key: string }>('/admin/settings/admin-api-key/regenerate')
+  return data
+}
+
+/**
+ * Delete admin API key
+ * @returns Success message
+ */
+export async function deleteAdminApiKey(): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>('/admin/settings/admin-api-key')
+  return data
+}
+
+/**
+ * Stream timeout settings interface
+ */
+export interface StreamTimeoutSettings {
+  enabled: boolean
+  action: 'temp_unsched' | 'error' | 'none'
+  temp_unsched_minutes: number
+  threshold_count: number
+  threshold_window_minutes: number
+}
+
+/**
+ * Get stream timeout settings
+ * @returns Stream timeout settings
+ */
+export async function getStreamTimeoutSettings(): Promise<StreamTimeoutSettings> {
+  const { data } = await apiClient.get<StreamTimeoutSettings>('/admin/settings/stream-timeout')
+  return data
+}
+
+/**
+ * Update stream timeout settings
+ * @param settings - Stream timeout settings to update
+ * @returns Updated settings
+ */
+export async function updateStreamTimeoutSettings(
+  settings: StreamTimeoutSettings
+): Promise<StreamTimeoutSettings> {
+  const { data } = await apiClient.put<StreamTimeoutSettings>(
+    '/admin/settings/stream-timeout',
+    settings
+  )
+  return data
+}
+
+export const settingsAPI = {
+  getSettings,
+  updateSettings,
+  testSmtpConnection,
+  sendTestEmail,
+  getAdminApiKey,
+  regenerateAdminApiKey,
+  deleteAdminApiKey,
+  getStreamTimeoutSettings,
+  updateStreamTimeoutSettings
+}
+
+export default settingsAPI
diff --git a/frontend/src/api/admin/subscriptions.ts b/frontend/src/api/admin/subscriptions.ts
new file mode 100644
index 00000000..54b448e2
--- /dev/null
+++ b/frontend/src/api/admin/subscriptions.ts
@@ -0,0 +1,175 @@
+/**
+ * Admin Subscriptions API endpoints
+ * Handles user subscription management for administrators
+ */
+
+import { apiClient } from '../client'
+import type {
+  UserSubscription,
+  SubscriptionProgress,
+  AssignSubscriptionRequest,
+  BulkAssignSubscriptionRequest,
+  ExtendSubscriptionRequest,
+  PaginatedResponse
+} from '@/types'
+
+/**
+ * List all subscriptions with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters (status, user_id, group_id)
+ * @returns Paginated list of subscriptions
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    status?: 'active' | 'expired' | 'revoked'
+    user_id?: number
+    group_id?: number
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<UserSubscription>> {
+  const { data } = await apiClient.get<PaginatedResponse<UserSubscription>>(
+    '/admin/subscriptions',
+    {
+      params: {
+        page,
+        page_size: pageSize,
+        ...filters
+      },
+      signal: options?.signal
+    }
+  )
+  return data
+}
+
+/**
+ * Get subscription by ID
+ * @param id - Subscription ID
+ * @returns Subscription details
+ */
+export async function getById(id: number): Promise<UserSubscription> {
+  const { data } = await apiClient.get<UserSubscription>(`/admin/subscriptions/${id}`)
+  return data
+}
+
+/**
+ * Get subscription progress
+ * @param id - Subscription ID
+ * @returns Subscription progress with usage stats
+ */
+export async function getProgress(id: number): Promise<SubscriptionProgress> {
+  const { data } = await apiClient.get<SubscriptionProgress>(`/admin/subscriptions/${id}/progress`)
+  return data
+}
+
+/**
+ * Assign subscription to user
+ * @param request - Assignment request
+ * @returns Created subscription
+ */
+export async function assign(request: AssignSubscriptionRequest): Promise<UserSubscription> {
+  const { data } = await apiClient.post<UserSubscription>('/admin/subscriptions/assign', request)
+  return data
+}
+
+/**
+ * Bulk assign subscriptions to multiple users
+ * @param request - Bulk assignment request
+ * @returns Created subscriptions
+ */
+export async function bulkAssign(
+  request: BulkAssignSubscriptionRequest
+): Promise<UserSubscription[]> {
+  const { data } = await apiClient.post<UserSubscription[]>(
+    '/admin/subscriptions/bulk-assign',
+    request
+  )
+  return data
+}
+
+/**
+ * Extend subscription validity
+ * @param id - Subscription ID
+ * @param request - Extension request with days
+ * @returns Updated subscription
+ */
+export async function extend(
+  id: number,
+  request: ExtendSubscriptionRequest
+): Promise<UserSubscription> {
+  const { data } = await apiClient.post<UserSubscription>(
+    `/admin/subscriptions/${id}/extend`,
+    request
+  )
+  return data
+}
+
+/**
+ * Revoke subscription
+ * @param id - Subscription ID
+ * @returns Success confirmation
+ */
+export async function revoke(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/subscriptions/${id}`)
+  return data
+}
+
+/**
+ * List subscriptions by group
+ * @param groupId - Group ID
+ * @param page - Page number
+ * @param pageSize - Items per page
+ * @returns Paginated list of subscriptions in the group
+ */
+export async function listByGroup(
+  groupId: number,
+  page: number = 1,
+  pageSize: number = 20
+): Promise<PaginatedResponse<UserSubscription>> {
+  const { data } = await apiClient.get<PaginatedResponse<UserSubscription>>(
+    `/admin/groups/${groupId}/subscriptions`,
+    {
+      params: { page, page_size: pageSize }
+    }
+  )
+  return data
+}
+
+/**
+ * List subscriptions by user
+ * @param userId - User ID
+ * @param page - Page number
+ * @param pageSize - Items per page
+ * @returns Paginated list of user's subscriptions
+ */
+export async function listByUser(
+  userId: number,
+  page: number = 1,
+  pageSize: number = 20
+): Promise<PaginatedResponse<UserSubscription>> {
+  const { data } = await apiClient.get<PaginatedResponse<UserSubscription>>(
+    `/admin/users/${userId}/subscriptions`,
+    {
+      params: { page, page_size: pageSize }
+    }
+  )
+  return data
+}
+
+export const subscriptionsAPI = {
+  list,
+  getById,
+  getProgress,
+  assign,
+  bulkAssign,
+  extend,
+  revoke,
+  listByGroup,
+  listByUser
+}
+
+export default subscriptionsAPI
diff --git a/frontend/src/api/admin/system.ts b/frontend/src/api/admin/system.ts
new file mode 100644
index 00000000..9ea312d5
--- /dev/null
+++ b/frontend/src/api/admin/system.ts
@@ -0,0 +1,81 @@
+/**
+ * System API endpoints for admin operations
+ */
+
+import { apiClient } from '../client'
+
+export interface ReleaseInfo {
+  name: string
+  body: string
+  published_at: string
+  html_url: string
+}
+
+export interface VersionInfo {
+  current_version: string
+  latest_version: string
+  has_update: boolean
+  release_info?: ReleaseInfo
+  cached: boolean
+  warning?: string
+  build_type: string // "source" for manual builds, "release" for CI builds
+}
+
+/**
+ * Get current version
+ */
+export async function getVersion(): Promise<{ version: string }> {
+  const { data } = await apiClient.get<{ version: string }>('/admin/system/version')
+  return data
+}
+
+/**
+ * Check for updates
+ * @param force - Force refresh from GitHub API
+ */
+export async function checkUpdates(force = false): Promise<VersionInfo> {
+  const { data } = await apiClient.get<VersionInfo>('/admin/system/check-updates', {
+    params: force ? { force: 'true' } : undefined
+  })
+  return data
+}
+
+export interface UpdateResult {
+  message: string
+  need_restart: boolean
+}
+
+/**
+ * Perform system update
+ * Downloads and applies the latest version
+ */
+export async function performUpdate(): Promise<UpdateResult> {
+  const { data } = await apiClient.post<UpdateResult>('/admin/system/update')
+  return data
+}
+
+/**
+ * Rollback to previous version
+ */
+export async function rollback(): Promise<UpdateResult> {
+  const { data } = await apiClient.post<UpdateResult>('/admin/system/rollback')
+  return data
+}
+
+/**
+ * Restart the service
+ */
+export async function restartService(): Promise<{ message: string }> {
+  const { data } = await apiClient.post<{ message: string }>('/admin/system/restart')
+  return data
+}
+
+export const systemAPI = {
+  getVersion,
+  checkUpdates,
+  performUpdate,
+  rollback,
+  restartService
+}
+
+export default systemAPI
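+
+/*
+ * Usage sketch of the self-update flow:
+ *
+ *   const info = await checkUpdates(true) // force a fresh GitHub lookup
+ *   if (info.has_update && info.build_type === 'release') {
+ *     const res = await performUpdate()
+ *     if (res.need_restart) await restartService()
+ *   }
+ */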
diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts
new file mode 100644
index 00000000..dd85fc24
--- /dev/null
+++ b/frontend/src/api/admin/usage.ts
@@ -0,0 +1,118 @@
+/**
+ * Admin Usage API endpoints
+ * Handles admin-level usage logs and statistics retrieval
+ */
+
+import { apiClient } from '../client'
+import type { UsageLog, UsageQueryParams, PaginatedResponse } from '@/types'
+
+// ==================== Types ====================
+
+export interface AdminUsageStatsResponse {
+  total_requests: number
+  total_input_tokens: number
+  total_output_tokens: number
+  total_cache_tokens: number
+  total_tokens: number
+  total_cost: number
+  total_actual_cost: number
+  total_account_cost?: number
+  average_duration_ms: number
+}
+
+export interface SimpleUser {
+  id: number
+  email: string
+}
+
+export interface SimpleApiKey {
+  id: number
+  name: string
+  user_id: number
+}
+
+export interface AdminUsageQueryParams extends UsageQueryParams {
+  user_id?: number
+}
+
+// ==================== API Functions ====================
+
+/**
+ * List all usage logs with optional filters (admin only)
+ * @param params - Query parameters for filtering and pagination
+ * @returns Paginated list of usage logs
+ */
+export async function list(
+  params: AdminUsageQueryParams,
+  options?: { signal?: AbortSignal }
+): Promise<PaginatedResponse<UsageLog>> {
+  const { data } = await apiClient.get<PaginatedResponse<UsageLog>>('/admin/usage', {
+    params,
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get usage statistics with optional filters (admin only)
+ * @param params - Query parameters for filtering
+ * @returns Usage statistics
+ */
+export async function getStats(params: {
+  user_id?: number
+  api_key_id?: number
+  account_id?: number
+  group_id?: number
+  model?: string
+  stream?: boolean
+  period?: string
+  start_date?: string
+  end_date?: string
+  timezone?: string
+}): Promise<AdminUsageStatsResponse> {
+  const { data } = await apiClient.get<AdminUsageStatsResponse>('/admin/usage/stats', {
+    params
+  })
+  return data
+}
+
+/**
+ * Search users by email keyword (admin only)
+ * @param keyword - Email keyword to search
+ * @returns List of matching users (max 30)
+ */
+export async function searchUsers(keyword: string): Promise<SimpleUser[]> {
+  const { data } = await apiClient.get<SimpleUser[]>('/admin/usage/search-users', {
+    params: { q: keyword }
+  })
+  return data
+}
+
+/**
+ * Search API keys by user ID and/or keyword (admin only)
+ * @param userId - Optional user ID to filter by
+ * @param keyword - Optional keyword to search in key name
+ * @returns List of matching API keys (max 30)
+ */
+export async function searchApiKeys(userId?: number, keyword?: string): Promise<SimpleApiKey[]> {
+  const params: Record<string, string | number> = {}
+  if (userId !== undefined) {
+    params.user_id = userId
+  }
+  if (keyword) {
+    params.q = keyword
+  }
+  const { data } = await apiClient.get<SimpleApiKey[]>('/admin/usage/search-api-keys', {
+    params
+  })
+  return data
+}
+
+export const adminUsageAPI = {
+  list,
+  getStats,
+  searchUsers,
+  searchApiKeys
+}
+
+export default adminUsageAPI
diff --git a/frontend/src/api/admin/userAttributes.ts b/frontend/src/api/admin/userAttributes.ts
new file mode 100644
index 00000000..304aa828
--- /dev/null
+++ b/frontend/src/api/admin/userAttributes.ts
@@ -0,0 +1,131 @@
+/**
+ * Admin User Attributes API endpoints
+ * Handles user custom attribute definitions and values
+ */
+
+import { apiClient } from '../client'
+import type {
+  UserAttributeDefinition,
+  UserAttributeValue,
+  CreateUserAttributeRequest,
+  UpdateUserAttributeRequest,
+  UserAttributeValuesMap
+} from '@/types'
+
+/**
+ * Get all attribute definitions
+ */
+export async function listDefinitions(): Promise<UserAttributeDefinition[]> {
+  const { data } = await apiClient.get<UserAttributeDefinition[]>('/admin/user-attributes')
+  return data
+}
+
+/**
+ * Get enabled attribute definitions only
+ */
+export async function listEnabledDefinitions(): Promise<UserAttributeDefinition[]> {
+  const { data } = await apiClient.get<UserAttributeDefinition[]>('/admin/user-attributes', {
+    params: { enabled: true }
+  })
+  return data
+}
+
+/**
+ * Create a new attribute definition
+ */
+export async function createDefinition(
+  request: CreateUserAttributeRequest
+): Promise<UserAttributeDefinition> {
+  const { data } = await apiClient.post<UserAttributeDefinition>('/admin/user-attributes', request)
+  return data
+}
+
+/**
+ * Update an attribute definition
+ */
+export async function updateDefinition(
+  id: number,
+  request: UpdateUserAttributeRequest
+): Promise<UserAttributeDefinition> {
+  const { data } = await apiClient.put<UserAttributeDefinition>(
+    `/admin/user-attributes/${id}`,
+    request
+  )
+  return data
+}
+
+/**
+ * Delete an attribute definition
+ */
+export async function deleteDefinition(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/user-attributes/${id}`)
+  return data
+}
+
+/**
+ * Reorder attribute definitions
+ */
+export async function reorderDefinitions(ids: number[]): Promise<{ message: string }> {
+  const { data } = await apiClient.put<{ message: string }>('/admin/user-attributes/reorder', {
+    ids
+  })
+  return data
+}
+
+/**
+ * Get user's attribute values
+ */
+export async function getUserAttributeValues(userId: number): Promise<UserAttributeValue[]> {
+  const { data } = await apiClient.get<UserAttributeValue[]>(
+    `/admin/users/${userId}/attributes`
+  )
+  return data
+}
+
+/**
+ * Update user's attribute values (batch)
+ */
+export async function updateUserAttributeValues(
+  userId: number,
+  values: UserAttributeValuesMap
+): Promise<{ message: string }> {
+  const { data } = await apiClient.put<{ message: string }>(
+    `/admin/users/${userId}/attributes`,
+    { values }
+  )
+  return data
+}
+
+/**
+ * Batch response type
+ */
+export interface BatchUserAttributesResponse {
+  attributes: Record<number, Record<number, string>>
+}
+
+/**
+ * Get attribute values for multiple users
+ */
+export async function getBatchUserAttributes(
+  userIds: number[]
+): Promise<BatchUserAttributesResponse> {
+  const { data } = await apiClient.post<BatchUserAttributesResponse>(
+    '/admin/user-attributes/batch',
+    { user_ids: userIds }
+  )
+  return data
+}
+
+export const userAttributesAPI = {
+  listDefinitions,
+  listEnabledDefinitions,
+  createDefinition,
+  updateDefinition,
+  deleteDefinition,
+  reorderDefinitions,
+  getUserAttributeValues,
+  updateUserAttributeValues,
+  getBatchUserAttributes
+}
+
+export default userAttributesAPI
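+
+// Illustrative sketch (not part of the original file): write one value per
+// enabled definition for a user. Indexing UserAttributeValuesMap by
+// definition id is an assumption about that type's shape.
+export async function sketchClearAttributes(userId: number): Promise<void> {
+  const definitions = await listEnabledDefinitions()
+  const values: UserAttributeValuesMap = {}
+  for (const def of definitions) {
+    values[def.id] = '' // assumed shape: attributeId -> value
+  }
+  await updateUserAttributeValues(userId, values)
+}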
diff --git a/frontend/src/api/admin/users.ts b/frontend/src/api/admin/users.ts
new file mode 100644
index 00000000..44963cf9
--- /dev/null
+++ b/frontend/src/api/admin/users.ts
@@ -0,0 +1,191 @@
+/**
+ * Admin Users API endpoints
+ * Handles user management for administrators
+ */
+
+import { apiClient } from '../client'
+import type { User, UpdateUserRequest, PaginatedResponse } from '@/types'
+
+/**
+ * List all users with pagination
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param filters - Optional filters (status, role, search, attributes)
+ * @param options - Optional request options (signal)
+ * @returns Paginated list of users
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  filters?: {
+    status?: 'active' | 'disabled'
+    role?: 'admin' | 'user'
+    search?: string
+    attributes?: Record<string, string> // attributeId -> value
+  },
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<User>> {
+  // Build params with attribute filters in attr[id]=value format
+  const params: Record<string, unknown> = {
+    page,
+    page_size: pageSize,
+    status: filters?.status,
+    role: filters?.role,
+    search: filters?.search
+  }
+
+  // Add attribute filters as attr[id]=value
+  if (filters?.attributes) {
+    for (const [attrId, value] of Object.entries(filters.attributes)) {
+      if (value) {
+        params[`attr[${attrId}]`] = value
+      }
+    }
+  }
+
+  const { data } = await apiClient.get<PaginatedResponse<User>>('/admin/users', {
+    params,
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get user by ID
+ * @param id - User ID
+ * @returns User details
+ */
+export async function getById(id: number): Promise<User> {
+  const { data } = await apiClient.get<User>(`/admin/users/${id}`)
+  return data
+}
+
+/**
+ * Create new user
+ * @param userData - User data (email, password, etc.)
+ * @returns Created user
+ */
+export async function create(userData: {
+  email: string
+  password: string
+  balance?: number
+  concurrency?: number
+  allowed_groups?: number[] | null
+}): Promise<User> {
+  const { data } = await apiClient.post<User>('/admin/users', userData)
+  return data
+}
+
+/**
+ * Update user
+ * @param id - User ID
+ * @param updates - Fields to update
+ * @returns Updated user
+ */
+export async function update(id: number, updates: UpdateUserRequest): Promise<User> {
+  const { data } = await apiClient.put<User>(`/admin/users/${id}`, updates)
+  return data
+}
+
+/**
+ * Delete user
+ * @param id - User ID
+ * @returns Success confirmation
+ */
+export async function deleteUser(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/admin/users/${id}`)
+  return data
+}
+
+/**
+ * Update user balance
+ * @param id - User ID
+ * @param balance - New balance
+ * @param operation - Operation type ('set', 'add', 'subtract')
+ * @param notes - Optional notes for the balance adjustment
+ * @returns Updated user
+ */
+export async function updateBalance(
+  id: number,
+  balance: number,
+  operation: 'set' | 'add' | 'subtract' = 'set',
+  notes?: string
+): Promise<User> {
+  const { data } = await apiClient.post<User>(`/admin/users/${id}/balance`, {
+    balance,
+    operation,
+    notes: notes || ''
+  })
+  return data
+}
+
+/**
+ * Update user concurrency
+ * @param id - User ID
+ * @param concurrency - New concurrency limit
+ * @returns Updated user
+ */
+export async function updateConcurrency(id: number, concurrency: number): Promise<User> {
+  return update(id, { concurrency })
+}
+
+/**
+ * Toggle user status
+ * @param id - User ID
+ * @param status - New status
+ * @returns Updated user
+ */
+export async function toggleStatus(id: number, status: 'active' | 'disabled'): Promise<User> {
+  return update(id, { status })
+}
+
+/**
+ * Get user's API keys
+ * @param id - User ID
+ * @returns List of user's API keys
+ */
+export async function getUserApiKeys(id: number): Promise<PaginatedResponse<unknown>> {
+  const { data } = await apiClient.get<PaginatedResponse<unknown>>(`/admin/users/${id}/api-keys`)
+  return data
+}
+
+/**
+ * Get user's usage statistics
+ * @param id - User ID
+ * @param period - Time period
+ * @returns User usage statistics
+ */
+export async function getUserUsageStats(
+  id: number,
+  period: string = 'month'
+): Promise<{
+  total_requests: number
+  total_cost: number
+  total_tokens: number
+}> {
+  const { data } = await apiClient.get<{
+    total_requests: number
+    total_cost: number
+    total_tokens: number
+  }>(`/admin/users/${id}/usage`, {
+    params: { period }
+  })
+  return data
+}
+
+export const usersAPI = {
+  list,
+  getById,
+  create,
+  update,
+  delete: deleteUser,
+  updateBalance,
+  updateConcurrency,
+  toggleStatus,
+  getUserApiKeys,
+  getUserUsageStats
+}
+
+export default usersAPI
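+
+// Illustrative sketch (not part of the original file): the attr[id]=value
+// filter format in action. Attribute ID 3 and the value 'vip' are made-up
+// examples.
+export async function sketchFilterByAttribute(): Promise<void> {
+  const result = await usersAPI.list(1, 20, {
+    status: 'active',
+    attributes: { '3': 'vip' } // sent as attr[3]=vip in the query string
+  })
+  console.log('fetched page 1 of matching users', result)
+}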
diff --git a/frontend/src/api/auth.ts b/frontend/src/api/auth.ts
new file mode 100644
index 00000000..fddc23ef
--- /dev/null
+++ b/frontend/src/api/auth.ts
@@ -0,0 +1,150 @@
+/**
+ * Authentication API endpoints
+ * Handles user login, registration, and logout operations
+ */
+
+import { apiClient } from './client'
+import type {
+  LoginRequest,
+  RegisterRequest,
+  AuthResponse,
+  CurrentUserResponse,
+  SendVerifyCodeRequest,
+  SendVerifyCodeResponse,
+  PublicSettings
+} from '@/types'
+
+/**
+ * Store authentication token in localStorage
+ */
+export function setAuthToken(token: string): void {
+  localStorage.setItem('auth_token', token)
+}
+
+/**
+ * Get authentication token from localStorage
+ */
+export function getAuthToken(): string | null {
+  return localStorage.getItem('auth_token')
+}
+
+/**
+ * Clear authentication token from localStorage
+ */
+export function clearAuthToken(): void {
+  localStorage.removeItem('auth_token')
+  localStorage.removeItem('auth_user')
+}
+
+/**
+ * User login
+ * @param credentials - Username and password
+ * @returns Authentication response with token and user data
+ */
+export async function login(credentials: LoginRequest): Promise<AuthResponse> {
+  const { data } = await apiClient.post<AuthResponse>('/auth/login', credentials)
+
+  // Store token and user data
+  setAuthToken(data.access_token)
+  localStorage.setItem('auth_user', JSON.stringify(data.user))
+
+  return data
+}
+
+/**
+ * User registration
+ * @param userData - Registration data (username, email, password)
+ * @returns Authentication response with token and user data
+ */
+export async function register(userData: RegisterRequest): Promise<AuthResponse> {
+  const { data } = await apiClient.post<AuthResponse>('/auth/register', userData)
+
+  // Store token and user data
+  setAuthToken(data.access_token)
+  localStorage.setItem('auth_user', JSON.stringify(data.user))
+
+  return data
+}
+
+/**
+ * Get current authenticated user
+ * @returns User profile data
+ */
+export async function getCurrentUser() {
+  return apiClient.get<CurrentUserResponse>('/auth/me')
+}
+
+/**
+ * User logout
+ * Clears authentication token and user data from localStorage
+ */
+export function logout(): void {
+  clearAuthToken()
+  // Optionally redirect to login page
+  // window.location.href = '/login';
+}
+
+/**
+ * Check if user is authenticated
+ * @returns True if user has valid token
+ */
+export function isAuthenticated(): boolean {
+  return getAuthToken() !== null
+}
+
+/**
+ * Get public settings (no auth required)
+ * @returns Public settings including registration and Turnstile config
+ */
+export async function getPublicSettings(): Promise<PublicSettings> {
+  const { data } = await apiClient.get<PublicSettings>('/settings/public')
+  return data
+}
+
+/**
+ * Send verification code to email
+ * @param request - Email and optional Turnstile token
+ * @returns Response with countdown seconds
+ */
+export async function sendVerifyCode(
+  request: SendVerifyCodeRequest
+): Promise<SendVerifyCodeResponse> {
+  const { data } = await apiClient.post<SendVerifyCodeResponse>('/auth/send-verify-code', request)
+  return data
+}
+
+/**
+ * Validate promo code response
+ */
+export interface ValidatePromoCodeResponse {
+  valid: boolean
+  bonus_amount?: number
+  error_code?: string
+  message?: string
+}
+
+/**
+ * Validate promo code (public endpoint, no auth required)
+ * @param code - Promo code to validate
+ * @returns Validation result with bonus amount if valid
+ */
+export async function validatePromoCode(code: string): Promise<ValidatePromoCodeResponse> {
+  const { data } = await apiClient.post<ValidatePromoCodeResponse>('/auth/validate-promo-code', { code })
+  return data
+}
+
+export const authAPI = {
+  login,
+  register,
+  getCurrentUser,
+  logout,
+  isAuthenticated,
+  setAuthToken,
+  getAuthToken,
+  clearAuthToken,
+  getPublicSettings,
+  sendVerifyCode,
+  validatePromoCode
+}
+
+export default authAPI
diff --git a/frontend/src/api/client.ts b/frontend/src/api/client.ts
new file mode 100644
index 00000000..3827498b
--- /dev/null
+++ b/frontend/src/api/client.ts
@@ -0,0 +1,165 @@
+/**
+ * Axios HTTP Client Configuration
+ * Base client with interceptors for authentication and error handling
+ */
+
+import axios, { AxiosInstance, AxiosError, InternalAxiosRequestConfig } from 'axios'
+import type { ApiResponse } from '@/types'
+import { getLocale } from '@/i18n'
+
+// ==================== Axios Instance Configuration ====================
+
+const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api/v1'
+
+export const apiClient: AxiosInstance = axios.create({
+  baseURL: API_BASE_URL,
+  timeout: 30000,
+  headers: {
+    'Content-Type': 'application/json'
+  }
+})
+
+// ==================== Request Interceptor ====================
+
+// Get user's timezone
+const getUserTimezone = (): string => {
+  try {
+    return Intl.DateTimeFormat().resolvedOptions().timeZone
+  } catch {
+    return 'UTC'
+  }
+}
+
+apiClient.interceptors.request.use(
+  (config: InternalAxiosRequestConfig) => {
+    // Attach token from localStorage
+    const token = localStorage.getItem('auth_token')
+    if (token && config.headers) {
+      config.headers.Authorization = `Bearer ${token}`
+    }
+
+    // Attach locale for backend translations
+    if (config.headers) {
+      config.headers['Accept-Language'] = getLocale()
+    }
+
+    // Attach timezone for all GET requests (backend may use it for default date ranges)
+    if (config.method === 'get') {
+      if (!config.params) {
+        config.params = {}
+      }
+      config.params.timezone = getUserTimezone()
+    }
+
+    return config
+  },
+  (error) => {
+    return Promise.reject(error)
+  }
+)
+
+// ==================== Response Interceptor ====================
+
+apiClient.interceptors.response.use(
+  (response) => {
+    // Unwrap standard API response format { code, message, data }
+    const apiResponse = response.data as ApiResponse<unknown>
+    if (apiResponse && typeof apiResponse === 'object' && 'code' in apiResponse) {
+      if (apiResponse.code === 0) {
+        // Success - return the data portion
+        response.data = apiResponse.data
+      } else {
+        // API error
+        return Promise.reject({
+          status: response.status,
+          code: apiResponse.code,
+          message: apiResponse.message || 'Unknown error'
+        })
+      }
+    }
+    return response
+  },
+  (error: AxiosError<ApiResponse<unknown>>) => {
+    // Request cancellation: keep the original axios cancellation error so callers can ignore it.
+    // Otherwise we'd misclassify it as a generic "network error".
+    if (error.code === 'ERR_CANCELED' || axios.isCancel(error)) {
+      return Promise.reject(error)
+    }
+
+    // Handle common errors
+    if (error.response) {
+      const { status, data } = error.response
+      const url = String(error.config?.url || '')
+
+      // Validate `data` shape to avoid HTML error pages breaking our error handling.
+      const apiData = (typeof data === 'object' && data !== null ? data : {}) as Record<string, unknown>
+
+      // Ops monitoring disabled: treat as feature-flagged 404, and proactively redirect away
+      // from ops pages to avoid broken UI states.
+      if (status === 404 && apiData.message === 'Ops monitoring is disabled') {
+        try {
+          localStorage.setItem('ops_monitoring_enabled_cached', 'false')
+        } catch {
+          // ignore localStorage failures
+        }
+        try {
+          window.dispatchEvent(new CustomEvent('ops-monitoring-disabled'))
+        } catch {
+          // ignore event failures
+        }
+
+        if (window.location.pathname.startsWith('/admin/ops')) {
+          window.location.href = '/admin/settings'
+        }
+
+        return Promise.reject({
+          status,
+          code: 'OPS_DISABLED',
+          message: apiData.message || error.message,
+          url
+        })
+      }
+
+      // 401: Unauthorized - clear token and redirect to login
+      if (status === 401) {
+        const hasToken = !!localStorage.getItem('auth_token')
+        const isAuthEndpoint =
+          url.includes('/auth/login') || url.includes('/auth/register') || url.includes('/auth/refresh')
+        const headers = error.config?.headers as Record<string, unknown> | undefined
+        const authHeader = headers?.Authorization ?? headers?.authorization
+        const sentAuth =
+          typeof authHeader === 'string'
+            ? authHeader.trim() !== ''
+            : Array.isArray(authHeader)
+              ? authHeader.length > 0
+              : !!authHeader
+
+        localStorage.removeItem('auth_token')
+        localStorage.removeItem('auth_user')
+        if ((hasToken || sentAuth) && !isAuthEndpoint) {
+          sessionStorage.setItem('auth_expired', '1')
+        }
+        // Only redirect if not already on login page
+        if (!window.location.pathname.includes('/login')) {
+          window.location.href = '/login'
+        }
+      }
+
+      // Return structured error
+      return Promise.reject({
+        status,
+        code: apiData.code,
+        message: apiData.message || apiData.detail || error.message
+      })
+    }
+
+    // Network error
+    return Promise.reject({
+      status: 0,
+      message: 'Network error. Please check your connection.'
+    })
+  }
+)
+
+export default apiClient
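+
+// Illustrative sketch (not part of the original file): because the response
+// interceptor unwraps { code, message, data }, callers receive the data
+// portion directly. Cancellation via AbortController is shown as well; the
+// endpoint path here is only an example.
+export async function sketchCancellableGet(): Promise<void> {
+  const controller = new AbortController()
+  const pending = apiClient.get('/usage/stats', { signal: controller.signal })
+  controller.abort() // axios rejects with an ERR_CANCELED error
+  try {
+    await pending
+  } catch (err) {
+    if (axios.isCancel(err)) {
+      // expected: the interceptor passes cancellation errors through untouched
+    }
+  }
+}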
diff --git a/frontend/src/api/groups.ts b/frontend/src/api/groups.ts
new file mode 100644
index 00000000..0f366d51
--- /dev/null
+++ b/frontend/src/api/groups.ts
@@ -0,0 +1,25 @@
+/**
+ * User Groups API endpoints (non-admin)
+ * Handles group-related operations for regular users
+ */
+
+import { apiClient } from './client'
+import type { Group } from '@/types'
+
+/**
+ * Get available groups that the current user can bind to API keys
+ * This returns groups based on user's permissions:
+ * - Standard groups: public (non-exclusive) or explicitly allowed
+ * - Subscription groups: user has active subscription
+ * @returns List of available groups
+ */
+export async function getAvailable(): Promise<Group[]> {
+  const { data } = await apiClient.get<Group[]>('/groups/available')
+  return data
+}
+
+export const userGroupsAPI = {
+  getAvailable
+}
+
+export default userGroupsAPI
diff --git a/frontend/src/api/index.ts b/frontend/src/api/index.ts
new file mode 100644
index 00000000..50b14c4c
--- /dev/null
+++ b/frontend/src/api/index.ts
@@ -0,0 +1,23 @@
+/**
+ * API Client for Sub2API Backend
+ * Central export point for all API modules
+ */
+
+// Re-export the HTTP client
+export { apiClient } from './client'
+
+// Auth API
+export { authAPI } from './auth'
+
+// User APIs
+export { keysAPI } from './keys'
+export { usageAPI } from './usage'
+export { userAPI } from './user'
+export { redeemAPI, type RedeemHistoryItem } from './redeem'
+export { userGroupsAPI } from './groups'
+
+// Admin APIs
+export { adminAPI } from './admin'
+
+// Default export
+export { default } from './client'
diff --git a/frontend/src/api/keys.ts b/frontend/src/api/keys.ts
new file mode 100644
index 00000000..cdae1359
--- /dev/null
+++ b/frontend/src/api/keys.ts
@@ -0,0 +1,114 @@
+/**
+ * API Keys management endpoints
+ * Handles CRUD operations for user API keys
+ */
+
+import { apiClient } from './client'
+import type { ApiKey, CreateApiKeyRequest, UpdateApiKeyRequest, PaginatedResponse } from '@/types'
+
+/**
+ * List all API keys for current user
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 10)
+ * @param options - Optional request options
+ * @returns Paginated list of API keys
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 10,
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<PaginatedResponse<ApiKey>> {
+  const { data } = await apiClient.get<PaginatedResponse<ApiKey>>('/keys', {
+    params: { page, page_size: pageSize },
+    signal: options?.signal
+  })
+  return data
+}
+
+/**
+ * Get API key by ID
+ * @param id - API key ID
+ * @returns API key details
+ */
+export async function getById(id: number): Promise<ApiKey> {
+  const { data } = await apiClient.get<ApiKey>(`/keys/${id}`)
+  return data
+}
+
+/**
+ * Create new API key
+ * @param name - Key name
+ * @param groupId - Optional group ID
+ * @param customKey - Optional custom key value
+ * @param ipWhitelist - Optional IP whitelist
+ * @param ipBlacklist - Optional IP blacklist
+ * @returns Created API key
+ */
+export async function create(
+  name: string,
+  groupId?: number | null,
+  customKey?: string,
+  ipWhitelist?: string[],
+  ipBlacklist?: string[]
+): Promise<ApiKey> {
+  const payload: CreateApiKeyRequest = { name }
+  if (groupId !== undefined) {
+    payload.group_id = groupId
+  }
+  if (customKey) {
+    payload.custom_key = customKey
+  }
+  if (ipWhitelist && ipWhitelist.length > 0) {
+    payload.ip_whitelist = ipWhitelist
+  }
+  if (ipBlacklist && ipBlacklist.length > 0) {
+    payload.ip_blacklist = ipBlacklist
+  }
+
+  const { data } = await apiClient.post<ApiKey>('/keys', payload)
+  return data
+}
+
+/**
+ * Update API key
+ * @param id - API key ID
+ * @param updates - Fields to update
+ * @returns Updated API key
+ */
+export async function update(id: number, updates: UpdateApiKeyRequest): Promise<ApiKey> {
+  const { data } = await apiClient.put<ApiKey>(`/keys/${id}`, updates)
+  return data
+}
+
+/**
+ * Delete API key
+ * @param id - API key ID
+ * @returns Success confirmation
+ */
+export async function deleteKey(id: number): Promise<{ message: string }> {
+  const { data } = await apiClient.delete<{ message: string }>(`/keys/${id}`)
+  return data
+}
+
+/**
+ * Toggle API key status (active/inactive)
+ * @param id - API key ID
+ * @param status - New status
+ * @returns Updated API key
+ */
+export async function toggleStatus(id: number, status: 'active' | 'inactive'): Promise<ApiKey> {
+  return update(id, { status })
+}
+
+export const keysAPI = {
+  list,
+  getById,
+  create,
+  update,
+  delete: deleteKey,
+  toggleStatus
+}
+
+export default keysAPI
diff --git a/frontend/src/api/redeem.ts b/frontend/src/api/redeem.ts
new file mode 100644
index 00000000..9e1c7d94
--- /dev/null
+++ b/frontend/src/api/redeem.ts
@@ -0,0 +1,65 @@
+/**
+ * Redeem code API endpoints
+ * Handles redeem code redemption for users
+ */
+
+import { apiClient } from './client'
+import type { RedeemCodeRequest } from '@/types'
+
+export interface RedeemHistoryItem {
+  id: number
+  code: string
+  type: string
+  value: number
+  status: string
+  used_at: string
+  created_at: string
+  // Subscription-specific fields
+  group_id?: number
+  validity_days?: number
+  group?: {
+    id: number
+    name: string
+  }
+}
+
+/**
+ * Redeem a code
+ * @param code - Redeem code string
+ * @returns Redemption result with updated balance or concurrency
+ */
+export async function redeem(code: string): Promise<{
+  message: string
+  type: string
+  value: number
+  new_balance?: number
+  new_concurrency?: number
+}> {
+  const payload: RedeemCodeRequest = { code }
+
+  const { data } = await apiClient.post<{
+    message: string
+    type: string
+    value: number
+    new_balance?: number
+    new_concurrency?: number
+  }>('/redeem', payload)
+
+  return data
+}
+
+/**
+ * Get user's redemption history
+ * @returns List of redeemed codes
+ */
+export async function getHistory(): Promise<RedeemHistoryItem[]> {
+  const { data } = await apiClient.get<RedeemHistoryItem[]>('/redeem/history')
+  return data
+}
+
+export const redeemAPI = {
+  redeem,
+  getHistory
+}
+
+export default redeemAPI
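+
+// Illustrative sketch (not part of the original file): redeeming a code and
+// reporting the effect based on which optional response fields are present.
+export async function sketchRedeem(code: string): Promise<string> {
+  const result = await redeem(code)
+  if (result.new_balance !== undefined) {
+    return `Balance is now ${result.new_balance}`
+  }
+  if (result.new_concurrency !== undefined) {
+    return `Concurrency is now ${result.new_concurrency}`
+  }
+  return result.message
+}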
diff --git a/frontend/src/api/setup.ts b/frontend/src/api/setup.ts
new file mode 100644
index 00000000..8b744590
--- /dev/null
+++ b/frontend/src/api/setup.ts
@@ -0,0 +1,87 @@
+/**
+ * Setup API endpoints
+ */
+import axios from 'axios'
+
+// Create a separate client for setup endpoints (not under /api/v1)
+const setupClient = axios.create({
+  baseURL: '',
+  timeout: 30000,
+  headers: {
+    'Content-Type': 'application/json'
+  }
+})
+
+export interface SetupStatus {
+  needs_setup: boolean
+  step: string
+}
+
+export interface DatabaseConfig {
+  host: string
+  port: number
+  user: string
+  password: string
+  dbname: string
+  sslmode: string
+}
+
+export interface RedisConfig {
+  host: string
+  port: number
+  password: string
+  db: number
+}
+
+export interface AdminConfig {
+  email: string
+  password: string
+}
+
+export interface ServerConfig {
+  host: string
+  port: number
+  mode: string
+}
+
+export interface InstallRequest {
+  database: DatabaseConfig
+  redis: RedisConfig
+  admin: AdminConfig
+  server: ServerConfig
+}
+
+export interface InstallResponse {
+  message: string
+  restart: boolean
+}
+
+/**
+ * Get setup status
+ */
+export async function getSetupStatus(): Promise<SetupStatus> {
+  const response = await setupClient.get<{ data: SetupStatus }>('/setup/status')
+  return response.data.data
+}
+
+/**
+ * Test database connection
+ */
+export async function testDatabase(config: DatabaseConfig): Promise<void> {
+  await setupClient.post('/setup/test-db', config)
+}
+
+/**
+ * Test Redis connection
+ */
+export async function testRedis(config: RedisConfig): Promise<void> {
+  await setupClient.post('/setup/test-redis', config)
+}
+
+/**
+ * Perform installation
+ */
+export async function install(config: InstallRequest): Promise<InstallResponse> {
+  const response = await setupClient.post<{ data: InstallResponse }>('/setup/install', config)
+  return response.data.data
+}
diff --git a/frontend/src/api/subscriptions.ts b/frontend/src/api/subscriptions.ts
new file mode 100644
index 00000000..a614a425
--- /dev/null
+++ b/frontend/src/api/subscriptions.ts
@@ -0,0 +1,76 @@
+/**
+ * User Subscription API
+ * API for regular users to view their own subscriptions and progress
+ */
+
+import { apiClient } from './client'
+import type { UserSubscription, SubscriptionProgress } from '@/types'
+
+/**
+ * Subscription summary for user dashboard
+ */
+export interface SubscriptionSummary {
+  active_count: number
+  subscriptions: Array<{
+    id: number
+    group_name: string
+    status: string
+    daily_progress: number | null
+    weekly_progress: number | null
+    monthly_progress: number | null
+    expires_at: string | null
+    days_remaining: number | null
+  }>
+}
+
+/**
+ * Get list of current user's subscriptions
+ */
+export async function getMySubscriptions(): Promise<UserSubscription[]> {
+  const response = await apiClient.get<UserSubscription[]>('/subscriptions')
+  return response.data
+}
+
+/**
+ * Get current user's active subscriptions
+ */
+export async function getActiveSubscriptions(): Promise<UserSubscription[]> {
+  const response = await apiClient.get<UserSubscription[]>('/subscriptions/active')
+  return response.data
+}
+
+/**
+ * Get progress for all user's active subscriptions
+ */
+export async function getSubscriptionsProgress(): Promise<SubscriptionProgress[]> {
+  const response = await apiClient.get<SubscriptionProgress[]>('/subscriptions/progress')
+  return response.data
+}
+
+/**
+ * Get subscription summary for dashboard display
+ */
+export async function getSubscriptionSummary(): Promise<SubscriptionSummary> {
+  const response = await apiClient.get<SubscriptionSummary>('/subscriptions/summary')
+  return response.data
+}
+
+/**
+ * Get progress for a specific subscription
+ */
+export async function getSubscriptionProgress(
+  subscriptionId: number
+): Promise<SubscriptionProgress> {
+  const response = await apiClient.get<SubscriptionProgress>(
+    `/subscriptions/${subscriptionId}/progress`
+  )
+  return response.data
+}
+
+export default {
+  getMySubscriptions,
+  getActiveSubscriptions,
+  getSubscriptionsProgress,
+  getSubscriptionSummary,
+  getSubscriptionProgress
+}
diff --git a/frontend/src/api/usage.ts b/frontend/src/api/usage.ts
new file mode 100644
index 00000000..6efd7657
--- /dev/null
+++ b/frontend/src/api/usage.ts
@@ -0,0 +1,274 @@
+/**
+ * Usage tracking API endpoints
+ * Handles usage logs and statistics retrieval
+ */
+
+import { apiClient } from './client'
+import type {
+  UsageLog,
+  UsageQueryParams,
+  UsageStatsResponse,
+  PaginatedResponse,
+  TrendDataPoint,
+  ModelStat
+} from '@/types'
+
+// ==================== Dashboard Types ====================
+
+export interface UserDashboardStats {
+  total_api_keys: number
+  active_api_keys: number
+  total_requests: number
+  total_input_tokens: number
+  total_output_tokens: number
+  total_cache_creation_tokens: number
+  total_cache_read_tokens: number
+  total_tokens: number
+  total_cost: number // standard (list-price) cost
+  total_actual_cost: number // actual amount deducted
+  today_requests: number
+  today_input_tokens: number
+  today_output_tokens: number
+  today_cache_creation_tokens: number
+  today_cache_read_tokens: number
+  today_tokens: number
+  today_cost: number // today's standard cost
+  today_actual_cost: number // today's actual deduction
+  average_duration_ms: number
+  rpm: number // requests per minute, averaged over the last 5 minutes
+  tpm: number // tokens per minute, averaged over the last 5 minutes
+}
+
+export interface TrendParams {
+  start_date?: string
+  end_date?: string
+  granularity?: 'day' | 'hour'
+}
+
+export interface TrendResponse {
+  trend: TrendDataPoint[]
+  start_date: string
+  end_date: string
+  granularity: string
+}
+
+export interface ModelStatsResponse {
+  models: ModelStat[]
+  start_date: string
+  end_date: string
+}
+
+/**
+ * List usage logs with optional filters
+ * @param page - Page number (default: 1)
+ * @param pageSize - Items per page (default: 20)
+ * @param apiKeyId - Filter by API key ID
+ * @returns Paginated list of usage logs
+ */
+export async function list(
+  page: number = 1,
+  pageSize: number = 20,
+  apiKeyId?: number
+): Promise<PaginatedResponse<UsageLog>> {
+  const params: UsageQueryParams = {
+    page,
+    page_size: pageSize
+  }
+
+  if (apiKeyId !== undefined) {
+    params.api_key_id = apiKeyId
+  }
+
+  const { data } = await apiClient.get<PaginatedResponse<UsageLog>>('/usage', {
+    params
+  })
+  return data
+}
+
+/**
+ * Get usage logs with advanced query parameters
+ * @param params - Query parameters for filtering and pagination
+ * @returns Paginated list of usage logs
+ */
+export async function query(
+  params: UsageQueryParams,
+  config: { signal?: AbortSignal } = {}
+): Promise<PaginatedResponse<UsageLog>> {
+  const { data } = await apiClient.get<PaginatedResponse<UsageLog>>('/usage', {
+    ...config,
+    params
+  })
+  return data
+}
+
+/**
+ * Get usage statistics for a specific period
+ * @param period - Time period ('today', 'week', 'month', 'year')
+ * @param apiKeyId - Optional API key ID filter
+ * @returns Usage statistics
+ */
+export async function getStats(
+  period: string = 'today',
+  apiKeyId?: number
+): Promise<UsageStatsResponse> {
+  const params: Record<string, string | number> = { period }
+
+  if (apiKeyId !== undefined) {
+    params.api_key_id = apiKeyId
+  }
+
+  const { data } = await apiClient.get<UsageStatsResponse>('/usage/stats', {
+    params
+  })
+  return data
+}
+
+/**
+ * Get usage statistics for a date range
+ * @param startDate - Start date (YYYY-MM-DD format)
+ * @param endDate - End date (YYYY-MM-DD format)
+ * @param apiKeyId - Optional API key ID filter
+ * @returns Usage statistics
+ */
+export async function getStatsByDateRange(
+  startDate: string,
+  endDate: string,
+  apiKeyId?: number
+): Promise<UsageStatsResponse> {
+  const params: Record<string, string | number> = {
+    start_date: startDate,
+    end_date: endDate
+  }
+
+  if (apiKeyId !== undefined) {
+    params.api_key_id = apiKeyId
+  }
+
+  const { data } = await apiClient.get<UsageStatsResponse>('/usage/stats', {
+    params
+  })
+  return data
+}
+
+/**
+ * Get usage by date range
+ * @param startDate - Start date (YYYY-MM-DD format)
+ * @param endDate - End date (YYYY-MM-DD format)
+ * @param apiKeyId - Optional API key ID filter
+ * @returns Usage logs within date range
+ */
+export async function getByDateRange(
+  startDate: string,
+  endDate: string,
+  apiKeyId?: number
+): Promise<PaginatedResponse<UsageLog>> {
+  const params: UsageQueryParams = {
+    start_date: startDate,
+    end_date: endDate,
+    page: 1,
+    page_size: 100
+  }
+
+  if (apiKeyId !== undefined) {
+    params.api_key_id = apiKeyId
+  }
+
+  const { data } = await apiClient.get<PaginatedResponse<UsageLog>>('/usage', {
+    params
+  })
+  return data
+}
+
+/**
+ * Get detailed usage log by ID
+ * @param id - Usage log ID
+ * @returns Usage log details
+ */
+export async function getById(id: number): Promise<UsageLog> {
+  const { data } = await apiClient.get<UsageLog>(`/usage/${id}`)
+  return data
+}
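+
+// Illustrative sketch (not part of the original file): combining query() with
+// an AbortController so a newer request cancels the one still in flight.
+let usageAbort: AbortController | null = null
+export async function sketchLatestUsage(apiKeyId: number) {
+  usageAbort?.abort()
+  usageAbort = new AbortController()
+  return query({ page: 1, page_size: 20, api_key_id: apiKeyId }, { signal: usageAbort.signal })
+}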
+
+// ==================== Dashboard API ====================
+
+/**
+ * Get user dashboard statistics
+ * @returns Dashboard statistics for current user
+ */
+export async function getDashboardStats(): Promise<UserDashboardStats> {
+  const { data } = await apiClient.get<UserDashboardStats>('/usage/dashboard/stats')
+  return data
+}
+
+/**
+ * Get user usage trend data
+ * @param params - Query parameters for filtering
+ * @returns Usage trend data for current user
+ */
+export async function getDashboardTrend(params?: TrendParams): Promise<TrendResponse> {
+  const { data } = await apiClient.get<TrendResponse>('/usage/dashboard/trend', { params })
+  return data
+}
+
+/**
+ * Get user model usage statistics
+ * @param params - Query parameters for filtering
+ * @returns Model usage statistics for current user
+ */
+export async function getDashboardModels(params?: {
+  start_date?: string
+  end_date?: string
+}): Promise<ModelStatsResponse> {
+  const { data } = await apiClient.get<ModelStatsResponse>('/usage/dashboard/models', { params })
+  return data
+}
+
+export interface BatchApiKeyUsageStats {
+  api_key_id: number
+  today_actual_cost: number
+  total_actual_cost: number
+}
+
+export interface BatchApiKeysUsageResponse {
+  stats: Record<number, BatchApiKeyUsageStats>
+}
+
+/**
+ * Get batch usage stats for user's own API keys
+ * @param apiKeyIds - Array of API key IDs
+ * @param options - Optional request options
+ * @returns Usage stats map keyed by API key ID
+ */
+export async function getDashboardApiKeysUsage(
+  apiKeyIds: number[],
+  options?: {
+    signal?: AbortSignal
+  }
+): Promise<BatchApiKeysUsageResponse> {
+  const { data } = await apiClient.post<BatchApiKeysUsageResponse>(
+    '/usage/dashboard/api-keys-usage',
+    {
+      api_key_ids: apiKeyIds
+    },
+    {
+      signal: options?.signal
+    }
+  )
+  return data
+}
+
+export const usageAPI = {
+  list,
+  query,
+  getStats,
+  getStatsByDateRange,
+  getByDateRange,
+  getById,
+  // Dashboard
+  getDashboardStats,
+  getDashboardTrend,
+  getDashboardModels,
+  getDashboardApiKeysUsage
+}
+
+export default usageAPI
diff --git a/frontend/src/api/user.ts b/frontend/src/api/user.ts
new file mode 100644
index 00000000..bfc0e30b
--- /dev/null
+++ b/frontend/src/api/user.ts
@@ -0,0 +1,54 @@
+/**
+ * User API endpoints
+ * Handles user profile management and password changes
+ */
+
+import { apiClient } from './client'
+import type { User, ChangePasswordRequest } from '@/types'
+
+/**
+ * Get current user profile
+ * @returns User profile data
+ */
+export async function getProfile(): Promise<User> {
+  const { data } = await apiClient.get<User>('/user/profile')
+  return data
+}
+
+/**
+ * Update current user profile
+ * @param profile - Profile data to update
+ * @returns Updated user profile data
+ */
+export async function updateProfile(profile: {
+  username?: string
+}): Promise<User> {
+  const { data } = await apiClient.put<User>('/user', profile)
+  return data
+}
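+
+// Illustrative sketch (not part of the original file): read-modify-write of
+// the profile using the two calls above. The `id` field read here is an
+// assumption about the User type.
+export async function sketchRenameUser(username: string): Promise<User> {
+  const current = await getProfile()
+  console.log('updating profile for user id', current.id)
+  return updateProfile({ username })
+}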
+/**
+ * Change current user password
+ * @param passwords - Old and new password
+ * @returns Success message
+ */
+export async function changePassword(
+  oldPassword: string,
+  newPassword: string
+): Promise<{ message: string }> {
+  const payload: ChangePasswordRequest = {
+    old_password: oldPassword,
+    new_password: newPassword
+  }
+
+  const { data } = await apiClient.put<{ message: string }>('/user/password', payload)
+  return data
+}
+
+export const userAPI = {
+  getProfile,
+  updateProfile,
+  changePassword
+}
+
+export default userAPI
diff --git a/frontend/src/components/Guide/steps.ts b/frontend/src/components/Guide/steps.ts
new file mode 100644
index 00000000..aeb38ab5
--- /dev/null
+++ b/frontend/src/components/Guide/steps.ts
@@ -0,0 +1,309 @@
+import { DriveStep } from 'driver.js'
+
+/**
+ * Full admin onboarding flow
+ * Interactive guide that walks the user through the actual operations
+ * @param t - i18n translation function
+ * @param isSimpleMode - Whether simple mode is enabled (group-related steps are filtered out in simple mode)
+ */
+export const getAdminSteps = (t: (key: string) => string, isSimpleMode = false): DriveStep[] => {
+  const allSteps: DriveStep[] = [
+    // ========== Welcome ==========
+    {
+      popover: {
+        title: t('onboarding.admin.welcome.title'),
+        description: t('onboarding.admin.welcome.description'),
+        align: 'center',
+        nextBtnText: t('onboarding.admin.welcome.nextBtn'),
+        prevBtnText: t('onboarding.admin.welcome.prevBtn')
+      }
+    },
+
+    // ========== Part 1: Create a group ==========
+    {
+      element: '#sidebar-group-manage',
+      popover: {
+        title: t('onboarding.admin.groupManage.title'),
+        description: t('onboarding.admin.groupManage.description'),
+        side: 'right',
+        align: 'center',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="groups-create-btn"]',
+      popover: {
+        title: t('onboarding.admin.createGroup.title'),
+        description: t('onboarding.admin.createGroup.description'),
+        side: 'bottom',
+        align: 'end',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="group-form-name"]',
+      popover: {
+        title: t('onboarding.admin.groupName.title'),
+        description: t('onboarding.admin.groupName.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="group-form-platform"]',
+      popover: {
+        title: t('onboarding.admin.groupPlatform.title'),
+        description: t('onboarding.admin.groupPlatform.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="group-form-multiplier"]',
+      popover: {
+        title: t('onboarding.admin.groupMultiplier.title'),
+        description: t('onboarding.admin.groupMultiplier.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="group-form-exclusive"]',
+      popover: {
+        title: t('onboarding.admin.groupExclusive.title'),
+        description: t('onboarding.admin.groupExclusive.description'),
+        side: 'top',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="group-form-submit"]',
+      popover: {
+        title: t('onboarding.admin.groupSubmit.title'),
+        description: t('onboarding.admin.groupSubmit.description'),
+        side: 'left',
+        align: 'center',
+        showButtons: ['close']
+      }
+    },
+
+    // ========== Part 2: Add an account authorization ==========
+    {
+      element: '#sidebar-channel-manage',
+      popover: {
+        title: t('onboarding.admin.accountManage.title'),
+        description: t('onboarding.admin.accountManage.description'),
+        side: 'right',
+        align: 'center',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="accounts-create-btn"]',
+      popover: {
+        title: t('onboarding.admin.createAccount.title'),
+        description: t('onboarding.admin.createAccount.description'),
+        side: 'bottom',
+        align: 'end',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="account-form-name"]',
+      popover: {
+        title: t('onboarding.admin.accountName.title'),
+        description: t('onboarding.admin.accountName.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="account-form-platform"]',
+      popover: {
+        title: t('onboarding.admin.accountPlatform.title'),
+        description: t('onboarding.admin.accountPlatform.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="account-form-type"]',
+      popover: {
+        title: t('onboarding.admin.accountType.title'),
+        description: t('onboarding.admin.accountType.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="account-form-priority"]',
+      popover: {
+        title: t('onboarding.admin.accountPriority.title'),
+        description: t('onboarding.admin.accountPriority.description'),
+        side: 'top',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="account-form-groups"]',
+      popover: {
+        title: t('onboarding.admin.accountGroups.title'),
+        description: t('onboarding.admin.accountGroups.description'),
+        side: 'top',
+        align: 'center',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="account-form-submit"]',
+      popover: {
+        title: t('onboarding.admin.accountSubmit.title'),
+        description: t('onboarding.admin.accountSubmit.description'),
+        side: 'left',
+        align: 'center',
+        showButtons: ['close']
+      }
+    },
+
+    // ========== Part 3: Create an API key ==========
+    {
+      element: '[data-tour="sidebar-my-keys"]',
+      popover: {
+        title: t('onboarding.admin.keyManage.title'),
+        description: t('onboarding.admin.keyManage.description'),
+        side: 'right',
+        align: 'center',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="keys-create-btn"]',
+      popover: {
+        title: t('onboarding.admin.createKey.title'),
+        description: t('onboarding.admin.createKey.description'),
+        side: 'bottom',
+        align: 'end',
+        showButtons: ['close']
+      }
+    },
+    {
+      element: '[data-tour="key-form-name"]',
+      popover: {
+        title: t('onboarding.admin.keyName.title'),
+        description: t('onboarding.admin.keyName.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="key-form-group"]',
+      popover: {
+        title: t('onboarding.admin.keyGroup.title'),
+        description: t('onboarding.admin.keyGroup.description'),
+        side: 'right',
+        align: 'start',
+        showButtons: ['next', 'previous']
+      }
+    },
+    {
+      element: '[data-tour="key-form-submit"]',
+      popover: {
+        title: t('onboarding.admin.keySubmit.title'),
+        description: t('onboarding.admin.keySubmit.description'),
+        side: 'left',
+        align: 'center',
+        showButtons: ['close']
+      }
+    }
+  ]
+
+  // In simple mode, filter out group-related steps
+  if (isSimpleMode) {
+    return allSteps.filter(step => {
+      const element = step.element as string | undefined
+      // Drop steps for group management and account group selection
+      return !element || (
+        !element.includes('sidebar-group-manage') &&
+        !element.includes('groups-create-btn') &&
+        !element.includes('group-form-') &&
+        !element.includes('account-form-groups')
+      )
+    })
+  }
+
+  return allSteps
+}
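+
+// Illustrative sketch (not part of the original file): feeding these steps to
+// driver.js. Assumes the driver.js v1 API; the i18n `t` function would come
+// from vue-i18n in the real app.
+//
+//   import { driver } from 'driver.js'
+//   import 'driver.js/dist/driver.css'
+//
+//   const tour = driver({ showProgress: true, steps: getAdminSteps(t, false) })
+//   tour.drive()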
+
+/**
+ * Onboarding flow for regular users
+ */
+export const getUserSteps = (t: (key: string) => string): DriveStep[] => [
+  {
+    popover: {
+      title: t('onboarding.user.welcome.title'),
+      description: t('onboarding.user.welcome.description'),
+      align: 'center',
+      nextBtnText: t('onboarding.user.welcome.nextBtn'),
+      prevBtnText: t('onboarding.user.welcome.prevBtn')
+    }
+  },
+  {
+    element: '[data-tour="sidebar-my-keys"]',
+    popover: {
+      title: t('onboarding.user.keyManage.title'),
+      description: t('onboarding.user.keyManage.description'),
+      side: 'right',
+      align: 'center',
+      showButtons: ['close']
+    }
+  },
+  {
+    element: '[data-tour="keys-create-btn"]',
+    popover: {
+      title: t('onboarding.user.createKey.title'),
+      description: t('onboarding.user.createKey.description'),
+      side: 'bottom',
+      align: 'end',
+      showButtons: ['close']
+    }
+  },
+  {
+    element: '[data-tour="key-form-name"]',
+    popover: {
+      title: t('onboarding.user.keyName.title'),
+      description: t('onboarding.user.keyName.description'),
+      side: 'right',
+      align: 'start',
+      showButtons: ['next', 'previous']
+    }
+  },
+  {
+    element: '[data-tour="key-form-group"]',
+    popover: {
+      title: t('onboarding.user.keyGroup.title'),
+      description: t('onboarding.user.keyGroup.description'),
+      side: 'right',
+      align: 'start',
+      showButtons: ['next', 'previous']
+    }
+  },
+  {
+    element: '[data-tour="key-form-submit"]',
+    popover: {
+      title: t('onboarding.user.keySubmit.title'),
+      description: t('onboarding.user.keySubmit.description'),
+      side: 'left',
+      align: 'center',
+      showButtons: ['close']
+    }
+  }
+]
diff --git a/frontend/src/components/TurnstileWidget.vue b/frontend/src/components/TurnstileWidget.vue
new file mode 100644
index 00000000..2f0022bf
--- /dev/null
+++ b/frontend/src/components/TurnstileWidget.vue
@@ -0,0 +1,182 @@
+
+
+
+
diff --git a/frontend/src/components/account/AccountGroupsCell.vue b/frontend/src/components/account/AccountGroupsCell.vue
new file mode 100644
index 00000000..512383a5
--- /dev/null
+++ b/frontend/src/components/account/AccountGroupsCell.vue
@@ -0,0 +1,158 @@
+
+
diff --git a/frontend/src/components/account/AccountQuotaInfo.vue b/frontend/src/components/account/AccountQuotaInfo.vue
new file mode 100644
index 00000000..2f7f80de
--- /dev/null
+++ b/frontend/src/components/account/AccountQuotaInfo.vue
@@ -0,0 +1,198 @@
+
+
diff --git a/frontend/src/components/account/AccountStatsModal.vue b/frontend/src/components/account/AccountStatsModal.vue
new file mode 100644
index 00000000..7968fa8d
--- /dev/null
+++ b/frontend/src/components/account/AccountStatsModal.vue
@@ -0,0 +1,736 @@
+
+
diff --git a/frontend/src/components/account/AccountStatusIndicator.vue b/frontend/src/components/account/AccountStatusIndicator.vue
new file mode 100644
index 00000000..7dae33bb
--- /dev/null
+++ b/frontend/src/components/account/AccountStatusIndicator.vue
@@ -0,0 +1,171 @@
+
+
diff --git a/frontend/src/components/account/AccountTestModal.vue b/frontend/src/components/account/AccountTestModal.vue
new file mode 100644
index 00000000..42f3c1b9
--- /dev/null
+++ b/frontend/src/components/account/AccountTestModal.vue
@@ -0,0 +1,469 @@
+
+
diff --git a/frontend/src/components/account/AccountTodayStatsCell.vue b/frontend/src/components/account/AccountTodayStatsCell.vue
new file mode 100644
index 00000000..a920f314
--- /dev/null
+++ b/frontend/src/components/account/AccountTodayStatsCell.vue
@@ -0,0 +1,100 @@
+
+
diff --git a/frontend/src/components/account/AccountUsageCell.vue b/frontend/src/components/account/AccountUsageCell.vue
new file mode 100644
index 00000000..c0212c5a
--- /dev/null
+++ b/frontend/src/components/account/AccountUsageCell.vue
@@ -0,0 +1,845 @@
+
+
diff --git a/frontend/src/components/account/BulkEditAccountModal.vue
b/frontend/src/components/account/BulkEditAccountModal.vue new file mode 100644 index 00000000..fb776e96 --- /dev/null +++ b/frontend/src/components/account/BulkEditAccountModal.vue @@ -0,0 +1,1038 @@ + + + diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue new file mode 100644 index 00000000..c81de00e --- /dev/null +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -0,0 +1,2538 @@ + + + diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue new file mode 100644 index 00000000..00cd9b24 --- /dev/null +++ b/frontend/src/components/account/EditAccountModal.vue @@ -0,0 +1,1227 @@ + + + diff --git a/frontend/src/components/account/ModelWhitelistSelector.vue b/frontend/src/components/account/ModelWhitelistSelector.vue new file mode 100644 index 00000000..c8c1b852 --- /dev/null +++ b/frontend/src/components/account/ModelWhitelistSelector.vue @@ -0,0 +1,200 @@ + + + diff --git a/frontend/src/components/account/OAuthAuthorizationFlow.vue b/frontend/src/components/account/OAuthAuthorizationFlow.vue new file mode 100644 index 00000000..194237fa --- /dev/null +++ b/frontend/src/components/account/OAuthAuthorizationFlow.vue @@ -0,0 +1,582 @@ + + + diff --git a/frontend/src/components/account/ReAuthAccountModal.vue b/frontend/src/components/account/ReAuthAccountModal.vue new file mode 100644 index 00000000..b2734b4f --- /dev/null +++ b/frontend/src/components/account/ReAuthAccountModal.vue @@ -0,0 +1,533 @@ + + + diff --git a/frontend/src/components/account/SyncFromCrsModal.vue b/frontend/src/components/account/SyncFromCrsModal.vue new file mode 100644 index 00000000..4bd0320a --- /dev/null +++ b/frontend/src/components/account/SyncFromCrsModal.vue @@ -0,0 +1,189 @@ + + + diff --git a/frontend/src/components/account/TempUnschedStatusModal.vue b/frontend/src/components/account/TempUnschedStatusModal.vue new file mode 100644 index 00000000..b2c0b71b --- /dev/null +++ b/frontend/src/components/account/TempUnschedStatusModal.vue @@ -0,0 +1,249 @@ + + + diff --git a/frontend/src/components/account/UsageProgressBar.vue b/frontend/src/components/account/UsageProgressBar.vue new file mode 100644 index 00000000..93844295 --- /dev/null +++ b/frontend/src/components/account/UsageProgressBar.vue @@ -0,0 +1,167 @@ + + + diff --git a/frontend/src/components/account/index.ts b/frontend/src/components/account/index.ts new file mode 100644 index 00000000..0010e62c --- /dev/null +++ b/frontend/src/components/account/index.ts @@ -0,0 +1,13 @@ +export { default as CreateAccountModal } from './CreateAccountModal.vue' +export { default as EditAccountModal } from './EditAccountModal.vue' +export { default as BulkEditAccountModal } from './BulkEditAccountModal.vue' +export { default as ReAuthAccountModal } from './ReAuthAccountModal.vue' +export { default as OAuthAuthorizationFlow } from './OAuthAuthorizationFlow.vue' +export { default as AccountStatusIndicator } from './AccountStatusIndicator.vue' +export { default as AccountUsageCell } from './AccountUsageCell.vue' +export { default as UsageProgressBar } from './UsageProgressBar.vue' +export { default as AccountStatsModal } from './AccountStatsModal.vue' +export { default as AccountTestModal } from './AccountTestModal.vue' +export { default as AccountTodayStatsCell } from './AccountTodayStatsCell.vue' +export { default as TempUnschedStatusModal } from './TempUnschedStatusModal.vue' +export { default as SyncFromCrsModal } 
from './SyncFromCrsModal.vue' diff --git a/frontend/src/components/admin/account/AccountActionMenu.vue b/frontend/src/components/admin/account/AccountActionMenu.vue new file mode 100644 index 00000000..980fd352 --- /dev/null +++ b/frontend/src/components/admin/account/AccountActionMenu.vue @@ -0,0 +1,50 @@ + + + diff --git a/frontend/src/components/admin/account/AccountBulkActionsBar.vue b/frontend/src/components/admin/account/AccountBulkActionsBar.vue new file mode 100644 index 00000000..41111484 --- /dev/null +++ b/frontend/src/components/admin/account/AccountBulkActionsBar.vue @@ -0,0 +1,33 @@ + + + \ No newline at end of file diff --git a/frontend/src/components/admin/account/AccountStatsModal.vue b/frontend/src/components/admin/account/AccountStatsModal.vue new file mode 100644 index 00000000..72a71d36 --- /dev/null +++ b/frontend/src/components/admin/account/AccountStatsModal.vue @@ -0,0 +1,700 @@ + + + diff --git a/frontend/src/components/admin/account/AccountTableActions.vue b/frontend/src/components/admin/account/AccountTableActions.vue new file mode 100644 index 00000000..96fceaa0 --- /dev/null +++ b/frontend/src/components/admin/account/AccountTableActions.vue @@ -0,0 +1,19 @@ + + + diff --git a/frontend/src/components/admin/account/AccountTableFilters.vue b/frontend/src/components/admin/account/AccountTableFilters.vue new file mode 100644 index 00000000..47ceedd7 --- /dev/null +++ b/frontend/src/components/admin/account/AccountTableFilters.vue @@ -0,0 +1,25 @@ + + + diff --git a/frontend/src/components/admin/account/AccountTestModal.vue b/frontend/src/components/admin/account/AccountTestModal.vue new file mode 100644 index 00000000..2cb1c5a5 --- /dev/null +++ b/frontend/src/components/admin/account/AccountTestModal.vue @@ -0,0 +1,409 @@ + + + diff --git a/frontend/src/components/admin/account/ReAuthAccountModal.vue b/frontend/src/components/admin/account/ReAuthAccountModal.vue new file mode 100644 index 00000000..8133e029 --- /dev/null +++ b/frontend/src/components/admin/account/ReAuthAccountModal.vue @@ -0,0 +1,533 @@ + + + diff --git a/frontend/src/components/admin/usage/UsageExportProgress.vue b/frontend/src/components/admin/usage/UsageExportProgress.vue new file mode 100644 index 00000000..e571eff0 --- /dev/null +++ b/frontend/src/components/admin/usage/UsageExportProgress.vue @@ -0,0 +1,16 @@ + + + \ No newline at end of file diff --git a/frontend/src/components/admin/usage/UsageFilters.vue b/frontend/src/components/admin/usage/UsageFilters.vue new file mode 100644 index 00000000..0926d83c --- /dev/null +++ b/frontend/src/components/admin/usage/UsageFilters.vue @@ -0,0 +1,433 @@ + + + diff --git a/frontend/src/components/admin/usage/UsageStatsCards.vue b/frontend/src/components/admin/usage/UsageStatsCards.vue new file mode 100644 index 00000000..cd962a09 --- /dev/null +++ b/frontend/src/components/admin/usage/UsageStatsCards.vue @@ -0,0 +1,72 @@ + + + diff --git a/frontend/src/components/admin/usage/UsageTable.vue b/frontend/src/components/admin/usage/UsageTable.vue new file mode 100644 index 00000000..d2260c59 --- /dev/null +++ b/frontend/src/components/admin/usage/UsageTable.vue @@ -0,0 +1,327 @@ + + + diff --git a/frontend/src/components/admin/user/UserAllowedGroupsModal.vue b/frontend/src/components/admin/user/UserAllowedGroupsModal.vue new file mode 100644 index 00000000..c1783fd2 --- /dev/null +++ b/frontend/src/components/admin/user/UserAllowedGroupsModal.vue @@ -0,0 +1,59 @@ + + + \ No newline at end of file diff --git 
a/frontend/src/components/admin/user/UserApiKeysModal.vue b/frontend/src/components/admin/user/UserApiKeysModal.vue new file mode 100644 index 00000000..ef098ba1 --- /dev/null +++ b/frontend/src/components/admin/user/UserApiKeysModal.vue @@ -0,0 +1,47 @@ + + + \ No newline at end of file diff --git a/frontend/src/components/admin/user/UserBalanceModal.vue b/frontend/src/components/admin/user/UserBalanceModal.vue new file mode 100644 index 00000000..c669c2a5 --- /dev/null +++ b/frontend/src/components/admin/user/UserBalanceModal.vue @@ -0,0 +1,86 @@ + + + diff --git a/frontend/src/components/admin/user/UserCreateModal.vue b/frontend/src/components/admin/user/UserCreateModal.vue new file mode 100644 index 00000000..0e44d81e --- /dev/null +++ b/frontend/src/components/admin/user/UserCreateModal.vue @@ -0,0 +1,78 @@ + + + diff --git a/frontend/src/components/admin/user/UserEditModal.vue b/frontend/src/components/admin/user/UserEditModal.vue new file mode 100644 index 00000000..2c4b117a --- /dev/null +++ b/frontend/src/components/admin/user/UserEditModal.vue @@ -0,0 +1,110 @@ + + + diff --git a/frontend/src/components/auth/LinuxDoOAuthSection.vue b/frontend/src/components/auth/LinuxDoOAuthSection.vue new file mode 100644 index 00000000..8012b101 --- /dev/null +++ b/frontend/src/components/auth/LinuxDoOAuthSection.vue @@ -0,0 +1,61 @@ + + + + diff --git a/frontend/src/components/charts/ModelDistributionChart.vue b/frontend/src/components/charts/ModelDistributionChart.vue new file mode 100644 index 00000000..9374ef03 --- /dev/null +++ b/frontend/src/components/charts/ModelDistributionChart.vue @@ -0,0 +1,152 @@ + + + diff --git a/frontend/src/components/charts/TokenUsageTrend.vue b/frontend/src/components/charts/TokenUsageTrend.vue new file mode 100644 index 00000000..d9ceda87 --- /dev/null +++ b/frontend/src/components/charts/TokenUsageTrend.vue @@ -0,0 +1,187 @@ + + + diff --git a/frontend/src/components/common/BaseDialog.vue b/frontend/src/components/common/BaseDialog.vue new file mode 100644 index 00000000..3d38b568 --- /dev/null +++ b/frontend/src/components/common/BaseDialog.vue @@ -0,0 +1,142 @@ + + + diff --git a/frontend/src/components/common/ConfirmDialog.vue b/frontend/src/components/common/ConfirmDialog.vue new file mode 100644 index 00000000..abccc416 --- /dev/null +++ b/frontend/src/components/common/ConfirmDialog.vue @@ -0,0 +1,70 @@ + + + diff --git a/frontend/src/components/common/DataTable.vue b/frontend/src/components/common/DataTable.vue new file mode 100644 index 00000000..eab337ac --- /dev/null +++ b/frontend/src/components/common/DataTable.vue @@ -0,0 +1,538 @@ + + + + + diff --git a/frontend/src/components/common/DateRangePicker.vue b/frontend/src/components/common/DateRangePicker.vue new file mode 100644 index 00000000..cf5b56fc --- /dev/null +++ b/frontend/src/components/common/DateRangePicker.vue @@ -0,0 +1,425 @@ + + + + + diff --git a/frontend/src/components/common/EmptyState.vue b/frontend/src/components/common/EmptyState.vue new file mode 100644 index 00000000..132a85c0 --- /dev/null +++ b/frontend/src/components/common/EmptyState.vue @@ -0,0 +1,80 @@ + + + diff --git a/frontend/src/components/common/ExportProgressDialog.vue b/frontend/src/components/common/ExportProgressDialog.vue new file mode 100644 index 00000000..f8712a47 --- /dev/null +++ b/frontend/src/components/common/ExportProgressDialog.vue @@ -0,0 +1,68 @@ + + + diff --git a/frontend/src/components/common/GroupBadge.vue b/frontend/src/components/common/GroupBadge.vue new file mode 100644 index 
00000000..239d0452 --- /dev/null +++ b/frontend/src/components/common/GroupBadge.vue @@ -0,0 +1,126 @@ + + + diff --git a/frontend/src/components/common/GroupOptionItem.vue b/frontend/src/components/common/GroupOptionItem.vue new file mode 100644 index 00000000..3283c330 --- /dev/null +++ b/frontend/src/components/common/GroupOptionItem.vue @@ -0,0 +1,52 @@ + + + diff --git a/frontend/src/components/common/GroupSelector.vue b/frontend/src/components/common/GroupSelector.vue new file mode 100644 index 00000000..c67d32fc --- /dev/null +++ b/frontend/src/components/common/GroupSelector.vue @@ -0,0 +1,82 @@ + + + diff --git a/frontend/src/components/common/HelpTooltip.vue b/frontend/src/components/common/HelpTooltip.vue new file mode 100644 index 00000000..7679ced4 --- /dev/null +++ b/frontend/src/components/common/HelpTooltip.vue @@ -0,0 +1,44 @@ + + + + diff --git a/frontend/src/components/common/Input.vue b/frontend/src/components/common/Input.vue new file mode 100644 index 00000000..a6c531cf --- /dev/null +++ b/frontend/src/components/common/Input.vue @@ -0,0 +1,103 @@ + + + diff --git a/frontend/src/components/common/LoadingSpinner.vue b/frontend/src/components/common/LoadingSpinner.vue new file mode 100644 index 00000000..b368ba58 --- /dev/null +++ b/frontend/src/components/common/LoadingSpinner.vue @@ -0,0 +1,65 @@ + + + + + diff --git a/frontend/src/components/common/LocaleSwitcher.vue b/frontend/src/components/common/LocaleSwitcher.vue new file mode 100644 index 00000000..f1ae1eff --- /dev/null +++ b/frontend/src/components/common/LocaleSwitcher.vue @@ -0,0 +1,91 @@ + + + + + diff --git a/frontend/src/components/common/ModelIcon.vue b/frontend/src/components/common/ModelIcon.vue new file mode 100644 index 00000000..2a05bf71 --- /dev/null +++ b/frontend/src/components/common/ModelIcon.vue @@ -0,0 +1,278 @@ + + + + + diff --git a/frontend/src/components/common/Pagination.vue b/frontend/src/components/common/Pagination.vue new file mode 100644 index 00000000..728bc0d3 --- /dev/null +++ b/frontend/src/components/common/Pagination.vue @@ -0,0 +1,205 @@ + + + + + diff --git a/frontend/src/components/common/PlatformIcon.vue b/frontend/src/components/common/PlatformIcon.vue new file mode 100644 index 00000000..1e137ae5 --- /dev/null +++ b/frontend/src/components/common/PlatformIcon.vue @@ -0,0 +1,52 @@ + + + diff --git a/frontend/src/components/common/PlatformTypeBadge.vue b/frontend/src/components/common/PlatformTypeBadge.vue new file mode 100644 index 00000000..ee8264ab --- /dev/null +++ b/frontend/src/components/common/PlatformTypeBadge.vue @@ -0,0 +1,92 @@ + + + diff --git a/frontend/src/components/common/ProxySelector.vue b/frontend/src/components/common/ProxySelector.vue new file mode 100644 index 00000000..a185d4b9 --- /dev/null +++ b/frontend/src/components/common/ProxySelector.vue @@ -0,0 +1,426 @@ + + + + + diff --git a/frontend/src/components/common/README.md b/frontend/src/components/common/README.md new file mode 100644 index 00000000..1733cfad --- /dev/null +++ b/frontend/src/components/common/README.md @@ -0,0 +1,268 @@ +# Common Components + +This directory contains reusable Vue 3 components built with Composition API, TypeScript, and TailwindCSS. + +## Components + +### DataTable.vue + +A generic data table component with sorting, loading states, and custom cell rendering. 
+
+**Props:**
+
+- `columns: Column[]` - Array of column definitions with key, label, sortable, and formatter
+- `data: any[]` - Array of data objects to display
+- `loading?: boolean` - Show loading skeleton
+- `rowKey?: string | (row: any) => string | number` - Row key field or resolver (defaults to `row.id`, falls back to index)
+
+**Slots:**
+
+- `empty` - Custom empty state content
+- `cell-{key}` - Custom cell renderer for a specific column (receives `row` and `value`)
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props above; `columns`, `users` and `loading` come from the surrounding setup -->
+<DataTable :columns="columns" :data="users" :loading="loading" row-key="id">
+  <template #cell-status="{ value }">
+    <span class="font-medium">{{ value }}</span>
+  </template>
+  <template #empty>No users found</template>
+</DataTable>
+```
+
+---
+
+### Pagination.vue
+
+Pagination component with page numbers, navigation, and page size selector.
+
+**Props:**
+
+- `total: number` - Total number of items
+- `page: number` - Current page (1-indexed)
+- `pageSize: number` - Items per page
+- `pageSizeOptions?: number[]` - Available page size options (default: [10, 20, 50, 100])
+
+**Events:**
+
+- `update:page` - Emitted when page changes
+- `update:pageSize` - Emitted when page size changes
+
+**Usage:**
+
+```vue
+<!-- minimal sketch: the update:* events allow v-model binding -->
+<Pagination :total="total" v-model:page="page" v-model:pageSize="pageSize" />
+```
+
+---
+
+### BaseDialog.vue
+
+Modal dialog with customizable size and close behavior.
+
+**Props:**
+
+- `show: boolean` - Control dialog visibility
+- `title: string` - Dialog title
+- `size?: 'sm' | 'md' | 'lg' | 'xl' | 'full'` - Dialog size (default: 'md')
+- `closeOnEscape?: boolean` - Close on Escape key (default: true)
+- `closeOnClickOutside?: boolean` - Close on backdrop click (default: true)
+
+**Events:**
+
+- `close` - Emitted when the dialog should close
+
+**Slots:**
+
+- `default` - Dialog body content
+- `footer` - Dialog footer content
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props and events above; `show` is a local ref -->
+<BaseDialog :show="show" title="Edit User" size="lg" @close="show = false">
+  <p>Dialog body content</p>
+  <template #footer>
+    <button @click="show = false">Close</button>
+  </template>
+</BaseDialog>
+```
+
+---
+
+### ConfirmDialog.vue
+
+Confirmation dialog built on top of the BaseDialog component.
+
+**Props:**
+
+- `show: boolean` - Control dialog visibility
+- `title: string` - Dialog title
+- `message: string` - Confirmation message
+- `confirmText?: string` - Confirm button text (default: 'Confirm')
+- `cancelText?: string` - Cancel button text (default: 'Cancel')
+- `danger?: boolean` - Use danger/red styling (default: false)
+
+**Events:**
+
+- `confirm` - Emitted when user confirms
+- `cancel` - Emitted when user cancels
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props and events above -->
+<ConfirmDialog
+  :show="showConfirm"
+  title="Delete User"
+  message="This action cannot be undone. Continue?"
+  danger
+  @confirm="deleteUser"
+  @cancel="showConfirm = false"
+/>
+```
+
+---
+
+### StatCard.vue
+
+Statistics card component for displaying metrics with optional change indicators.
+
+**Props:**
+
+- `title: string` - Card title
+- `value: number | string` - Main value to display
+- `icon?: Component` - Icon component
+- `change?: number` - Percentage change value
+- `changeType?: 'up' | 'down' | 'neutral'` - Change direction (default: 'neutral')
+- `formatValue?: (value) => string` - Custom value formatter
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props above -->
+<StatCard title="Total Requests" :value="12345" :change="8.2" change-type="up" />
+```
+
+---
+
+### Toast.vue
+
+Toast notification component that automatically displays toasts from the app store.
+
+**Usage:**
+
+```vue
+<!-- minimal sketch: mount once, typically near the root of App.vue -->
+<Toast />
+```
+
+```typescript
+// Trigger toasts from anywhere using the app store
+import { useAppStore } from '@/stores/app'
+
+const appStore = useAppStore()
+
+appStore.addToast({
+  type: 'success',
+  title: 'Success!',
+  message: 'User created successfully',
+  duration: 3000
+})
+
+appStore.addToast({
+  type: 'error',
+  message: 'Failed to delete user'
+})
+```
+
+---
+
+### LoadingSpinner.vue
+
+Simple animated loading spinner.
+
+**Props:**
+
+- `size?: 'sm' | 'md' | 'lg' | 'xl'` - Spinner size (default: 'md')
+- `color?: 'primary' | 'secondary' | 'white' | 'gray'` - Spinner color (default: 'primary')
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props above -->
+<LoadingSpinner v-if="loading" size="lg" />
+```
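+
+A common list-page pattern combines the spinner with `EmptyState` (documented next) and `DataTable`; a minimal sketch:
+
+```vue
+<LoadingSpinner v-if="loading" size="lg" />
+<EmptyState
+  v-else-if="items.length === 0"
+  title="Nothing here yet"
+  description="Create your first item to get started."
+/>
+<DataTable v-else :columns="columns" :data="items" />
+```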
+
+---
+
+### EmptyState.vue
+
+Empty state placeholder with icon, message, and optional action button.
+
+**Props:**
+
+- `icon?: Component` - Icon component
+- `title: string` - Empty state title
+- `description: string` - Empty state description
+- `actionText?: string` - Action button text
+- `actionTo?: string | object` - Router link destination
+- `actionIcon?: boolean` - Show plus icon in button (default: true)
+
+**Slots:**
+
+- `icon` - Custom icon content
+- `action` - Custom action button/link
+
+**Usage:**
+
+```vue
+<!-- minimal sketch based on the props above -->
+<EmptyState
+  title="No API keys yet"
+  description="Create your first key to get started."
+  action-text="Create Key"
+  :action-to="{ name: 'ApiKeys' }"
+/>
+```
+
+## Import
+
+You can import components individually:
+
+```typescript
+import { DataTable, Pagination, BaseDialog } from '@/components/common'
+```
+
+Or import specific components:
+
+```typescript
+import DataTable from '@/components/common/DataTable.vue'
+```
+
+## Features
+
+All components include:
+
+- **TypeScript support** with proper type definitions
+- **Accessibility** with ARIA attributes and keyboard navigation
+- **Responsive design** with mobile-friendly layouts
+- **TailwindCSS styling** for consistent design
+- **Vue 3 Composition API** with `<script setup>` syntax
diff --git a/frontend/src/components/common/Select.vue b/frontend/src/components/common/Select.vue
new file mode 100644
index 00000000..c90d0201
--- /dev/null
+++ b/frontend/src/components/common/Select.vue
@@ -0,0 +1,514 @@
+
+
+
diff --git a/frontend/src/components/common/Skeleton.vue b/frontend/src/components/common/Skeleton.vue
new file mode 100644
index 00000000..aa90a619
--- /dev/null
+++ b/frontend/src/components/common/Skeleton.vue
@@ -0,0 +1,46 @@
+
+
diff --git a/frontend/src/components/common/StatCard.vue b/frontend/src/components/common/StatCard.vue
new file mode 100644
index 00000000..203a2fa8
--- /dev/null
+++ b/frontend/src/components/common/StatCard.vue
@@ -0,0 +1,81 @@
+
+
diff --git a/frontend/src/components/common/StatusBadge.vue b/frontend/src/components/common/StatusBadge.vue
new file mode 100644
index 00000000..a844b6cc
--- /dev/null
+++ b/frontend/src/components/common/StatusBadge.vue
@@ -0,0 +1,39 @@
+
+
diff --git a/frontend/src/components/common/SubscriptionProgressMini.vue b/frontend/src/components/common/SubscriptionProgressMini.vue
new file mode 100644
index 00000000..bba7aaf7
--- /dev/null
+++ b/frontend/src/components/common/SubscriptionProgressMini.vue
@@ -0,0 +1,320 @@
+
+
+
+
diff --git a/frontend/src/components/common/TextArea.vue b/frontend/src/components/common/TextArea.vue
new file mode 100644
index 00000000..d392fbfd
--- /dev/null
+++ b/frontend/src/components/common/TextArea.vue
@@ -0,0 +1,81 @@
+
+
diff --git a/frontend/src/components/common/Toast.vue b/frontend/src/components/common/Toast.vue
new file mode 100644
index 00000000..1bfb9958
--- /dev/null
+++ b/frontend/src/components/common/Toast.vue
@@ -0,0 +1,164 @@
+
+
diff --git a/frontend/src/components/common/Toggle.vue b/frontend/src/components/common/Toggle.vue
new file mode 100644
index 00000000..9e9b4333
--- /dev/null
+++ b/frontend/src/components/common/Toggle.vue
@@ -0,0 +1,29 @@
+
+
diff --git a/frontend/src/components/common/VersionBadge.vue b/frontend/src/components/common/VersionBadge.vue
new file mode 100644
index 00000000..57de9c6c
--- /dev/null
+++ b/frontend/src/components/common/VersionBadge.vue
@@ -0,0 +1,555 @@
+
+
+
+
diff --git a/frontend/src/components/common/index.ts b/frontend/src/components/common/index.ts
new file mode 100644
index 00000000..754034a2
--- /dev/null
+++ b/frontend/src/components/common/index.ts
@@ -0,0 +1,14 @@
+// Export all common components
+export { default as DataTable } from './DataTable.vue'
+export { default as Pagination } from './Pagination.vue'
+export { default as BaseDialog } from './BaseDialog.vue'
+export { default as ConfirmDialog } from './ConfirmDialog.vue'
+export { default as StatCard } from './StatCard.vue'
+export { default as Toast } from './Toast.vue'
+export { default as LoadingSpinner } from './LoadingSpinner.vue'
+export { default as EmptyState } from './EmptyState.vue'
+export { default as LocaleSwitcher } from './LocaleSwitcher.vue'
+export { default as ExportProgressDialog } from './ExportProgressDialog.vue'
+
+// Export types
+export type { Column } from './types'
diff --git a/frontend/src/components/common/types.ts b/frontend/src/components/common/types.ts
new file mode 100644
index 00000000..4a0ca8d3
--- /dev/null
+++ b/frontend/src/components/common/types.ts
@@ -0,0 +1,10 @@
+/**
+ * Common component types
+ */
+
+export interface Column {
+  key: string
+  label: string
+  sortable?: boolean
+  formatter?: (value: any, row: any) => string
+}
diff --git a/frontend/src/components/icons/Icon.vue b/frontend/src/components/icons/Icon.vue
new file mode 100644
index 00000000..c8ab8aed
--- /dev/null
+++ b/frontend/src/components/icons/Icon.vue
@@ -0,0 +1,140 @@
+
+
diff --git a/frontend/src/components/icons/index.ts b/frontend/src/components/icons/index.ts
new file mode 100644
index 00000000..ea5ccfd4
--- /dev/null
+++ b/frontend/src/components/icons/index.ts
@@ -0,0 +1 @@
+export { default as Icon } from './Icon.vue'
diff --git a/frontend/src/components/keys/UseKeyModal.vue b/frontend/src/components/keys/UseKeyModal.vue
new file mode 100644
index 00000000..8075ba70
--- /dev/null
+++ b/frontend/src/components/keys/UseKeyModal.vue
@@ -0,0 +1,625 @@
+
+
diff --git a/frontend/src/components/layout/AppHeader.vue b/frontend/src/components/layout/AppHeader.vue
new file mode 100644
index 00000000..fd8742c3
--- /dev/null
+++ b/frontend/src/components/layout/AppHeader.vue
@@ -0,0 +1,304 @@
+
+
+
+
diff --git a/frontend/src/components/layout/AppLayout.vue b/frontend/src/components/layout/AppLayout.vue
new file mode 100644
index 00000000..4c5b0c5d
--- /dev/null
+++ b/frontend/src/components/layout/AppLayout.vue
@@ -0,0 +1,52 @@
+
+
diff --git a/frontend/src/components/layout/AppSidebar.vue b/frontend/src/components/layout/AppSidebar.vue
new file mode 100644
index 00000000..391f858f
--- /dev/null
+++ b/frontend/src/components/layout/AppSidebar.vue
@@ -0,0 +1,547 @@
+
+
+
+
diff --git a/frontend/src/components/layout/AuthLayout.vue b/frontend/src/components/layout/AuthLayout.vue
new file mode 100644
index 00000000..3cfc1d4d
--- /dev/null
+++ b/frontend/src/components/layout/AuthLayout.vue
@@ -0,0 +1,90 @@
+
+
+
+
diff --git a/frontend/src/components/layout/EXAMPLES.md b/frontend/src/components/layout/EXAMPLES.md
new file mode 100644
index 00000000..6a4bbd22
--- /dev/null
+++ b/frontend/src/components/layout/EXAMPLES.md
@@ -0,0 +1,424 @@
+# Layout Component Examples
+
+## Example 1: Dashboard Page
+
+```vue
+<!-- minimal sketch: authenticated page wrapped in AppLayout -->
+<script setup lang="ts">
+import { AppLayout } from '@/components/layout'
+import { StatCard } from '@/components/common'
+</script>
+
+<template>
+  <AppLayout>
+    <StatCard title="Requests Today" :value="1024" />
+  </AppLayout>
+</template>
+```
+
+---
+
+## Example 2: Login Page
+
+```vue
+<!-- minimal sketch: auth page wrapped in AuthLayout -->
+<script setup lang="ts">
+import { AuthLayout } from '@/components/layout'
+</script>
+
+<template>
+  <AuthLayout>
+    <form @submit.prevent>
+      <!-- login form fields -->
+    </form>
+  </AuthLayout>
+</template>
+```
+
+---
+
+## Example 3: API Keys Page with Custom Header Title
+
+```vue
+<!-- minimal sketch; assumes AppLayout forwards a `title` prop to the header -->
+<script setup lang="ts">
+import { AppLayout } from '@/components/layout'
+</script>
+
+<template>
+  <AppLayout title="API Keys">
+    <!-- key list -->
+  </AppLayout>
+</template>
+```
+
+---
+
+## Example 4: Admin Users Page
+
+```vue
+<!-- minimal sketch combining AppLayout with the common table components -->
+<script setup lang="ts">
+import { AppLayout } from '@/components/layout'
+import { DataTable, Pagination } from '@/components/common'
+// columns, users, loading, total, page and pageSize come from your page state
+</script>
+
+<template>
+  <AppLayout>
+    <DataTable :columns="columns" :data="users" :loading="loading" />
+    <Pagination :total="total" v-model:page="page" v-model:pageSize="pageSize" />
+  </AppLayout>
+</template>
+```
+
+---
+
+## Example 5: Profile Page
+
+```vue
+<!-- minimal sketch: a simple authenticated settings page -->
+<script setup lang="ts">
+import { AppLayout } from '@/components/layout'
+</script>
+
+<template>
+  <AppLayout>
+    <form @submit.prevent>
+      <!-- profile settings fields -->
+    </form>
+  </AppLayout>
+</template>
+```
+
+---
+
+## Tips for Using Layouts
+
+1. **Page Titles**: Set route meta to automatically display page titles in the header
+2. **Loading States**: Use `appStore.setLoading(true/false)` for global loading indicators
+3. **Toast Notifications**: Use `appStore.showSuccess()`, `appStore.showError()`, etc.
+4. **Authentication**: All authenticated pages should use `AppLayout`
+5. **Auth Pages**: Login and Register pages should use `AuthLayout`
+6. **Sidebar State**: The sidebar state persists across navigation
+7. **Mobile First**: All examples are responsive by default using Tailwind's mobile-first approach
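+
+Tips 1 and 2 in practice (a minimal sketch; assumes the header displays `route.meta.title`):
+
+```typescript
+import { useRoute } from 'vue-router'
+import { useAppStore } from '@/stores'
+
+const route = useRoute()
+const appStore = useAppStore()
+
+// Tip 1: the page title shown in the header comes from route meta
+const title = route.meta.title as string | undefined
+
+// Tip 2: wrap async work in the global loading indicator
+async function refresh() {
+  await appStore.withLoading(async () => {
+    // fetch page data here
+  })
+}
+```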
diff --git a/frontend/src/components/layout/INTEGRATION.md b/frontend/src/components/layout/INTEGRATION.md
new file mode 100644
index 00000000..c2041aa7
--- /dev/null
+++ b/frontend/src/components/layout/INTEGRATION.md
@@ -0,0 +1,484 @@
+# Layout Components Integration Guide
+
+## Quick Start
+
+### 1. Import Layout Components
+
+```typescript
+// In your view files
+import { AppLayout, AuthLayout } from '@/components/layout'
+```
+
+### 2. Use in Routes
+
+```typescript
+// src/router/index.ts
+import { createRouter, createWebHistory } from 'vue-router'
+import type { RouteRecordRaw } from 'vue-router'
+import { useAuthStore } from '@/stores'
+
+// Views
+import DashboardView from '@/views/DashboardView.vue'
+import LoginView from '@/views/auth/LoginView.vue'
+import RegisterView from '@/views/auth/RegisterView.vue'
+
+const routes: RouteRecordRaw[] = [
+  // Auth routes (no layout needed - views use AuthLayout internally)
+  {
+    path: '/login',
+    name: 'Login',
+    component: LoginView,
+    meta: { requiresAuth: false }
+  },
+  {
+    path: '/register',
+    name: 'Register',
+    component: RegisterView,
+    meta: { requiresAuth: false }
+  },
+
+  // User routes (use AppLayout)
+  {
+    path: '/dashboard',
+    name: 'Dashboard',
+    component: DashboardView,
+    meta: { requiresAuth: true, title: 'Dashboard' }
+  },
+  {
+    path: '/api-keys',
+    name: 'ApiKeys',
+    component: () => import('@/views/ApiKeysView.vue'),
+    meta: { requiresAuth: true, title: 'API Keys' }
+  },
+  {
+    path: '/usage',
+    name: 'Usage',
+    component: () => import('@/views/UsageView.vue'),
+    meta: { requiresAuth: true, title: 'Usage Statistics' }
+  },
+  {
+    path: '/redeem',
+    name: 'Redeem',
+    component: () => import('@/views/RedeemView.vue'),
+    meta: { requiresAuth: true, title: 'Redeem Code' }
+  },
+  {
+    path: '/profile',
+    name: 'Profile',
+    component: () => import('@/views/ProfileView.vue'),
+    meta: { requiresAuth: true, title: 'Profile Settings' }
+  },
+
+  // Admin routes (use AppLayout, admin only)
+  {
+    path: '/admin/dashboard',
+    name: 'AdminDashboard',
+    component: () => import('@/views/admin/DashboardView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'Admin Dashboard' }
+  },
+  {
+    path: '/admin/users',
+    name: 'AdminUsers',
+    component: () => import('@/views/admin/UsersView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'User Management' }
+  },
+  {
+    path: '/admin/groups',
+    name: 'AdminGroups',
+    component: () => import('@/views/admin/GroupsView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'Groups' }
+  },
+  {
+    path: '/admin/accounts',
+    name: 'AdminAccounts',
+    component: () => import('@/views/admin/AccountsView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'Accounts' }
+  },
+  {
+    path: '/admin/proxies',
+    name: 'AdminProxies',
+    component: () => import('@/views/admin/ProxiesView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'Proxies' }
+  },
+  {
+    path: '/admin/redeem-codes',
+    name: 'AdminRedeemCodes',
+    component: () => import('@/views/admin/RedeemCodesView.vue'),
+    meta: { requiresAuth: true, requiresAdmin: true, title: 'Redeem Codes' }
+  },
+
+  // Default redirect
+  {
+    path: '/',
+    redirect: '/dashboard'
+  }
+]
+
+const router = createRouter({
+  history: createWebHistory(),
+  routes
+})
+
+// Navigation guards
+router.beforeEach((to, from, next) => {
+  const authStore = useAuthStore()
+
+  if (to.meta.requiresAuth && !authStore.isAuthenticated) {
+    // Redirect to login if not authenticated
+    next('/login')
+  } else if (to.meta.requiresAdmin && !authStore.isAdmin) {
+    // Redirect to dashboard if not admin
+    next('/dashboard')
+  } else {
+    next()
+  }
+})
+
+export default router
+```
+
+### 3. Initialize Stores in main.ts
+
+```typescript
+// src/main.ts
+import { createApp } from 'vue'
+import { createPinia } from 'pinia'
+import App from './App.vue'
+import router from './router'
+import './style.css'
+
+const app = createApp(App)
+const pinia = createPinia()
+
+app.use(pinia)
+app.use(router)
+
+// Initialize auth state on app startup
+import { useAuthStore } from '@/stores'
+const authStore = useAuthStore()
+authStore.checkAuth()
+
+app.mount('#app')
+```
+
+### 4. Update App.vue
+
+```vue
+<!-- minimal sketch: render the routed view and mount Toast once -->
+<template>
+  <RouterView />
+  <Toast />
+</template>
+
+<script setup lang="ts">
+import { Toast } from '@/components/common'
+</script>
+```
+
+---
+
+## View Component Templates
+
+### Authenticated Page Template
+
+```vue
+<!-- minimal sketch: wrap page content in AppLayout -->
+<script setup lang="ts">
+import { AppLayout } from '@/components/layout'
+</script>
+
+<template>
+  <AppLayout>
+    <!-- page content -->
+  </AppLayout>
+</template>
+```
+
+### Auth Page Template
+
+```vue
+<!-- minimal sketch: wrap the form in AuthLayout -->
+<script setup lang="ts">
+import { AuthLayout } from '@/components/layout'
+</script>
+
+<template>
+  <AuthLayout>
+    <!-- login / register form -->
+  </AuthLayout>
+</template>
+```
+
+---
+
+## Customization
+
+### Changing Colors
+
+The components use Tailwind's indigo color scheme by default. To change:
+
+```vue

+<!-- minimal sketch: swap the indigo utility classes for your brand color -->
+<template>
+  <!-- e.g. replace bg-indigo-600 / hover:bg-indigo-700 with emerald -->
+  <button class="bg-emerald-600 hover:bg-emerald-700 text-white">Save</button>
+</template>
+``` + +### Adding Custom Icons + +Replace HTML entity icons with your preferred icon library: + +```vue + +📈 + + + +``` + +### Sidebar Customization + +Modify navigation items in `AppSidebar.vue`: + +```typescript +// Add/remove/modify navigation items +const userNavItems = [ + { path: '/dashboard', label: 'Dashboard', icon: '📈' }, + { path: '/new-page', label: 'New Page', icon: '📄' } // Add new item + // ... +] +``` + +### Header Customization + +Modify user dropdown in `AppHeader.vue`: + +```vue + + + + Settings + +``` + +--- + +## Mobile Responsive Behavior + +### Sidebar + +- **Desktop (md+)**: Always visible, can be collapsed to icon-only view +- **Mobile**: Hidden by default, shown via menu toggle in header + +### Header + +- **Desktop**: Shows full user info and balance +- **Mobile**: Shows compact view with hamburger menu + +To improve mobile experience, you can add overlay and transitions: + +```vue + + + + +
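+<!-- minimal sketch (assumed markup): a backdrop that closes the mobile sidebar -->
+<template>
+  <!-- rendered only while the mobile sidebar is open -->
+  <div
+    v-if="appStore.sidebarOpen"
+    class="fixed inset-0 z-30 bg-black/50 md:hidden"
+    @click="appStore.toggleSidebar()"
+  />
+</template>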
+``` + +--- + +## State Management Integration + +### Auth Store Usage + +```typescript +import { useAuthStore } from '@/stores' + +const authStore = useAuthStore() + +// Check if user is authenticated +if (authStore.isAuthenticated) { + // User is logged in +} + +// Check if user is admin +if (authStore.isAdmin) { + // User has admin role +} + +// Get current user +const user = authStore.user +``` + +### App Store Usage + +```typescript +import { useAppStore } from '@/stores' + +const appStore = useAppStore() + +// Toggle sidebar +appStore.toggleSidebar() + +// Show notifications +appStore.showSuccess('Operation completed!') +appStore.showError('Something went wrong') +appStore.showInfo('Did you know...') +appStore.showWarning('Be careful!') + +// Loading state +appStore.setLoading(true) +// ... perform operation +appStore.setLoading(false) + +// Or use helper +await appStore.withLoading(async () => { + // Your async operation +}) +``` + +--- + +## Accessibility Features + +All layout components include: + +- **Semantic HTML**: Proper use of `